query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
The `ComputeCluster` data source can be used to discover the ID of a cluster in vSphere. This is useful to fetch the ID of a cluster that you want to use for virtual machine placement via the `VirtualMachine` resource, allowing to specify the cluster's root resource pool directly versus using the alias available through the `ResourcePool` data source. > You may also wish to see the `ComputeCluster` resource for more information about clusters and how to managed the resource in this provider. Example Usage ```python import pulumi import pulumi_vsphere as vsphere datacenter = vsphere.get_datacenter(name="dc01") compute_cluster = vsphere.get_compute_cluster(name="cluster01", datacenter_id=datacenter.id) ```
def get_compute_cluster_output(datacenter_id: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetComputeClusterResult]: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_compute_cluster(datacenter_id: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeClusterResult:\n __args__ = dict()\n __args__['datacenterId'] = datacenter_id\n __args__['name'] = n...
[ "0.72311264", "0.70518404", "0.67735595", "0.6768537", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.66624004", "0.66624004", "0.66624004", "0.66624004", "0.6613303", "0.660601", "0.660601", "0.6474703", "0.6474703", "0.6474703", "0.63924193", "0.6386118...
0.70052683
2
Despliega el formulario de creacion de usuario y empleado para una sucursal.
def get(self,request,*args,**kwargs): sucursal = Sucursal.objects.get(id=kwargs['spk']) user_form = UserForm() empleado_form = EmpleadoForm( initial={'sucursal':sucursal.id} ) forms = [user_form,empleado_form] context = { 'section_title':'Nuevo Empleado', 'button_text':'Crear', 'sucursal':sucursal, 'user_form':user_form, 'empleado_form':empleado_form } return render_to_response( 'empleado/empleado_form.html', context, context_instance=RequestContext(request))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_user(request):\n user = User.objects.get(username=request.user.username)\n #Validacion de permisos----------------------------------------------\n roles = UsuarioRolSistema.objects.filter(usuario = user).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.per...
[ "0.67103773", "0.65624523", "0.64488596", "0.62953645", "0.62946165", "0.62440854", "0.62051827", "0.6188632", "0.60768074", "0.60262233", "0.6007331", "0.5995433", "0.5959869", "0.592796", "0.5894467", "0.58874565", "0.58790886", "0.58777773", "0.5834341", "0.5799069", "0.57...
0.5909672
14
Test addition for Complex with Complex, complex, int and float
def test_add(): z = Complex(1, -2) w = Complex(1, 1) assert (z + w) == Complex(2, -1) assert (z + (1+1j)) == Complex(2, -1) assert (z + 2) == Complex(3, -2) assert (z + 2.0) == Complex(3, -2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_sum(c_1,c_2):\n return c_1 + c_2", "def _cmplx_add_ ( s , o ) :\n return o + complex ( s )", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\...
[ "0.76475245", "0.7613455", "0.73216003", "0.7232131", "0.7204029", "0.7114866", "0.6915121", "0.69098693", "0.6900085", "0.6607692", "0.6509762", "0.6498399", "0.6357027", "0.6335295", "0.63045627", "0.6205945", "0.61870325", "0.6182034", "0.6174292", "0.6170315", "0.61585486...
0.81614006
0
Test subtraction for Complex with Complex, complex, int and float
def test_sub(): z = Complex(1, -2) w = Complex(1, 1) assert (z - w) == Complex(0, -3) assert (z - (1+1j)) == Complex(0, -3) assert (z - 2) == Complex(-1, -2) assert (z - 2.0) == Complex(-1, -2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_difference(c_1,c_2):\n return c_1 - c_2", "def _cmplx_sub_ ( s , o ) :\n return (-o ) + complex ( s )", "def complex(real, imag):", "def __sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and retu...
[ "0.73002094", "0.69852185", "0.6779295", "0.6768346", "0.6679916", "0.6596779", "0.6549099", "0.64673406", "0.6422183", "0.6415886", "0.63986313", "0.628705", "0.6265425", "0.62318534", "0.61975026", "0.6182748", "0.6176905", "0.6081503", "0.60485506", "0.6047534", "0.6038145...
0.75719965
0
(12i)(2+2i) = 2 + 2i 4i + 4 = 6 2i
def test_mul(): z = Complex(1, -2) v = Complex(2, 2) assert z*v == Complex(6, -2) assert v*z == z*v assert z*2 == Complex(2, -4) assert z*2.0 == Complex(2, -4) assert z*(2+2j) == v*z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sw(n):\n return 4*n*n + 2*n + 1", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def f(x):\n return ((x[0] - 1) ** 2) + ((x[1] + 3) ** 2)", "def triangular_number_solution():\n return 5 * partial_sum(199) + 3 * partial_sum(333) - 15 * p...
[ "0.6762975", "0.61464405", "0.6060709", "0.599007", "0.59566283", "0.59457093", "0.5927478", "0.59023225", "0.5873529", "0.5866177", "0.58424276", "0.58367133", "0.58342266", "0.58271873", "0.58252615", "0.5824011", "0.5807665", "0.58015585", "0.5773675", "0.5770493", "0.5753...
0.0
-1
Compute LDA model & find perplexity, save topics list for coherence calc
def lda_models(doc_term_matrix, n_topics, vectorizer, rand_start): perplexity_values = [] lda_time = [] topics_list = [] i = rand_start for num_topics in n_topics: # create model t1 = time.time() lda_model = LatentDirichletAllocation(n_components=num_topics, doc_topic_prior = 1/num_topics, topic_word_prior=0.1, n_jobs=39, random_state = i) lda_model.fit_transform(doc_term_matrix) t2 = time.time() lda_time.append(t2-t1) print(f" Model time: {t2-t1}", flush = True) # compute perplexity perplexity_values.append(lda_model.bound_) # create list of topics topics = list_topics(lda_model.components_, vectorizer, top_n=10) topics_list.append(topics) # output completion message i = i+1 print('Number of topics =', num_topics, "complete.", flush = True) return perplexity_values, lda_time, topics_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n coherence_c_v = []\n coherence_u_mass = []\n print(\"Fitting models\"...
[ "0.7421184", "0.73544043", "0.72307", "0.7002529", "0.6943727", "0.673741", "0.6723259", "0.6671794", "0.6657565", "0.6648194", "0.6644296", "0.66000706", "0.6559571", "0.6551705", "0.65513426", "0.65454745", "0.6489818", "0.64809227", "0.6480784", "0.6443412", "0.63931346", ...
0.744949
0
In case we're running innon autoreload mode we need to restart server
def reload(self): puts('Reloading application...') local('touch ../reload.txt')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()", "def _restart(self):\n pass", "def at_server_reload(self):\n self.db.started = True", "def restart_nginx():\n run_command_on_selected_server(_restart_nginx)",...
[ "0.72675717", "0.72130495", "0.704559", "0.668948", "0.66131496", "0.65728164", "0.65231895", "0.6460572", "0.64301753", "0.6418728", "0.6396875", "0.63923776", "0.6356839", "0.63194877", "0.6300053", "0.6296835", "0.6279461", "0.6236136", "0.62225044", "0.62004274", "0.62004...
0.6135069
27
Workaround manage.py migrate complications run syncdb in case it's our first run, so we make sure south_migrationhistory table is created run migrate to apply latest migrations run syncdb again to populate contrib.auth.models
def smart_syncdb_migrate(self): local('python manage.py syncdb') local('python manage.py migrate') local('python manage.py syncdb --all')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate():\n puts(yellow(\"Run South migrations\"))\n django_manage('migrate')", "def post_migrations(self):", "def migrate(self):\n\tpass", "def syncdb():\n with virtualenv():\n run('python manage.py syncdb --noinput')\n run('python manage.py migrate')", "def update_db():\r\n ...
[ "0.7362598", "0.7175394", "0.6962745", "0.67925906", "0.66853064", "0.665665", "0.66325575", "0.65849835", "0.6555793", "0.65435", "0.6471299", "0.6459678", "0.6422288", "0.6384289", "0.63629246", "0.6273245", "0.615992", "0.61169046", "0.6091286", "0.60912824", "0.60731256",...
0.7330435
1
Takes an image and assigns a speed from 01 based upon it.
def process(img): global start frame = cv2.GaussianBlur(img, (21, 21), 0) fgmask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) fgmask = cv2.absdiff(start, fgmask) avg = max(np.average(fgmask), 10) fgmask = cv2.dilate(fgmask, None, iterations=2) ret, fgmask = cv2.threshold(fgmask, avg, 255, cv2.THRESH_BINARY) image, contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) bigContours = [] for contour in contours: if cv2.contourArea(contour) >= 3000: bigContours.append(contour) ax = 0 ay = 0 for contour in bigContours: moments = cv2.moments(contour) cx = int(moments['m10']/moments['m00']) cy = int(moments['m01']/moments['m00']) ax += cx ay += cy if not bigContours: speed = 0 else: ax /= len(bigContours) ay /= len(bigContours) my, mx, channels = img.shape my /= 2 mx /= 2 dist = math.sqrt((ax - mx)**2 + (ay - my)**2) speed = max(min((mx - dist) / my, 1), 0.1) if speed > 0.8: speed = 1 return speed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speed(self, speed: int, time: int = 0, /) -> None:", "def set_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n set_left_speed(speed)\n #time.sleep(.1)\n set_right_speed(speed)", "def set_speed():\n pass", "def speed(self, value: int, /) -> None:", ...
[ "0.6555752", "0.6486258", "0.63528025", "0.63336885", "0.6330112", "0.6309524", "0.6249848", "0.6223644", "0.61646354", "0.615034", "0.6127424", "0.6127424", "0.60513264", "0.60458", "0.6031286", "0.6002849", "0.5983297", "0.5926888", "0.5913099", "0.5911406", "0.5908372", ...
0.0
-1
Ready handler. Import signals.
def ready(self): import roles.signals # pylint: disable=unused-import
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ready(self):\n import main.signals # noqa", "def ready(self):\n\n from . import signals # noqa", "def ready(self):\n import exams.signals # pylint: disable=unused-import", "def ready(self):\n logger.info('game.ready')\n import game.signals", "def ready(self):\n ...
[ "0.85961705", "0.8477948", "0.7967753", "0.795883", "0.77848405", "0.7611131", "0.7388606", "0.7360492", "0.7330842", "0.7245289", "0.72177434", "0.7132327", "0.705162", "0.69547033", "0.691362", "0.6873511", "0.68556416", "0.672121", "0.6704173", "0.66943926", "0.6646311", ...
0.7746457
5
ssum([1,2,3]) 6 ssum([2,3]) 5 ssum([3]) 3 ssum([]) 0
def ssum(L: list) -> int: return 0 if not L else L[0]+ssum(L[1:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)", "def sum_unique(l):\n pass", "def sum_of_squares(seq):\n if len(seq) == 0:\n return 0\n else:\n result = 0\n for num in seq:\n result += num ** 2\n return result", "d...
[ "0.6345257", "0.62525344", "0.6204409", "0.61191237", "0.6101727", "0.6077421", "0.60648704", "0.60515577", "0.60255706", "0.5993455", "0.5983791", "0.5964002", "0.5958428", "0.594942", "0.5904664", "0.5882846", "0.5862508", "0.5862386", "0.5833747", "0.582293", "0.58177805",...
0.7876537
0
factorial(5) > 120 factorial(4) > 24 factorial(3) > 6 factorial(2) > 2 factorial(1) > 1 factorial(0) > 1
def factorial(N: int) -> int: return N*factorial(N-1) if N else 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def factorial(n):\n\n # the code for factorial", "def factorial(n):\n return reduce(mul, range(1, n), 1)", "def calculateFactorials():\n\n ni = []\n ni.append( 295232799039604140847618609643520000000) # 34!\n ITERATIONS = 34\n for n in range( 1, ITERATIONS,1 ) :\n ni.append(math.floor...
[ "0.8139361", "0.79235435", "0.78876895", "0.7874468", "0.7866474", "0.784772", "0.7841788", "0.7832985", "0.7827345", "0.7794977", "0.7794691", "0.7788781", "0.7775511", "0.7756817", "0.77249163", "0.77127343", "0.76884425", "0.7687114", "0.7661024", "0.76508266", "0.7629169"...
0.74416447
44
hello hell hel he h
def pars_str(stroka: str) -> None: print(stroka) return pars_str(stroka[:-1]) if stroka else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shout(word):\n print(word+\"!\")", "def middle(word):\n return word[1:-1]", "def think(s):", "def middle(word):\n\treturn word[1:-1]", "def gibberish(*args):\n \n # Initialize an empty string: hodgepodge\n hodgepodge = ''\n\n # Concatenate the strings in args\n for word in args:\n ...
[ "0.58079946", "0.5710749", "0.56786484", "0.5669718", "0.56644154", "0.5633301", "0.56264555", "0.55352926", "0.55349237", "0.55043215", "0.5499545", "0.5480967", "0.54765", "0.54765", "0.54730135", "0.54675144", "0.5462265", "0.54491264", "0.5444947", "0.5444947", "0.5444947...
0.0
-1
print_stars(5) \n\n\n\n\n print_stars(4) \n\n\n\n print_stars(3) \n\n\n print_stars(2) \n\n print_stars(1) \n print_stars(0) ''
def print_stars(N: int) -> str: # if N: # return f'*\n{print_stars(N-1)}' # return '' return '' if not N else f'*\n{print_stars(N-1)}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')", "def star():\n print('*', end='')", "def starry_box(phrase):\n numStars = len(phrase) + 4\n print '*' * numStars\n print '*', phrase, '*'\n print '*' * num...
[ "0.78595215", "0.7326586", "0.6608579", "0.66084236", "0.64868563", "0.6461798", "0.62855875", "0.6244473", "0.6224193", "0.621459", "0.61932653", "0.61023444", "0.6071811", "0.59977406", "0.5921401", "0.5901315", "0.58597124", "0.5856981", "0.58483934", "0.5816308", "0.57875...
0.8280417
0
Mutual Information by Entropy norm between specific items.
def mmi_norm(self, x, y, tuples): P_ = {x: self.P(x, tuples), y: self.P(y, tuples)} P_xy = self.condP(x, y, tuples) return - P_[x] * log2(P_[x]) - P_[y] * (-P_xy * log2(P_xy))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalized_mutual_information(cl: np.ndarray, org: np.ndarray):\n assert cl.shape == org.shape\n\n return mutual_info_score(org, cl) / (abs(entropy(cl) + entropy(org)) / 2)", "def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n ...
[ "0.6674753", "0.61827624", "0.60942334", "0.6003122", "0.5959863", "0.5958628", "0.5811477", "0.57843643", "0.569512", "0.5683817", "0.56066775", "0.55940205", "0.5578646", "0.557442", "0.5545874", "0.55332446", "0.55060184", "0.55060184", "0.5491687", "0.5481766", "0.5452757...
0.5009674
49
Mutual Information by Entropy norm between specific item and the D_k text sample.
def cmi_norm(self, query, tuples): P_, vocab, H_D = self.entropy_norm(tuples) I_D = [] # I(D; q) = H(D) - H(D|q) I_as = H_D # I(D; q1, q2,...,qn) = H(D) - H(D|q1) - ... - H(D|qn) for q in query: P_wq = odict({w + '|' + q: self.condP(w, q, tuples) for w in vocab}) H_Dq = -P_[q] * sum(P_wq[pwq] * log2(P_wq[pwq]) for pwq in P_wq.keys()) I_D.append((q, H_D - H_Dq)) I_as -= H_Dq # TODO: define a,b,c for giving negative reward # if not (mmi_norm(a,b, tuples) > mmi_norm(b,c, tuples) > mmi_norm(a,c, tuples)): # I_as = self.zeta return I_D, I_as
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalized_mutual_information(cl: np.ndarray, org: np.ndarray):\n assert cl.shape == org.shape\n\n return mutual_info_score(org, cl) / (abs(entropy(cl) + entropy(org)) / 2)", "def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n ...
[ "0.5616423", "0.5585292", "0.5472835", "0.53419083", "0.5322406", "0.5318265", "0.5269511", "0.526485", "0.5195288", "0.5182691", "0.51384085", "0.5133426", "0.5114074", "0.5107224", "0.5082305", "0.50695926", "0.5055395", "0.50359356", "0.5018013", "0.5010474", "0.50092155",...
0.51792824
10
a RPA that adds a word to Anki
def add_word(word, option, scraped_info, t_sleep=2.75): subprocess.Popen('C:\\Program Files\\Anki\\anki.exe') # opening the anki program time.sleep(t_sleep+5) focus_screen() time.sleep(t_sleep) pyautogui.hotkey('a') # opening the add window - in the front area n_example = len(glob.glob(f'./words/{word}/meaning_{option}/example[0-9].txt')) # numbers of examples time.sleep(t_sleep) pyautogui.write(word + '\n') try: # try to write the inflections with open(f'./words/{word}/inflections.txt') as file: # add inflection (if exist) pyautogui.write('Inflections: ' + file.readline() + '\n\n') except FileNotFoundError: # inflections not found, pass pass if scraped_info['searched word']['mp3'] != None: # adding the word pronunciation pyautogui.hotkey('f3') # attach picture/audio/video time.sleep(t_sleep) pyautogui.hotkey('ctrl', 'l') # path insert mode pyautogui.write(os.getcwd() + f'\\words\\{word}') time.sleep(t_sleep) pyautogui.press('enter') time.sleep(t_sleep) pyautogui.hotkey('alt', 'n') time.sleep(t_sleep) pyautogui.write(f'{word}.mp3') time.sleep(t_sleep) pyautogui.press('enter') for example_number in range(n_example): with open(f'./words/{word}/meaning_{option}/example{example_number}.txt', 'r') as file: pyautogui.write(('\n' if example_number!=0 else '') + f'Example {example_number+1}:' + next(file) + '\n') # write the example pyautogui.hotkey('f3') # attach picture/audio/video time.sleep(t_sleep) pyautogui.hotkey('ctrl', 'l') # path insert mode pyautogui.write(os.getcwd() + f'\\words\\{word}\\meaning_{option}') time.sleep(t_sleep) pyautogui.press('enter') time.sleep(t_sleep) pyautogui.hotkey('alt', 'n') time.sleep(t_sleep) pyautogui.write(f'example{example_number}.mp3') time.sleep(t_sleep) pyautogui.press('enter') time.sleep(t_sleep) pyautogui.press('tab') # switch to back with open(f'./words/{word}/meaning_{option}/meaning{option}.txt') as file: pyautogui.write(next(file)) # insert the meaning time.sleep(t_sleep) pyautogui.press('tab') # switch to back with 
open(f'./words/{word}/meaning_{option}/tag.txt') as file: pyautogui.write(next(file) + ' [CLAC]') # insert the vim time.sleep(t_sleep) pyautogui.press('tab') # switch to back time.sleep(t_sleep) pyautogui.press('enter') time.sleep(t_sleep) pyautogui.press('esc')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addWord(self, word: str) -> None:\n tmp = self.root\n for i, letter in enumerate(word):\n if letter not in tmp.seq:\n tmp.seq[letter] = Node()\n \n tmp = tmp.seq[letter]\n \n tmp.value = word", "def add_word(self, word):\...
[ "0.6528331", "0.64054376", "0.636892", "0.62993944", "0.6260526", "0.62493396", "0.61003536", "0.61003536", "0.6083645", "0.6070733", "0.6045671", "0.6045019", "0.60449773", "0.603758", "0.6037229", "0.6024666", "0.6018381", "0.5992664", "0.59754694", "0.59717524", "0.5958048...
0.5661711
62
Theano implementation of the Fast Gradient Sign method.
def fgm(x, predictions, y=None, eps=0.3, ord=np.inf, clip_min=None, clip_max=None): assert ord == np.inf, "Theano implementation not available for this norm." if y is None: # Using model predictions as ground truth to avoid label leaking y = T.eq(predictions, T.max(predictions, axis=1, keepdims=True)) y = T.cast(y, utils_th.floatX) y = y / T.sum(y, 1, keepdims=True) # Compute loss loss = utils_th.model_loss(y, predictions, mean=True) # Define gradient of loss wrt input grad = T.grad(loss, x) # Take sign of gradient signed_grad = T.sgn(grad) # Multiply by constant epsilon scaled_signed_grad = eps * signed_grad # Add perturbation to original example to obtain adversarial example adv_x = theano.gradient.disconnected_grad(x + scaled_signed_grad) # If clipping is needed, reset all values outside of [clip_min, clip_max] if (clip_min is not None) and (clip_max is not None): adv_x = T.clip(adv_x, clip_min, clip_max) return adv_x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grad_sigmoid(self):\n return self.sigmoid(self.x)*(self.sigmoid(-self.x))\n raise NotImplementedError(\"Sigmoid gradient not implemented\")", "def sigmoid_grad(self, X):\n var=self.sigmoid(X)\n return var*(1-var)", "def sigmoid_grad(z):\n return Sigmoid(z) * (1 - Sigmoid(z))"...
[ "0.62575996", "0.620261", "0.6106974", "0.6093499", "0.60716397", "0.5985662", "0.5984108", "0.59664273", "0.59456444", "0.590908", "0.5791624", "0.574362", "0.5741642", "0.5734987", "0.5703245", "0.56998265", "0.5676139", "0.566065", "0.5639867", "0.5634217", "0.5617749", ...
0.0
-1
Assert that the first (leftmost) protocol value is correctly fetched from the xforwardedheader.
def test_get_protocol_with_more_than_one_value(): request = Mock( headers={"X-Forwarded-Proto": "https,http,http"}, protocol="http", ) expected = "https" protocol = get_browser_protocol(request) assert expected == protocol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_h2_header_ok(self):\n self.set_frang_config(frang_config=\"http_strict_host_checking true;\")\n client = self.get_client(\"deproxy-1\")\n client.start()\n client.parsing = False\n\n first_headers = [(\":authority\", \"localhost\"), (\":path\", \"/\")]\n second_hea...
[ "0.6413071", "0.6368929", "0.63600814", "0.6126467", "0.6093351", "0.6008764", "0.597977", "0.59638256", "0.5935303", "0.5915244", "0.59103775", "0.588814", "0.58332074", "0.5819531", "0.58032525", "0.57846427", "0.5782095", "0.57428664", "0.5648308", "0.5636194", "0.5635823"...
0.72213775
0
Assert that a dict of k/v's is correctly created when receiving encoded values.
def test_convert_request_arguments_with_encoded_items_to_dict(): arguments = { "key1": [b"value1"], "key2": [b"value2"], "key3": [b"value3"], } expected = { "key1": "value1", "key2": "value2", "key3": "value3", } result = convert_request_to_dict(arguments) assert expected == result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dict(self):\n self.assertValue(\n {'foo': 'foo', 'bar': 43, 'zippy': 'zoo'},\n 'bar: 43 foo: foo zippy: zoo\\n'\n )", "def verifyData(self, expectedDict):\n pass", "def test_key_dict(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n ...
[ "0.7359073", "0.68950987", "0.6765567", "0.6608221", "0.6606146", "0.6555588", "0.64671695", "0.6464478", "0.64596117", "0.6452391", "0.64428294", "0.64428294", "0.64036715", "0.63711655", "0.63386524", "0.63026303", "0.629081", "0.6284405", "0.6279023", "0.6257576", "0.62568...
0.656517
5
Extract metadata like original image name and crop position from the given file name. Change this function to use a different file name pattern.
def get_metadata_from_filename(file_name: str) -> namedtuple: if os.path.isabs(f): file_name = os.path.basename(file_name) original_image_name = file_name.split('-')[0] x_pos = int(file_name.split('.')[-2].split('+')[-2:][0]) Metadata = namedtuple('Metadata', ['original_image_name', 'x_pos']) return Metadata(original_image_name, x_pos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images...
[ "0.7071573", "0.66357124", "0.64707863", "0.6266383", "0.61719465", "0.6033898", "0.600292", "0.59774214", "0.59133536", "0.5879702", "0.5877382", "0.5843988", "0.5837125", "0.5832479", "0.58029586", "0.57413375", "0.5720259", "0.57090443", "0.5669945", "0.5668011", "0.564875...
0.6781023
1
Insert the crop represented by file_name into this image.
def insert(self, file_path: str, annot_type: str) -> None: if self._valid_file_name_regex.match(os.path.basename(file_path)) is None: raise ValueError(f'Illegal file name: {os.path.basename(file_path)}') x_pos = get_metadata_from_filename(file_path).x_pos if x_pos in self._x_positions: col = self._cols[x_pos] else: col = Column() self._x_positions.append(x_pos) self._x_positions.sort() col.insert(Crop(file_path, annot_type)) self._cols[x_pos] = col self.n_cols = len(self._cols)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_current(self, image_name):\n # Sets the position of the crop\n self.j ,self.i = 0, 0\n\n # loads the image\n self.image = convert2int(tifffile.imread(image_name)).astype(numpy.float32)\n\n # Computes the number of crops in x and y\n self.ny = numpy.ceil(self.image....
[ "0.60004956", "0.5452988", "0.53952855", "0.53464556", "0.53207326", "0.52505255", "0.5239172", "0.518678", "0.5163597", "0.5147501", "0.5141308", "0.513236", "0.5121384", "0.5045127", "0.5008329", "0.50027025", "0.49967998", "0.4968934", "0.49625248", "0.4922145", "0.4885444...
0.56664383
1
Randomly divide this image into training, validation and test split. The image is divided into three randomly ordered consecutive patches. The fractions of the image going into each patch are given by val_split, test_split, and 1 (val_split + test_split), respectively. Columns in which the patches overlap are removed.
def select_randomly(self, val_split: float, test_split: float) -> {str: int}: def _select(start, n, label) -> int: """ Label all columns in [start, start+n) with label. """ n_selected = 0 for i in range(start, int(start + n)): x = self._x_positions[i] n_selected += self._cols[x].mark_as(label) return n_selected def _remove_overlaps(start, end) -> int: """ Remove unlabelled columns in [start-col_width, end+col_width]. """ start = self._x_positions[start % self.n_cols] end = self._x_positions[int(end) % self.n_cols] n_removed = 0 for x, col in self._cols.items(): if start - self.col_width <= x <= start or end <= x <= end + self.col_width: if col.label is None: n_removed += col.mark_as('ignore') return n_removed def _next_unlabelled_col(x): """ Return index of first unlabelled column after x. """ for i in range(self.n_cols): idx = (x + i) % self.n_cols x_current = self._x_positions[idx] if self._cols[x_current].label is None: return idx # When computing number of columns per split we must take into account # that some columns will be removed, i.e. we want to compute the split # sizes as fraction of the number of actual selected columns, not of # the total number of columns. 
delta_x = self._x_positions[1] - self._x_positions[0] n_to_remove_per_split = self.col_width / delta_x # * 2 because 2 gaps between 3 splits n_to_keep = self.n_cols - n_to_remove_per_split * 2 n_val = round(n_to_keep * val_split) n_test = round(n_to_keep * test_split) n_train = n_to_keep - n_val - n_test n_selected_crops_per_split = dict.fromkeys(['training', 'validation', 'test', 'ignore'], 0) # Place patches in arbitrary order start = 0 for n, label in random.sample(list(zip([n_train, n_val, n_test], ['training', 'validation', 'test'])), k=3): # Mark patch n_selected_crops_per_split[label] += _select(start, n, label) # Remove columns overlapping this patch n_selected_crops_per_split['ignore'] += _remove_overlaps(start, start + n - 1) # Next patch starts at next unlabelled column start = _next_unlabelled_col(start) return n_selected_crops_per_split
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def split_valid_sampling(inpath,\n patch_size, \n train_prop,\n val_prop,\n outpath,\n padding_mode='constant', \n padding_valu...
[ "0.6673594", "0.6663889", "0.6621361", "0.65526426", "0.6512163", "0.6374599", "0.63245976", "0.6266016", "0.62305325", "0.62242997", "0.6173855", "0.61674833", "0.61449915", "0.612918", "0.610995", "0.6105582", "0.6080789", "0.6075082", "0.6059651", "0.6052234", "0.6046871",...
0.0
-1
Label all columns in [start, start+n) with label.
def _select(start, n, label) -> int: n_selected = 0 for i in range(start, int(start + n)): x = self._x_positions[i] n_selected += self._cols[x].mark_as(label) return n_selected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_excel_column_labels(start: str, end: str):\n if not start or not end:\n raise ValueError(\"{0} missing\".format(\"start\" if start is None else \"end\"))\n\n end = end.upper()\n start = start.upper()\n\n _check_start_end_acceptable(start, end)\n\n range_builder = [start]\n\n start...
[ "0.6146362", "0.60743713", "0.6023064", "0.5982423", "0.5977497", "0.5878763", "0.5794926", "0.56516623", "0.5605927", "0.55946416", "0.55929387", "0.5586301", "0.54550844", "0.5445182", "0.5400012", "0.5359969", "0.5359969", "0.5359969", "0.5359969", "0.5342608", "0.53189415...
0.6012854
3
Remove unlabelled columns in [startcol_width, end+col_width].
def _remove_overlaps(start, end) -> int: start = self._x_positions[start % self.n_cols] end = self._x_positions[int(end) % self.n_cols] n_removed = 0 for x, col in self._cols.items(): if start - self.col_width <= x <= start or end <= x <= end + self.col_width: if col.label is None: n_removed += col.mark_as('ignore') return n_removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cols_drop():", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValue...
[ "0.6347648", "0.60383844", "0.5991607", "0.5967662", "0.57150346", "0.56798846", "0.5672258", "0.5656547", "0.56305486", "0.5617258", "0.55419534", "0.5483048", "0.54566985", "0.54525024", "0.5401275", "0.5390643", "0.5390326", "0.5388381", "0.53292286", "0.53068745", "0.5285...
0.67192227
0
Return index of first unlabelled column after x.
def _next_unlabelled_col(x): for i in range(self.n_cols): idx = (x + i) % self.n_cols x_current = self._x_positions[idx] if self._cols[x_current].label is None: return idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XToCol(self, x):\r\n \r\n colLeft = 0\r\n numColumns = self.GetColumnCount()\r\n for col in xrange(numColumns):\r\n \r\n if not self.IsColumnShown(col):\r\n continue \r\n\r\n column = self.GetColumn(col)\r\n\r\n if x < (colLeft ...
[ "0.670367", "0.66707695", "0.65949285", "0.63676727", "0.6300657", "0.62851495", "0.6224062", "0.62041897", "0.61869335", "0.6082615", "0.6070392", "0.6063719", "0.60264426", "0.6016226", "0.59535724", "0.59106576", "0.58775485", "0.5842333", "0.5821943", "0.5775763", "0.5773...
0.84793603
0
Return a mapping of column positions and labels.
def get_labels(self) -> {int: str}: return {x: col.label for x, col in self._cols.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_columns(mapping):\n columns = []\n for name, column in mapping.items():\n columns.append(column.label(name))\n return columns", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def column_labels(self):\n return tuple(self._columns.k...
[ "0.73458505", "0.72522795", "0.68872035", "0.68740356", "0.6553106", "0.650619", "0.64447373", "0.6226804", "0.61503714", "0.60560364", "0.6047371", "0.6043159", "0.59742296", "0.5932194", "0.5919516", "0.5908463", "0.5892866", "0.5887391", "0.5864398", "0.5817711", "0.579350...
0.7213927
2
Move all files of this image to the output directories defined by each column's label. Returns number of files moved.
def to_disk(self, dry_run: bool) -> int: file_counter = 0 for k, col in self._cols.items(): self._moved_cols.append(k) file_counter += col.move(dry_run=dry_run) return file_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(...
[ "0.6666721", "0.6293736", "0.61736655", "0.6053043", "0.59070665", "0.5705878", "0.56739455", "0.5666597", "0.5597771", "0.55276024", "0.55109316", "0.5431153", "0.54137915", "0.5404886", "0.5398599", "0.53864944", "0.53838104", "0.5334871", "0.532649", "0.53132355", "0.53118...
0.6105557
3
Undo all former file movements.
def rollback(self) -> None: for k in self._moved_cols: self._cols[k].move_back()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(...
[ "0.7702794", "0.7388811", "0.7248675", "0.71586466", "0.71578836", "0.70705074", "0.69751996", "0.6805957", "0.6794466", "0.6791538", "0.6788477", "0.67617655", "0.6698153", "0.66829187", "0.6664879", "0.66583955", "0.66569996", "0.6571274", "0.65703166", "0.65516204", "0.645...
0.58613473
48
Move the file associated with this crop to the directory path/annot_type, where annot_type is this crop's annotation type.
def move_to(self, path: str) -> None: self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path)) os.rename(self._file_path, self._new_path) self._file_was_moved = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath....
[ "0.6297892", "0.5993658", "0.58924556", "0.58497065", "0.574657", "0.5685253", "0.5554716", "0.544296", "0.5296529", "0.5167809", "0.5167291", "0.51232123", "0.5116003", "0.51118386", "0.5106717", "0.50523245", "0.5047933", "0.50246215", "0.50095254", "0.5005804", "0.49968418...
0.7432932
0
Undo a former file movement by moving the file back to its origin.
def move_back(self) -> None: if self._file_was_moved: os.rename(self._new_path, self._file_path) pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "def undo():\n\n try:\n my_file.undo()\n except FileNotFoundError:\n print('No file has been read yet')\n except Exception:\n print('You must make an edit to undo')", "def undo():", "def undo_moves(self):...
[ "0.6898709", "0.6796202", "0.67440903", "0.66569364", "0.6606794", "0.6522146", "0.6483597", "0.6466735", "0.6456174", "0.6436866", "0.64312315", "0.6424864", "0.63239264", "0.62896913", "0.62687606", "0.61865735", "0.6161903", "0.614128", "0.6138943", "0.6126032", "0.6055876...
0.7445615
0
Insert the Crop into this column.
def insert(self, item: Crop) -> None: self._content.append(item) self._file_counts[item.annot_type] = self._file_counts.get(item.annot_type, 0) + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, file_path: str, annot_type: str) -> None:\n if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:\n raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')\n x_pos = get_metadata_from_filename(file_path).x_pos\n if x_pos in self._x...
[ "0.61796373", "0.55629903", "0.5431215", "0.5284711", "0.5271825", "0.52523685", "0.5205015", "0.51911217", "0.5175209", "0.5126394", "0.51254916", "0.51038134", "0.50434446", "0.49886337", "0.49873215", "0.49750116", "0.49747944", "0.49618477", "0.49073657", "0.48555687", "0...
0.4963357
17
Mark this column with the provided label. Returns number of labelled crops.
def mark_as(self, label: str) -> int: self.label = label return len(self._content) // len(ANNOTATIONS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit(self, label=None):\n self.labels[label] += 1", "def label_index(self, label: Text) -> int:\n count = 0\n for l in self.le.classes_:\n if(l == label):\n return count\n count += 1", "def get_count_by_label(self, label=None):\n if label is N...
[ "0.6160278", "0.6008114", "0.5806169", "0.54976237", "0.5418939", "0.5353223", "0.5351463", "0.5350682", "0.52923065", "0.5270436", "0.52658045", "0.52610487", "0.52374464", "0.5227169", "0.5211575", "0.5198103", "0.51884943", "0.5185271", "0.51499337", "0.5137268", "0.510938...
0.6514316
0
Move all files of this column to the corresponding directory, if this column is not labeled to be ignored. Returns number of files moved.
def move(self, dry_run: bool) -> int: if self.label == 'ignore': return 0 file_counter = 0 for crop in self._content: if not dry_run: crop.move_to(self.label) file_counter += 1 return file_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_disk(self, dry_run: bool) -> int:\n file_counter = 0\n for k, col in self._cols.items():\n self._moved_cols.append(k)\n file_counter += col.move(dry_run=dry_run)\n return file_counter", "def organizeDir(self):\n # Classify every file in dir\n for fi...
[ "0.6540349", "0.6040318", "0.59826165", "0.5923412", "0.5619923", "0.55904424", "0.5581633", "0.54979753", "0.54583585", "0.5416308", "0.53824425", "0.53258795", "0.5291548", "0.52602667", "0.52368546", "0.5232204", "0.52239007", "0.52128977", "0.52018017", "0.5200375", "0.51...
0.6531521
1
Undo all former file movements.
def move_back(self) -> None: if self.label == 'ignore': return for crop in self._content: crop.move_back()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(...
[ "0.7702279", "0.73885363", "0.7248035", "0.7157842", "0.7157292", "0.70697516", "0.69746536", "0.680541", "0.6793342", "0.67908883", "0.67872065", "0.67608637", "0.66969573", "0.66815704", "0.6664292", "0.6657332", "0.6656268", "0.6570457", "0.6570197", "0.6550941", "0.645881...
0.5601747
71
Defines and returns a parser for given command line arguments.
def get_args_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( description='Partition the Ecotron-EInsect-2018 dataset into training, validation, and testing sets.', formatter_class=argparse.RawDescriptionHelpFormatter, epilog=__doc__ ) data_group = parser.add_argument_group('Data input') data_group.add_argument( '--images', type=pathlib.Path, required=True, metavar='DIR', help='directory with images' ) data_group.add_argument( '--roots', type=pathlib.Path, required=True, metavar='DIR', help='directory with root masks for given images' ) data_group.add_argument( '--centerlines', type=pathlib.Path, required=True, metavar='DIR', help='directory with center line masks for given images' ) data_group.add_argument( '--radii', type=pathlib.Path, required=True, metavar='DIR', help='directory with radii maps for given images' ) data_group.add_argument( '--sin', type=pathlib.Path, required=True, metavar='DIR', help='directory with sine maps for given images' ) data_group.add_argument( '--cos', type=pathlib.Path, required=True, metavar='DIR', help='directory with cosine maps for given images' ) data_group.add_argument( '--crop-width', type=int, required=True, metavar='INT', help='crop width' ) split_group = parser.add_argument_group('Split control') split_group.add_argument( '--val-split', type=int, required=True, metavar='INT', help='percentage of data going into validation set' ) split_group.add_argument( '--test-split', type=int, required=True, metavar='INT', help='percentage of data going into test set' ) parser.add_argument( '-y', '--yes', action='store_true', help='Assume answer "yes" for all questions' ) parser.add_argument( '--dry-run', action='store_true', help='Only simulate the process, don\'t actually touch anything' ) parser.add_argument( '--vis', action='store_true', help='Create visualization of selection in ./vis (requires matplotlib)' ) parser.add_argument( '-r', '--random-seed', metavar='INT', type=int, help='Seed for the random number 
generator' ) return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('webpage', help='webpage to search')\n\n return parser", "def get_parser():\n p = argparse.ArgumentParser(description='such a good program')\n p.add_argument('infile')\n p.add_argument('outfile')\n return p", "...
[ "0.7608149", "0.75529045", "0.7468674", "0.7456568", "0.7440838", "0.7423374", "0.7418182", "0.7364865", "0.7351196", "0.7348984", "0.73461956", "0.7296354", "0.72922224", "0.72769743", "0.7264185", "0.7261001", "0.72606117", "0.7259605", "0.72396505", "0.7237962", "0.7237962...
0.0
-1
Fetch Forex datasets. Fetches the ECB Forex and Coindesk Bitcoin datasets. More info at
def fetch(start=date(2015, 1, 1), end=date.today(), currency_1='USD', currency_2='EUR'): if currency_1 == 'BTC': X = _load_bitcoin(start=start, end=end, currency=currency_2) descr = 'BTC-' + str(currency_2) elif currency_2 == 'BTC': X = _load_bitcoin(start=start, end=end, currency=currency_1) descr = 'BTC-' + str(currency_1) else: X = _load_forex(start=start, end=end, currency_1=currency_1, currency_2=currency_2) descr = str(currency_1) + '-' + str(currency_2) descr = descr + start.strftime('%Y-%m-%d') + '-' + end.strftime('%Y-%m-%d') return Bunch(data=X, target=None, data_test=None, target_test=None, inner_cv=None, outer_cv=None, DESCR=descr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch(tickers: List[str], limit:Optional[int]=None):\n srs = [tck.split(\".\")[1] for tck in tickers]\n resp = requests.get(URL, verify=False)\n if resp.ok:\n df = pd.read_csv(StringIO(resp.text), delimiter=\";\", decimal=\",\")\n else:\n logger.error(f\"Data from {resp.url} not avail...
[ "0.63306177", "0.6272984", "0.62559444", "0.62323636", "0.6168453", "0.6152495", "0.5925996", "0.5868122", "0.58555883", "0.5853049", "0.5850573", "0.58333457", "0.5734732", "0.57281804", "0.57072836", "0.56839794", "0.5660408", "0.56425864", "0.562225", "0.56212765", "0.5619...
0.5893806
7
Create metrics of gauge type for filesystem replica link lag, with the local filesystem name, replication direction, remote array name, remote filesystem name and replication status as labels.
def _replica_links_lag(self): for f in self.fb.get_filesystem_replica_links(): self.replica_links_lag.add_metric([f.local_file_system.name, f.direction, f.remote.name, f.remote_file_system.name, f.status], -1 if f.lag is None else f.lag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n us...
[ "0.5539488", "0.5088195", "0.507494", "0.5027533", "0.49304163", "0.4900687", "0.4889911", "0.47771588", "0.47663313", "0.47630692", "0.47085527", "0.46948", "0.46515706", "0.45758998", "0.45718196", "0.4553225", "0.45504344", "0.45424002", "0.453815", "0.45122313", "0.451044...
0.5900812
0
Builds and sends an embed message with new commits information.
async def process_push_hook(push: models.PushHook): repository = push.repository project = push.project commit_str = "commit" if push.total_commits_count == 1 else "commits" # Show link to commit compare if there's more than one commit if push.total_commits_count > 1: embed_url = f"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}" else: embed_url = f"{repository.homepage}/commit/{push.after[:7]}" if push.before == EMPTY_COMMIT: embed = discord.Embed(title=f"[{project.namespace}/{project.name}] New branch created {push.branch}", url=embed_url, colour=discord.Colour.light_grey()) embed.set_author(name=push.user_name, icon_url=push.user_avatar) await send_message(None, embed=embed, avatar_url=push.project.avatar_url) elif push.after == EMPTY_COMMIT: embed = discord.Embed(title=f"[{project.namespace}/{project.name}] Branch deleted {push.branch}", url=embed_url, colour=discord.Colour.light_grey()) embed.set_author(name=push.user_name, icon_url=push.user_avatar) await send_message(None, embed=embed, avatar_url=push.project.avatar_url) # If there are no commits, do not show a message if not push.total_commits_count: return embed = discord.Embed(title=f"[{project.namespace}/{project.name}:{push.branch}] " f"{push.total_commits_count} new {commit_str}", url=embed_url, colour=discord.Colour.blurple()) embed.set_author(name=push.user_name, icon_url=push.user_avatar) embed.description = "" for commit in push.commits: message = commit.message.splitlines()[0] embed.description += f"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\n" print("Sending push message") await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(self, bot, comm, groups):\n commit_message = self.plugin.get_commit_message()\n bot.reply(comm, u'{user}: {msg}', kwvars={'msg': commit_message})", "def _generate_commit(\n self, msg: Optional[str] = None, author: Optional[str] = None\n ) -> dict:\n if author:\n...
[ "0.5981543", "0.5900061", "0.58865404", "0.5829465", "0.5829347", "0.58225244", "0.5794675", "0.5761677", "0.5699984", "0.56958634", "0.5648896", "0.5571014", "0.5564511", "0.5560195", "0.5509813", "0.550364", "0.54569304", "0.54245734", "0.5418751", "0.5418156", "0.53992987"...
0.6393245
0
Builds and sends an embed message with issues information.
async def process_issue_hook(issue_data): project = issue_data.project issue = issue_data.issue user = issue_data.user description = "" action = "Issue updated" colour = discord.Colour.light_grey() if issue.action == "open": action = "Issue opened" description = issue.description colour = discord.Colour.green() elif issue.action == "close": action = "Issue closed" colour = discord.Colour.dark_grey() embed = discord.Embed(title=f"[{project.namespace}/{project.name}] {action}: #{issue.iid} {issue.title}" , url=issue.url, description=description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def issue(msg: telebot.types.Message) -> None:\n data = msg.text.split()\n if len(data) == 1:\n bot.send_message(\n msg.from_user.id,\n 'Use this command to tell the developer about an issue. '\n 'Example usage: `/issue I got 4 in a row but game did not end.`',\n ...
[ "0.6676813", "0.63654536", "0.6173436", "0.59853166", "0.5810283", "0.57947206", "0.57469696", "0.57408506", "0.56667405", "0.56306005", "0.5611144", "0.5595954", "0.5525713", "0.55207026", "0.55166113", "0.5467382", "0.54175764", "0.5404614", "0.53625447", "0.5345686", "0.53...
0.5929863
4
Builds and sends an embed message with notes information.
async def process_note_hook(data: models.NoteHook): note = data.note user = data.user project = data.project colour = discord.Colour.greyple() embed = discord.Embed(url=note.url, description=note.description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) if data.issue: issue = data.issue embed.title = f"[{project.namespace}/{project.name}] New comment on issue #{issue.iid}: {issue.title}" if data.commit: commit = data.commit embed.title = f"[{project.namespace}/{project.name}] New comment on commit `{commit.id[:7]}`" if data.merge_request: merge = data.merge_request embed.title = f"[{project.namespace}/{project.name}] New comment on merge request !{merge.iid}: {merge.title}" await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embed():", "async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=i...
[ "0.653712", "0.6195667", "0.5904277", "0.59017", "0.58894485", "0.58441114", "0.5828449", "0.5747832", "0.5732143", "0.57031065", "0.5664874", "0.56358755", "0.55843145", "0.5578333", "0.55621606", "0.5557521", "0.55531627", "0.5505942", "0.54942673", "0.54742163", "0.5460713...
0.63638
1
Builds and sends an embed message with merge request information.
async def process_merge_request_hook(data: models.MergeRequestHook): project = data.project merge = data.merge_request user = data.user description = "" action = "Issue updated" colour = discord.Colour.light_grey() if merge.action == "open": action = "Merge request opened" description = merge.description colour = discord.Colour.dark_green() elif merge.action == "close": action = "Merge request closed" colour = discord.Colour.dark_grey() embed = discord.Embed(title=f"[{project.namespace}/{project.name}] {action}: !{merge.iid} {merge.title}", url=merge.url, description=description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) embed.set_footer(text=f"{merge.source_branch} → {merge.target_branch}") await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "def build_embed(s...
[ "0.5688839", "0.51731527", "0.514443", "0.5102154", "0.50153357", "0.5012565", "0.4874328", "0.48101324", "0.47793704", "0.46545884", "0.46417007", "0.46417007", "0.46417007", "0.4641089", "0.46038324", "0.45967078", "0.45881125", "0.4585066", "0.45759517", "0.4558107", "0.45...
0.6398354
0
Test of function that open another window
def credits_window(): credits = tk.Toplevel() credits_lbl = tk.Label(credits, text='Software Developed By Allan / SpideyKeiiti\n' 'Made for Prototype purposes for Streets Of Rage Remake Community!') credits_lbl.pack()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_window_loaded(self):", "def test_openDialog_pass(self):\n self.run_script(\"\"\"\n foo.openDialog(\"foo\")\n foo.openDialog(\"chrome://foo/bar\")\n \"\"\")\n self.assert_silent()", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(sel...
[ "0.70974696", "0.703137", "0.68839926", "0.6804897", "0.6764545", "0.6681683", "0.65590286", "0.6494494", "0.6479063", "0.64303726", "0.61622506", "0.6118606", "0.60861903", "0.60753334", "0.60275924", "0.6021859", "0.6018818", "0.6013958", "0.6012709", "0.6001387", "0.599817...
0.0
-1
Function that represents the window which Character Mods can be applied.
def chars_window(): path_dir = r'Sor_Mods_Storage\chars' char_mods_dict = sor_module.list_char_mods(path_dir=path_dir) # Loading Images to screen chars = tk.Toplevel() mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png')) imgRandom_label = tk.Label(chars, image=mainTitleImg) title = tk.Label(chars, text="Characters Mods") comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys())) def apply_char_mod(): char_selected = comboBox_chars.get() result_window = tk.Toplevel() value = '' if char_selected == '': value = f'{value} Please Select an Mod to Apply!' else: sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars') value = f'Character Mod {char_selected} applied!' result_label = tk.Label(result_window, text=value) result_label.pack() btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod) title.grid(row=0, column=0) comboBox_chars.grid(row=1, column=0) imgRandom_label.grid(row=1, column=1) btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def _get_window_width(self):", "def modifiers_coding_map_creator(self):\n self.mapCreatorWindow = map_creator.ModifiersMapCreatorWindow()...
[ "0.60196453", "0.5610308", "0.5482973", "0.5434321", "0.5421594", "0.5412486", "0.5364167", "0.5364138", "0.5340363", "0.5302406", "0.530197", "0.5284308", "0.5282597", "0.5280957", "0.52492845", "0.52104515", "0.517051", "0.5156331", "0.5152639", "0.51405567", "0.51250046", ...
0.65728295
0
Function that represents the window which Enemy Mods can be applied.
def enemy_window(): path_dir = r'Sor_Mods_Storage\enemies' enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir) # Loading Images to screen enemies = tk.Toplevel() mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png')) imgRandom_label = tk.Label(enemies, image=mainTitleImg) title = tk.Label(enemies, text="Enemies Mods") comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys())) def apply_enemy_mod(): char_selected = comboBox_enemies.get() result_window = tk.Toplevel() value = '' if char_selected == '': value = f'{value} Please Select an Mod to Apply!' else: sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies') value = f'Enemy Mod {char_selected} applied!' result_label = tk.Label(result_window, text=value) result_label.pack() btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod) title.grid(row=0, column=0) comboBox_enemies.grid(row=1, column=0) imgRandom_label.grid(row=1, column=1) btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def get_main_window():\n\n pass", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def maya_window():\n return to_qwidget(\"MayaWindow\")", ...
[ "0.64761674", "0.63803315", "0.6314744", "0.6216543", "0.62010247", "0.6149478", "0.60766095", "0.60618967", "0.6037659", "0.60210836", "0.60210836", "0.600711", "0.6003741", "0.5992173", "0.59492654", "0.58889663", "0.58503634", "0.57341456", "0.56976503", "0.56692207", "0.5...
0.678502
0
Function that represents the window which Enemy Mods can be applied.
def pallete_window(): path_dir = r'Sor_Mods_Storage\palletes' char_mods_dict = sor_module.list_char_mods(path_dir=path_dir) # Loading Images to screen palletes = tk.Toplevel() mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png')) imgRandom_label = tk.Label(palletes, image=mainTitleImg) title = tk.Label(palletes, text="Pallete Mods") comboBox_palletes = ttk.Combobox(palletes, values=list(char_mods_dict.keys())) def apply_pallete_mod(): pallete_selected = comboBox_palletes.get() result_window = tk.Toplevel() value = '' if pallete_selected == '': value = f'{value} Please Select an Pallete to Apply!' else: sor_module.apply_mod(mod_dir=path_dir, mod=pallete_selected, type='palletes') value = f'Enemy Mod {pallete_selected} applied!' result_label = tk.Label(result_window, text=value) result_label.pack() btn_apply = tk.Button(palletes, text='Apply', command=apply_pallete_mod) title.grid(row=0, column=0) comboBox_palletes.grid(row=1, column=0) imgRandom_label.grid(row=1, column=1) btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enemy_window():\n path_dir = r'Sor_Mods_Storage\\enemies'\n enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n enemies = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n\n imgRandom_label = tk.Label(enemie...
[ "0.67854697", "0.6475978", "0.63797003", "0.63149863", "0.6215324", "0.62009335", "0.6149193", "0.60768974", "0.60621846", "0.6039159", "0.6020179", "0.6020179", "0.6007678", "0.60024905", "0.599055", "0.5948242", "0.58887446", "0.585083", "0.5733424", "0.56965476", "0.567042...
0.56174505
26
Function that represents the window which Stage Mods can be applied.
def stage_window(): path_dir = r'Sor_Mods_Storage\stages' stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir) # Loading Images to screen stages = tk.Toplevel() mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png')) imgRandom_label = tk.Label(stages, image=mainTitleImg) title = tk.Label(stages, text="Stage Mods") comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys())) def apply_stage_mod(): stage_selected = comboBox_chars.get() result_window = tk.Toplevel() value = '' if stage_selected == '': value = f'{value} Please Select an Stage Mod to Apply!' else: sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages') value = f'Enemy Mod {stage_selected} applied!' result_label = tk.Label(result_window, text=value) result_label.pack() btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod) title.grid(row=0, column=0) comboBox_chars.grid(row=1, column=0) imgRandom_label.grid(row=1, column=1) btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def get_main_window():\n\n pass", "def GetWindow(self):\r\n\r\n return self.window", "def window(self):\n return self._window", "def window(self):\n return self._window", "def showWindow(*args, ...
[ "0.6928395", "0.684634", "0.6601684", "0.6564466", "0.6564466", "0.64463425", "0.6378235", "0.6353312", "0.6351047", "0.629362", "0.62887776", "0.6145612", "0.6136457", "0.6110733", "0.60973674", "0.6074384", "0.6053072", "0.6018539", "0.59742665", "0.5963318", "0.59591293", ...
0.6935589
0
takes list of files as parameter, prints out readible version Fn strips each file by line, gets rid of duplicates, then splits the line into usable chunks before printing out the usable version
def turn_files_into_pretty_text(text_files): list_of_all_lines = [] for item in text_files: for line in item: line = line.rstrip() if line not in list_of_all_lines: list_of_all_lines.append(line) for item in list_of_all_lines: words = item.split('|') melon = words[0] count = words[1] amount = words[2] print "Delivered {} {}s for total of ${}".format(count, melon, amount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findDuplicateReleaseFiles(self, initialList, workingTowerName, newInfix):\n Release_Tower_Name = self.getReleaseVersion(workingTowerName, newInfix)\n Duplicate_List = []\n for fname in initialList:\n prefixStream, postfixStream = string.split(fname, workingTowerName)\n ...
[ "0.5725787", "0.5708584", "0.5698696", "0.5599208", "0.5580545", "0.5571598", "0.5569876", "0.5565192", "0.5528212", "0.55211085", "0.551443", "0.55045366", "0.54886717", "0.54601747", "0.5417886", "0.54174167", "0.5414416", "0.54129356", "0.5406018", "0.53970194", "0.5385436...
0.52034676
43
Description Handle missing values by replacing them with either the default value or the mean/min/max value (for nontext columns only). An indicator column can optionally be concatenated, if theinput column type is numeric.
def transforms_missingvaluehandler( column, data, output_data=None, model=None, replace_with='Def', impute_by_slot=True, concat=True, **params): entrypoint_name = 'Transforms.MissingValueHandler' inputs = {} outputs = {} if column is not None: inputs['Column'] = try_set( obj=column, none_acceptable=False, is_of_type=list, is_column=True) if data is not None: inputs['Data'] = try_set( obj=data, none_acceptable=False, is_of_type=str) if replace_with is not None: inputs['ReplaceWith'] = try_set( obj=replace_with, none_acceptable=True, is_of_type=str, values=[ 'DefaultValue', 'Mean', 'Minimum', 'Maximum']) if impute_by_slot is not None: inputs['ImputeBySlot'] = try_set( obj=impute_by_slot, none_acceptable=True, is_of_type=bool) if concat is not None: inputs['Concat'] = try_set( obj=concat, none_acceptable=True, is_of_type=bool) if output_data is not None: outputs['OutputData'] = try_set( obj=output_data, none_acceptable=False, is_of_type=str) if model is not None: outputs['Model'] = try_set( obj=model, none_acceptable=False, is_of_type=str) input_variables = { x for x in unlist(inputs.values()) if isinstance(x, str) and x.startswith("$")} output_variables = { x for x in unlist(outputs.values()) if isinstance(x, str) and x.startswith("$")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs, input_variables=input_variables, output_variables=output_variables) return entrypoint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_missing(df, col, name, na_dict):\n if is_numeric_dtype(col):\n if pd.isnull(col).sum() or (name in na_dict):\n df[name+'_na'] = pd.isnull(col)\n filler = na_dict[name] if name in na_dict else col.median()\n df[name] = col.fillna(filler)\n na_dict[name] ...
[ "0.58770204", "0.57689667", "0.5714844", "0.5643517", "0.56218046", "0.5612133", "0.5571508", "0.552125", "0.55067784", "0.5474215", "0.5456898", "0.54213846", "0.53971267", "0.53654104", "0.5360555", "0.53490347", "0.53490347", "0.5342924", "0.53326184", "0.53209394", "0.530...
0.54455906
11
Append shore abbreviation to the base reference.
def reference(self):
    """Return the licence reference; non-environment licences get the shore display appended."""
    current = self.context
    # Environment licences keep their bare reference.
    if IEnvironmentBase.providedBy(current):
        return current.reference
    shore = queryAdapter(current, IShore)
    return '{} {}'.format(current.reference, shore.display())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_abbr(over_abbreviated, street_name):\n m = over_abbr_re.search(street_name)\n if m:\n abbr = m.group()\n over_abbreviated[abbr].add(street_name)", "def expand_abbreviation(abbr, doc_type = 'html', profile_name = 'plain'):\n\ttree = parse_into_tree(abbr, doc_type)\n\tif tree:\n\t\tre...
[ "0.59830004", "0.5703842", "0.5681109", "0.5655727", "0.5474441", "0.54697645", "0.5435609", "0.5420919", "0.53656024", "0.53300464", "0.5218761", "0.5197725", "0.51889247", "0.51828516", "0.51511014", "0.5144388", "0.5134165", "0.5127529", "0.51188904", "0.51158106", "0.5102...
0.4860687
45
if the intent name exists, then return without action, otherwise create a blank intent
def create_intent(intent_name):
    """Create a demo hotel-booking Lex intent named *intent_name*.

    If an intent with that name already exists (the GET succeeds), print a
    warning and return without modifying it; otherwise create it with
    ``put_intent``.

    NOTE(review): Python 2 code (print statements, %-formatting). The bare
    ``except`` is used purely as an existence probe -- presumably
    ``client.get_intent`` raises a NotFoundException when the intent is
    missing; confirm the exact exception type and narrow the clause.
    """
    try:
        # Existence check: succeeds only if the intent already exists.
        response=client.get_intent(
            name=intent_name,
            version="$LATEST"
        )
        print "There is a %s intent in your account, please consider delete it or using another name" %(intent_name)
        return
    except:
        # Intent not found -- fall through and create it.
        pass
    response=client.put_intent(
        name=intent_name,
        description='the demo intent',
        sampleUtterances=[
            'Can I book a hotel',
        ],
        confirmationPrompt={
            'messages': [
                {
                    'contentType': 'PlainText',
                    'content': 'Your hotel booking is ready, do you want to place an order?'
                },
            ],
            'maxAttempts': 2,
        },
        rejectionStatement={
            'messages': [
                {
                    'contentType': 'PlainText' ,
                    'content': 'Ok. I will discard the hotel booking information'
                },
            ],
        },
        conclusionStatement={
            'messages': [
                {
                    'contentType': 'PlainText',
                    'content': 'Your hotel booking has been confirmed'
                },
            ],
        },
        fulfillmentActivity={
            'type': 'ReturnIntent'
        }
    )
    print "Intent %s created successfully" %(intent_name)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_intent(self, intent_name):\n for name, intent in self:\n if name == intent_name:\n return intent\n else:\n return None", "def exists_intent_action(self, intent_keyword):\n pass", "def get_intent_action(self, intent_keyword):\n pass", "d...
[ "0.67302954", "0.6667681", "0.6538136", "0.6383118", "0.61762303", "0.59395283", "0.58650994", "0.58650994", "0.58105075", "0.5734404", "0.5690178", "0.56890965", "0.5672978", "0.5672482", "0.5650137", "0.56120867", "0.55995744", "0.55600524", "0.5517704", "0.54916435", "0.54...
0.62379736
4
Delete the specified intent from your account.
def delete_intent(intent_name): try: client.get_intent( name=intent_name, versionOrAlias='$LATEST' ) answer=raw_input("Do you want to delete %s from your account(Y/y for YES, other NO):" %(intent_name)) if answer in ['Y', 'y']: client.delete_intent( name=intent_name ) print "You chose to delete the intent %s, deleted..." %(intent_name) else: print "You chose not to delete the inten t%s, exiting..." %(intent_name) except: print "There is no intent called %s, exiting..." %(intent_name) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_account(self, account):\n \n pass", "def delete(self, args):\n try:\n db = get_db('intents')\n intents = db.delete_intent(args['intent'])\n resp = jsonify(intents=intents)\n resp.status_code = 200\n return resp\n except...
[ "0.7136622", "0.7109074", "0.6793135", "0.6618114", "0.6454273", "0.6342631", "0.63189256", "0.62924826", "0.6259348", "0.6142261", "0.6112738", "0.60232806", "0.5955118", "0.59354204", "0.5897123", "0.58737737", "0.5854009", "0.5853782", "0.5827274", "0.5824352", "0.57835615...
0.7211831
0
demo function to get the intent's latest configuration
def get_intent_configuration(intent_name, version ="$LATEST"):
    """Fetch and return the configuration of *intent_name* at *version* (latest draft by default)."""
    return client.get_intent(name=intent_name, version=version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config():\n return CONFIG", "def getConfig(self):\n pass", "def config(self) -> \"AutomationConfig\":", "def config(self) -> \"AutomationConfig\":", "def get_config(self,config):\n return self.parser.get(\"main\", config)", "def get_details(self):\n return self.__config_da...
[ "0.6338923", "0.6251342", "0.61532223", "0.61532223", "0.6144978", "0.60848004", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", ...
0.73107255
0
A helper function to print the intent information in a readable format.
def format_print_jobs(intent):
    """Pretty-print one intent dict: the name first, then every other field indented.

    NOTE(review): Python 2 code -- uses the ``<>`` inequality operator and
    ``dict.iteritems()``, both removed in Python 3.
    """
    print "\nintentName: %s" %(intent['name'])
    for k,v in intent.iteritems():
        if k <> 'name':
            print "\t" + str(k) + ": " + str(v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printhelp():", "def info(self):", "def info(self):", "def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\...
[ "0.64891857", "0.64813703", "0.64813703", "0.6405768", "0.6331518", "0.6306247", "0.62967855", "0.6288361", "0.6245361", "0.6203289", "0.61881757", "0.617994", "0.6178175", "0.6174316", "0.6146131", "0.614331", "0.6139111", "0.61244285", "0.6111058", "0.60988563", "0.60816693...
0.6709204
0
Initializes the Crawler and decides which action to take based on the mode.
def __init__(self, restUrl: str, mode: CrawlMode = CrawlMode.NO, loginUrl: str = None, loginName: str = None,
             loginPW: str = None, furtherparams: str = None, workers: int = 10, mongoDB: Database = None,
             foldername: str = None, bugList: Union[List, str] = None) -> None:
    """Set up the crawler session and immediately dispatch the requested crawl.

    :param restUrl: base URL of the Bugzilla REST API; a trailing '/' is
        appended if missing.
    :param mode: which crawl operation to run (handed to ``decide_action``).
    :param loginUrl: if given, POST credentials there to establish a session.
    :param loginName: Bugzilla user name (only used with ``loginUrl``).
    :param loginPW: Bugzilla password (only used with ``loginUrl``).
    :param furtherparams: extra query-string suffix appended to the bug URL.
        NOTE(review): concatenated unconditionally -- passing None raises a
        TypeError; confirm callers always supply a string.
    :param workers: worker count for the multiprocessing crawl modes.
    :param mongoDB: optional Mongo database; enables DB persistence.
    :param foldername: optional output folder; enables file persistence.
    :param bugList: bug-ID list (or pickle filename) for comment-only modes.
    """
    self.session = requests.session()
    self.workers = workers
    if loginUrl:
        #bugzilla user data
        user = loginName
        pw = loginPW
        #login process
        loginURL = loginUrl
        self.session.post(loginURL, {'Bugzilla_login': user, 'Bugzilla_password': pw})
    #checks for the right ending of restUrl
    if restUrl[-1] != '/':
        restUrl += '/'
    #prepares URLs for crawling of bugs and comments
    self.bugURL = restUrl + 'bug?limit=500' + furtherparams
    self.commentURL = restUrl + 'bug/{}/comment'
    #database if given one
    self.mongoDB = mongoDB
    #foldername if given one
    self.folder = foldername
    if foldername:
        #creates directory
        self.createFolder(foldername)
        self.folderpath = foldername + '/'
    #checks on which crawl operation to execute; the crawl runs inside __init__
    self.decide_action(mode, bugList)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_modes(self):\n self._verify_not_using_threaded_mpm()\n\n self._init_screenshot_mode()\n self._init_debug_mode()\n self._init_webapi_cors_header()\n self.init_theme()", "def decide_action(self, mode: CrawlMode = CrawlMode.NO, bugList: Union[List, str] = None) -> None:\n...
[ "0.6156874", "0.5882419", "0.5669092", "0.5625389", "0.56192595", "0.5612265", "0.55834305", "0.5568586", "0.55475867", "0.55300885", "0.5503639", "0.54796416", "0.54796416", "0.5456761", "0.53957784", "0.5347344", "0.53471684", "0.5346393", "0.53363496", "0.53115886", "0.530...
0.5355751
15
Decides which action to start depending on the mode.
def decide_action(self, mode: CrawlMode = CrawlMode.NO, bugList: Union[List, str] = None) -> None:
    """Dispatch to the crawl routine matching *mode*; unknown modes do nothing.

    COMMENT mode requires a non-empty *bugList* and reports an error
    otherwise; the *FAST variants use the multiprocessing crawler.
    """
    if mode == CrawlMode.BUG:
        self.get_all_bugs()
        return
    if mode == CrawlMode.COMMENT:
        if not bugList:
            print('Error: No buglist to be found. Please check your params and start again.')
            return
        self.get_all_comments(bugList)
        return
    if mode == CrawlMode.BOTH:
        # crawl bugs first, then the comments of the IDs just collected
        self.get_all_comments(self.get_all_bugs())
        return
    if mode == CrawlMode.CFAST:
        self.get_all_comments_mp(bugList, self.workers)
        return
    if mode == CrawlMode.BFAST:
        self.get_all_comments_mp(self.get_all_bugs(), self.workers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_action(self):\r\n pass", "def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()", "d...
[ "0.67960316", "0.66079754", "0.63345975", "0.61656135", "0.6149419", "0.61217403", "0.6095867", "0.6050132", "0.60292625", "0.60050106", "0.5970908", "0.5939976", "0.592196", "0.59155333", "0.5853063", "0.58509487", "0.58477354", "0.5835604", "0.5818035", "0.5796116", "0.5762...
0.59804595
10
Crawls all requested bug data and bug ids. Saves them in files (bugIDListP.pickle, bugIDList.csv, bugsData.txt ) and/or Mongo DB collections (BugIDs, BugsData) depending if they are given at initialization.
def get_all_bugs(self) -> List:
    """Crawl every bug from the REST API in pages of 500 and persist the results.

    Pages through ``self.bugURL`` with an increasing ``offset`` until an
    empty page comes back. Persists to Mongo (collections ``BugIDs`` and
    ``BugsData``) and/or to files (``bugIDListP.pickle``, ``bugIDList.csv``,
    ``bugsData.txt``) depending on what was configured at construction.

    :return: the list of crawled bug IDs, for further processing.

    NOTE(review): the response text is parsed with ``ast.literal_eval``
    after rewriting true/false/null -- presumably because the payload is
    JSON; ``json.loads`` would avoid the rewriting. Verify before changing.
    """
    #starting point
    offset = 0
    #list for all bugs
    resultBugList = []
    #list for bug IDs
    bugIDList = []
    #checks if there are still results returned
    notEmpty = True

    #queries in 500 bug steps until the result list is empty
    while notEmpty:
        print("entered")
        #interpretation of result as list plus formatting for eval errors
        result = ast.literal_eval(self.session.get(self.bugURL + "&offset=" + str(offset)).text.
                                  replace('true', 'True').replace('false', 'False').replace('null', 'None'))["bugs"]
        #checks if the query needs to be set again with a new offset
        if result:
            resultBugList += result
        else:
            notEmpty = False

        #gets the ID out of all comments
        partList = [bug["id"] for bug in result]
        bugIDList += partList

        #sets new starting point
        offset += 500

    #inserts bug ids and bugs into db if given one
    if self.mongoDB:
        for id in bugIDList:
            self.mongoDB["BugIDs"].insert_one({"ID": id})
        self.mongoDB["BugsData"].insert_many(resultBugList)

    #creates files for bug ids and bugs if given a folder
    if self.folder:
        #saves bug list as python object
        with open(self.folderpath + "bugIDListP.pickle", "wb") as a:
            pickle.dump(bugIDList, a)
        #saves bug list as csv
        with open(self.folderpath + "bugIDList.csv", "w") as b:
            for id in bugIDList:
                b.write(str(id) + "\n")
        with open(self.folderpath + "bugsData.txt", "w") as c:
            for bug in resultBugList:
                c.write(str(bug) + "\n")

    #returns List Object for further processing
    return(bugIDList)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) ==...
[ "0.6098931", "0.581118", "0.58095926", "0.5599205", "0.55810404", "0.55514055", "0.5436911", "0.5393213", "0.5392565", "0.52618045", "0.5231558", "0.52075624", "0.519109", "0.514051", "0.5135371", "0.5125481", "0.5122285", "0.50928766", "0.50890225", "0.50798535", "0.50633335...
0.7346313
0
Crawls for all comments belonging to the bugs in the BugIDList.
def get_all_comments(self, idList: Union[List, str]) -> None:
    """Crawl the comments of every bug ID in *idList* and persist them.

    *idList* may be a list of IDs or the name of a ``.pickle`` file
    containing one. Comments are appended to the Mongo ``Comments``
    collection and/or to ``Bugzilla_Comments.txt``, depending on what was
    configured at construction.

    NOTE(review): a plain string without ".pickle" only triggers an error
    print -- execution then continues and iterates over the string's
    characters; confirm whether an early return is intended.
    """
    #loads pickle list if it is one
    if type(idList) == str and ".pickle" in idList:
        print("pickle load")
        with open(idList, "rb") as f:
            idList = pickle.load(f)
    elif type(idList) == str:
        print("Error: Buglist parameter seems to be neither a List object or the name of a pickle file "
              "(needs to contain .pickle).")

    #goes through idList
    for id in tqdm(idList):
        #performs request and replaces trouble some parts
        commentsString = self.session.get(self.commentURL.format(id)).text.\
            replace('true', 'True').replace('false', 'False').replace('null', 'None')
        #gets only the comments
        commentsDict = ast.literal_eval(commentsString)["bugs"][str(id)]["comments"]

        #enters comments into db or file if there are any comments for the id
        if commentsDict:
            if self.mongoDB:
                self.mongoDB["Comments"].insert_many(commentsDict)
            if self.folder:
                with open(self.folderpath + "Bugzilla_Comments.txt", 'a') as f:
                    f.write(str(commentsDict) + "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result...
[ "0.64830816", "0.6414829", "0.64109683", "0.6044062", "0.5997073", "0.5927429", "0.5870754", "0.58518946", "0.57999045", "0.5750483", "0.5748707", "0.5685098", "0.5681446", "0.5678411", "0.5673924", "0.56592536", "0.5652936", "0.5610223", "0.5589951", "0.55766493", "0.5568257...
0.7094204
0
Crawls for all comments belonging to the bugs in the BugIDList utilizing parallelization.
def get_all_comments_mp(self, list: Union[List, str], workers: int = 10) -> None:
    """Crawl comments for all bug IDs in parallel with *workers* processes.

    Splits the ID list into ``workers`` chunks and runs ``get_all_comments``
    on each chunk in a multiprocessing pool.

    NOTE(review): the parameter shadows the builtin ``list`` -- renaming it
    would change the keyword interface, so it is only flagged here.
    NOTE(review): print("wat") and print(sub_list) look like leftover debug
    output; removing them changes stdout, so they are kept.
    """
    # loads pickle list if it is one
    if type(list) == str and ".pickle" in list:
        print("wat")
        with open(list, "rb") as f:
            list = pickle.load(f)
    elif type(list) == str:
        print("Error: Buglist parameter seems to be neither a List object or the name of a pickle file "
              "(needs to contain .pickle).")

    #gets workers and splits list into chunks fitting the worker amount
    pool = Pool(workers)
    list = np.array(list)
    lists = np.array_split(list, workers)

    #each worker crawls for comments
    for sub_list in lists:
        print(sub_list)
        pool.apply_async(self.get_all_comments, (sub_list,))
    pool.close()
    pool.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) ==...
[ "0.6642331", "0.6419399", "0.6326556", "0.590753", "0.58366686", "0.5725965", "0.56005704", "0.55982554", "0.55817443", "0.5574108", "0.5573544", "0.55464286", "0.5534647", "0.55332947", "0.55286044", "0.5525885", "0.55153495", "0.54998296", "0.549831", "0.5463732", "0.544268...
0.64263386
1
Creates a directory if it doesn't exist already
def createFolder(self, foldername: str) -> None:
    """Create *foldername* (including parents) if missing.

    On failure an error message is printed instead of raising, matching the
    original best-effort behavior.
    """
    try:
        # exist_ok avoids the check-then-create race of the former
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(foldername, exist_ok=True)
    except OSError:
        print('Error: Creating following directory: ' + foldername)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return", "def create_directory(dir_path):\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_...
[ "0.84880376", "0.84691334", "0.8421431", "0.8365199", "0.83478045", "0.83478045", "0.83265615", "0.8294003", "0.8283774", "0.82814693", "0.8278838", "0.8278838", "0.82774234", "0.827556", "0.8255593", "0.824047", "0.82380694", "0.82136655", "0.82106537", "0.8207876", "0.82056...
0.0
-1
download sequencing file from SRA archive requires local install of SRA tools in path requires verification of filenames and paths
def download_SRA(SRA):
    """Download the sequencing archive *SRA* and extract gzipped FASTQ data.

    Requires the SRA toolkit (``prefetch``, ``fastq-dump``) on the PATH and
    ``NCBI_DIR`` pointing at the prefetch output directory.

    NOTE(review): return codes of both subprocesses are ignored -- a failed
    prefetch silently leads to a failed fastq-dump; consider check=True.
    """
    print("Downloading SRA archive")
    output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT)

    print("Extracting FASTQ data")
    output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR+SRA+'.sra'], stderr=subprocess.STDOUT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_sra_files(remote_location, local_location = '', max_recursion = 3, verbose = False):\n\n downloaded_files = list();\n\n def printv(*args):\n if(verbose):\n print(*args);\n sys.stdout.flush();\n\n printv(\"Reading folder: \", remote_location);\n\n req = urllib2....
[ "0.6527539", "0.63202286", "0.6183208", "0.61473656", "0.6141469", "0.5976712", "0.5897661", "0.58915883", "0.58283", "0.5815497", "0.5812849", "0.58102864", "0.58102864", "0.5790022", "0.57642645", "0.5758532", "0.57328504", "0.57257515", "0.57255936", "0.5705985", "0.570499...
0.77889556
0
put path to indices / pass paths as arg, e.g. STAR_DIR
def build_indices(genome_fasta, genome_gtf, rRNA_fasta, transcriptome_fasta):
    """Build the Bowtie (genome + rRNA) and STAR (genome + transcriptome) indices.

    Creates ``data/indices`` and the per-tool index directories if missing,
    then shells out to ``bowtie-build`` and ``STAR --runMode genomeGenerate``.

    NOTE(review): commands are built by string concatenation and run with
    shell=True -- paths containing spaces or shell metacharacters will
    break or be interpreted by the shell; argument lists with shell=False
    would be safer.
    """
    if not os.path.exists("data/indices"):
        os.mkdir("data/indices")

    # 1. Bowtie index
    print("Building Bowtie index")
    if not os.path.exists(BOWTIE_DIR):
        os.mkdir(BOWTIE_DIR)
    cmd_bowtie = 'bowtie-build' + ' ' + genome_fasta + ' ' + BOWTIE_DIR+'/yeast'
    output = subprocess.run(cmd_bowtie, shell=True)

    cmd_rRNA = 'bowtie-build' + ' ' + rRNA_fasta + ' ' + BOWTIE_DIR+'/rRNA'
    output = subprocess.run(cmd_rRNA, shell=True)

    # 2. STAR index
    print("Building STAR index")
    if not os.path.exists(STAR_DIR):
        os.mkdir(STAR_DIR)
    cmd_STAR = 'STAR' + ' ' + '--runThreadN' + ' ' + '4' + ' ' + '--runMode' + ' ' + 'genomeGenerate' + ' ' + '--genomeDir' + ' ' + STAR_DIR + ' ' + '--genomeFastaFiles' + ' ' + genome_fasta + ' ' + '--sjdbGTFfile' + ' ' + genome_gtf #+ ' ' + '--sjdbOverhang' + ' ' + 'max(ReadLength)-1'
    output = subprocess.run(cmd_STAR, shell=True)

    # run build transcriptome fasta.
    if not os.path.exists(STAR_TRANSCRIPTOME_DIR):
        os.mkdir(STAR_TRANSCRIPTOME_DIR)
    cmd_STAR = 'STAR' + ' ' + '--runThreadN' + ' ' + '4' + ' ' + '--runMode' + ' ' + 'genomeGenerate' + ' ' + '--genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--genomeFastaFiles' + ' ' + transcriptome_fasta # + ' ' + '--sjdbGTFfile' + ' ' + genome_gtf #+ ' ' + '--sjdbOverhang' + ' ' + 'max(ReadLength)-1'
    output = subprocess.run(cmd_STAR, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indexes_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(dataset_path(dataset, work_dir), consts.INDEXES_DIR)", "def index_STAR(args):\n\n # make STAR index folder for merged path\n merged_STAR_watson_index = os.path.join(args.output_dir,'STAR_merged_watson')\n merged_STAR_crick_inde...
[ "0.6537887", "0.63956136", "0.6359709", "0.60461843", "0.58392173", "0.56591266", "0.5648433", "0.5629673", "0.56082153", "0.5570931", "0.5479381", "0.5462276", "0.5429903", "0.54241526", "0.5421316", "0.5394815", "0.5362421", "0.53275514", "0.5321562", "0.5312183", "0.530585...
0.57987577
5
maps reads (bowtie to rRNA for legacy?) to extract ambiguous and uniquely mapped reads
def map_reads(SRA):
    """Filter rRNA reads with Bowtie, map the rest with STAR, keep unique hits.

    Pipeline: (1) Bowtie against the rRNA index, keeping unmapped reads in
    a FASTQ; (2) STAR alignment of those reads to the genome; (3) samtools
    to keep MAPQ-255 (uniquely mapped) reads, sort, and index the BAM.
    Commands run through the shell; return codes are not checked.
    """
    #1. bowtie to rRNA
    print("Bowtie alignement on contaminant RNA...")
    cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'
    output = subprocess.run(cmd_bowtie, shell=True)

    # 2. STAR to ref genome
    print("STAR alignement to yeast genome...")
    cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'
    output = subprocess.run(cmd_STAR, shell=True)

    # 3. Samtools keep uniquely mapped reads and sort
    # -q 255: STAR assigns MAPQ 255 to uniquely mapped reads
    print("Samtools to keep uniquely mapped reads and sort...")
    cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'
    output = subprocess.run(cmd_samtools1, shell=True)

    cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'
    output = subprocess.run(cmd_samtools2, shell=True)

    cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'
    output = subprocess.run(cmd_samtools3, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if ...
[ "0.6826506", "0.66215014", "0.6525083", "0.6418822", "0.6416063", "0.631023", "0.6223504", "0.6182403", "0.60843307", "0.6026228", "0.6004919", "0.5979466", "0.5960708", "0.59569067", "0.59227425", "0.59207463", "0.5912195", "0.58296555", "0.5819795", "0.5811446", "0.5797608"...
0.6700885
1
wrapper to run scikitribo from the same pipeline requires local install of modified scikitribo toolbox requires local install of all dependencies of scikitribo environment (see conda environment file)
def run_scikit_ribo(SRA, genome_fasta, genome_gtf):
    """Run the scikit-ribo pipeline: build its index, run it, plot densities.

    Requires a local install of the (modified) scikit-ribo toolbox at
    ``SCIKIT_PATH`` plus its dependencies. Outputs go under ``SCIKIT_DIR``
    and ``TMP/scikit_<SRA>``.

    BUGFIX: the build command previously concatenated ``'-o' + SCIKIT_DIR``
    without a separating space, producing e.g. ``-odata/...`` instead of
    ``-o data/...``.
    """
    # 3. Scikit-ribo index
    print("Building scikit-ribo index")
    if not os.path.exists(SCIKIT_DIR):
        os.mkdir(SCIKIT_DIR)
    cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-build.py' + ' ' + '-g' + ' ' + genome_gtf + ' ' + '-f' + ' ' + genome_fasta + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + ' ' + SCIKIT_DIR
    output = subprocess.run(cmd_scikit, shell=True)

    print("scikit-ribo-run.py...")
    cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-run.py' + ' ' + '-i' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + '-f' + ' ' + SCIKIT_DIR + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + ' ' + 'TMP/scikit_'+SRA
    output = subprocess.run(cmd_scikit, shell=True)

    print("plot_ribo_density_dict.py...")
    cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'plot_ribo_density_dict_noCDT.py' + ' ' + '-i' + ' ' + TMP_DIR+'scikit_'+SRA+'/riboseq_input.txt' + ' ' + '-g' + ' ' + 'all' + ' ' + '-o' + ' ' + TMP_DIR+'scikit_'+SRA #+'_profiles'
    output = subprocess.run(cmd_scikit, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transformation_catalog():\n tc = TransformationCatalog()\n\n # Add docker container\n #crisis_container = Container(\n # 'crisis_container',\n # Container.DOCKER,\n # image = \"docker://slnagark/crisis_wf:latest\",\n # arguments=\"--runtime=nvidi...
[ "0.57629657", "0.56087464", "0.54360694", "0.54355013", "0.5393979", "0.53826815", "0.5367081", "0.5317132", "0.52905655", "0.5287661", "0.52496874", "0.5199404", "0.51819116", "0.51798064", "0.51774645", "0.5171329", "0.50645137", "0.50610745", "0.50353134", "0.5028036", "0....
0.6405816
0
identify all reads that map ambigously and their positions
def run_multimapping(SRA):
    """Align reads to the transcriptome with STAR, keep only multi-mapped reads.

    Writes STAR output under ``TMP/ambiguous_reads/``, filters for
    multi-mapping reads with the project script ``sam_STAR_mapq_filtering.py``,
    then indexes the filtered BAM with samtools. Commands run through the
    shell; return codes are not checked.
    """
    if not os.path.exists("TMP/ambiguous_reads/"):
        os.mkdir("TMP/ambiguous_reads/")
    cmd_STAR = 'STAR --outSAMtype BAM SortedByCoordinate --runThreadN 8 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_'
    output = subprocess.run(cmd_STAR, shell=True)

    # Keep only multi-mapping reads:
    cmd_filter = 'python code/sam_STAR_mapq_filtering.py' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_Aligned.sortedByCoord.out.bam' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam' + ' ' + 'all'
    output = subprocess.run(cmd_filter, shell=True)

    cmd_samtools2 = 'samtools index' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam'
    output = subprocess.run(cmd_samtools2, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def determine_crossmapped_reads(self, read_alignment_path):\n references_by_species = self._get_references_by_species()\n crossmapped_reads = set()\n done_replicon_comparison = []\n with pysam.AlignmentFile(read_alignment_path) as bam:\n for org, replicon_ids in references_by...
[ "0.6598933", "0.64177066", "0.6393043", "0.6074415", "0.6032629", "0.60148144", "0.57585204", "0.5614244", "0.55744815", "0.55740666", "0.5564409", "0.55503625", "0.5542675", "0.55369735", "0.55324143", "0.55242544", "0.5486904", "0.547165", "0.5459795", "0.54455733", "0.5401...
0.0
-1
consolidate results into dataframes
def parse_dataframes(genome_gtf, sralist):
    """Consolidate scikit-ribo results for all datasets into strand-oriented matrices.

    Loads the per-gene profiles of every dataset in *sralist*, orients them
    by the strand recorded in *genome_gtf*, pickles the resulting matrix and
    codon-sequence dictionaries under ``../data/processed/``, and returns the
    per-gene matrix dict.
    """
    def gather_strand_by_geneID_dict(genome_gtf):
        """Return {geneID: strand} for every CDS line of *genome_gtf*,
        e.g. {'YAL012W': '+', 'YAL069W': '+', 'YAL068W-A': '+', ...}."""
        strand_by_geneID_dict = {}
        with open(genome_gtf) as f:
            for line in f:
                current_line = line.split('\t')
                if current_line[2] == "CDS":
                    current_orf = current_line[8].split(';')[2].split()[1].strip('\"')
                    current_strand = current_line[6]
                    strand_by_geneID_dict[current_orf] = current_strand
        return strand_by_geneID_dict

    def import_scikit_data(sralist):
        """Import scikit-ribo results for all datasets contained in *sralist*."""
        scikit_data_dict = {}
        for dataset in sralist:
            with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data:
                scikit_data_dict[dataset] = [json.load(scikit_data)]
        return scikit_data_dict

    def build_mat_scikit_strandOriented(sralist, scikit_data):
        """Build per-gene matrices from the scikit-ribo output.

        Profiles of genes on the '-' strand are reversed so all genes read
        5'->3'. Each profile is also trimmed by 8 codons at both ends
        (``[8:-8]``) because the scikit-ribo pipeline pads each CDS with 8
        extra codons per side.
        """
        scikit_mat = {}
        seq_codons = {}
        seq_aa = {}

        for geneID in scikit_data[sralist[0]][0].keys():
            for ix, dataset in enumerate(sralist):
                if geneID in scikit_data[dataset][0].keys():
                    current_profile = scikit_data[dataset][0].get(geneID, np.nan)
                    current_ribo = current_profile[0]
                    current_ribo = current_ribo[8:-8]
                    N = len(sralist)
                    M = len(current_ribo)
                    print(geneID, M)
                    if ix == 0:
                        current_matrix = np.zeros((N,M)) * np.nan
                        current_seq_codons = current_profile[1]
                        current_seq_codons = current_seq_codons[8:-8]
                        current_seq_aa = current_profile[2]
                        current_seq_aa = current_seq_aa[8:-8]
                        if strand_by_geneID_dict.get(geneID, "NA") == "+":
                            seq_codons[geneID] = current_seq_codons
                            seq_aa[geneID] = current_seq_aa
                        elif strand_by_geneID_dict.get(geneID, "NA") == "-":
                            seq_codons[geneID] = current_seq_codons[::-1]
                            seq_aa[geneID] = current_seq_aa[::-1]
                    if strand_by_geneID_dict.get(geneID, "NA") == "+":
                        current_matrix[ix,:] = current_ribo
                    elif strand_by_geneID_dict.get(geneID, "NA") == "-":
                        current_matrix[ix,:] = current_ribo[::-1]
            if np.sum(current_matrix) > 0:
                scikit_mat[geneID] = current_matrix
        # scikit_df = pd.DataFrame(values_list, columns=columns_list)
        return scikit_mat, seq_codons, seq_aa

    def mean_norm(row):
        # divide each position of a codon-density profile by the profile mean
        codon_dens_prof = row.codon_density_profile
        profile_average = np.average(codon_dens_prof)
        return [x/profile_average for x in codon_dens_prof]

    #scikit_data_df["mean_norm_codon_density_profile"] = scikit_data_df.apply(mean_norm, axis=1)
    #scikit_data_df["mean_norm_codon_density_profile"] = scikit_data_df['mean_norm_codon_density_profile'].apply(lambda x: x[8:-8])

    strand_by_geneID_dict = gather_strand_by_geneID_dict(genome_gtf)
    scikit_data_dict = import_scikit_data(sralist)
    scikit_data_mat, seq_codons_dict, seq_aa_dict = build_mat_scikit_strandOriented(sralist, scikit_data_dict)

    with open('../data/processed/scikit_mat.pkl', 'wb') as f:
        pickle.dump(scikit_data_mat, f)

    with open('../data/processed/scikit_codonseq.pkl', 'wb') as f_seq:
        pickle.dump(seq_codons_dict, f_seq)

    return scikit_data_mat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def consolidate_results(path='./Data'):\n model_files = [load(os.path.join(p...
[ "0.6732909", "0.66638446", "0.6632721", "0.65633905", "0.65257215", "0.6472688", "0.6428347", "0.63872194", "0.6289251", "0.6240572", "0.6240572", "0.62322944", "0.6207928", "0.61921304", "0.61735207", "0.61727184", "0.6156177", "0.61521775", "0.6060341", "0.6056508", "0.6026...
0.0
-1
Returns a dictionary with strand orientations as values and gene IDs as keys.
def gather_strand_by_geneID_dict(genome_gtf):
    """Map each CDS gene ID in the GTF file *genome_gtf* to its strand ('+'/'-')."""
    strands = {}
    with open(genome_gtf) as gtf:
        for row in gtf:
            fields = row.split('\t')
            if fields[2] != "CDS":
                continue
            # gene ID = second whitespace token of the third ';'-separated
            # attribute, with surrounding quotes stripped
            gene_id = fields[8].split(';')[2].split()[1].strip('"')
            strands[gene_id] = fields[6]
    return strands
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.spl...
[ "0.6775984", "0.63313013", "0.6185268", "0.6166058", "0.61002773", "0.5993038", "0.5952857", "0.5945249", "0.5915649", "0.59024686", "0.5877856", "0.5875492", "0.5870065", "0.5817717", "0.57851946", "0.57501155", "0.57454574", "0.5728968", "0.57258415", "0.56919414", "0.56362...
0.6580928
1
Import results from scikit pipeline for all datasets contained in datsets_names.
def import_scikit_data(sralist):
    """Load the scikit-ribo per-gene profile JSON for every dataset in *sralist*."""
    profiles = {}
    for name in sralist:
        json_path = TMP_DIR + 'scikit_' + name + '/ALL_genes_profile_dict.json'
        with open(json_path, 'r') as handle:
            # each value is wrapped in a one-element list, matching the
            # downstream indexing pattern data[dataset][0]
            profiles[name] = [json.load(handle)]
    return profiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_processed_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n path = os.path.join('downloads', name)\n path_processed = os.path.join(path, 'processed')\n\n if name == 'iris':\n return pd.read_csv(os...
[ "0.6478788", "0.6423142", "0.6417792", "0.6409163", "0.64047015", "0.6366228", "0.62911695", "0.62740004", "0.6178644", "0.61630666", "0.6153745", "0.6104244", "0.6079098", "0.6072685", "0.6070125", "0.60272676", "0.60272676", "0.59892446", "0.5962235", "0.59133875", "0.59104...
0.0
-1
Building of scikit_df based on the output of plot_ribo_density_dict.py script. C/-/reverse/complementary strands are taken into account and the profile values ("codon_density_profile", "codon_triplet", "codon_AA") are reversed. This is
def build_mat_scikit_strandOriented(sralist, scikit_data): scikit_mat = {} seq_codons = {} seq_aa = {} for geneID in scikit_data[sralist[0]][0].keys(): for ix, dataset in enumerate(sralist): if geneID in scikit_data[dataset][0].keys(): current_profile = scikit_data[dataset][0].get(geneID, np.nan) current_ribo = current_profile[0] current_ribo = current_ribo[8:-8] N = len(sralist) M = len(current_ribo) print(geneID, M) if ix == 0: current_matrix = np.zeros((N,M)) * np.nan current_seq_codons = current_profile[1] current_seq_codons = current_seq_codons[8:-8] current_seq_aa = current_profile[2] current_seq_aa = current_seq_aa[8:-8] if strand_by_geneID_dict.get(geneID, "NA") == "+": seq_codons[geneID] = current_seq_codons seq_aa[geneID] = current_seq_aa elif strand_by_geneID_dict.get(geneID, "NA") == "-": seq_codons[geneID] = current_seq_codons[::-1] seq_aa[geneID] = current_seq_aa[::-1] if strand_by_geneID_dict.get(geneID, "NA") == "+": current_matrix[ix,:] = current_ribo elif strand_by_geneID_dict.get(geneID, "NA") == "-": current_matrix[ix,:] = current_ribo[::-1] if np.sum(current_matrix) > 0: scikit_mat[geneID] = current_matrix # scikit_df = pd.DataFrame(values_list, columns=columns_list) return scikit_mat, seq_codons, seq_aa
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n super().__init__()\n self.upperBoundUsed = False # True if the distribution is right truncated\n self.lowerBoundUsed = False # True if the distribution is left truncated\n self.hasInfiniteBound = False # True if the untruncated distribution has bounds of +- syst...
[ "0.5600742", "0.5545156", "0.5511175", "0.5406428", "0.5405446", "0.53374326", "0.52538216", "0.52383655", "0.5226638", "0.519781", "0.51927817", "0.51903063", "0.516903", "0.5139694", "0.5136511", "0.5129903", "0.51277244", "0.51252174", "0.50994223", "0.5087249", "0.5086157...
0.0
-1
dictionary of boolean multimapping matrices
def build_mm_df(sralist): def convert_to_codon(nts_array): """ pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution. This function converts nucleotide arrays to codon length (nts to codon resolution): """ nts_array = np.array(nts_array) codon_array = np.sum( np.reshape(A, (int(np.floor(nts_array[1]/3)),3) ), 1)/3. return codon_array def compute_mm(mmdata): """ get per gene average multi-mapping score """ mm_df = pd.DataFrame(columns=['ORF', 'MM']) counter = 0 for gene in mmdata.keys(): current_matrix = mmdata[gene] current_avrg = np.mean( np.sum(current_matrix, 1) / current_matrix.shape[1] ) mm_df.loc[counter] = [gene, current_avrg] counter += 1 return mm_df mm_mat = {} mm_pct = {} N = len(sralist) for ix, dataset in enumerate(sralist): samfile = pysam.AlignmentFile(TMP_DIR+'/ambiguous_reads/'+dataset+'_STAR_transcriptome_multi_mapped_sorted.bam', 'rb') genes_list = list(samfile.references) print(ix, dataset) for geneID in genes_list: # count the coverage of genomic positions by reads in region. # Returns: four array.arrays of the same length in order A C G T # The coverage is computed per-base [ACGT] cov = samfile.count_coverage(geneID, read_callback='nofilter') # Summ all 4 arrays cov_sum = np.sum(cov, axis=0) #print(geneID, cov_sum) codon_cov = convert_to_codon(cov_sum) codon_bool = np.asarray([1 if i > 0 else 0 for i in codon_cov]) M = len(codon_bool) if ix == 0: mm_mat[geneID] = np.zeros((N,M)) * np.nan current_matrix = mm_mat[geneID] current_matrix[ix,:] = np.copy(codon_bool) mm_mat[geneID] = current_matrix mm_avrg = compute_mm(mm_mat) #mm_avrg.to_json('yeast_mm.json') #mm_avrg.to_csv('yeast_mm.txt', header=True, index=False, sep='\t') mm_profile = {} theta_mm = 5 for orf in mm_mat.keys(): current_mat = mm_mat[orf] current_bool = np.sum(current_mat, 0) <= theta_mm mm_profile[orf] = current_bool with open('../data/processed/mm_consensus.pkl', 'wb') as f_mm: pickle.dump(mm_profile, f_mm) return mm_mat, mm_avrg, mm_profile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dict_of_bool2(self):\n pass", "def eye(m):\n data = dict()\n for i, j in itertools.product(range(m), range(m)):\n data[i, j] = mpfr(i == j)\n return MPMatrix((m, m), data)", "def getMatrixMap(self):\n return self.M_array", "def buildDicts(resComp, comparisons, matrices, ...
[ "0.6008547", "0.559379", "0.5578552", "0.55711734", "0.5570441", "0.5570441", "0.55518544", "0.5523987", "0.5333082", "0.5278061", "0.5247944", "0.5236596", "0.5219008", "0.5200241", "0.5198334", "0.51891565", "0.5130926", "0.5113897", "0.51134247", "0.50944567", "0.5081737",...
0.0
-1
pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.
def convert_to_codon(nts_array): nts_array = np.array(nts_array) codon_array = np.sum( np.reshape(A, (int(np.floor(nts_array[1]/3)),3) ), 1)/3. return codon_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_mm_df(sralist):\n\n def convert_to_codon(nts_array):\n \"\"\"\n pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.\n This function converts nucleotide arrays to codon length (nts to codon resolution):\n \"\"\"\n \n nts_arra...
[ "0.55647254", "0.49846494", "0.48036072", "0.4783149", "0.47653148", "0.47555912", "0.47205758", "0.47141117", "0.46780866", "0.4677644", "0.46744534", "0.4664888", "0.46561313", "0.4651731", "0.46359685", "0.46011293", "0.4596688", "0.45749894", "0.45679227", "0.45530605", "...
0.0
-1
get per gene average multimapping score
def compute_mm(mmdata): mm_df = pd.DataFrame(columns=['ORF', 'MM']) counter = 0 for gene in mmdata.keys(): current_matrix = mmdata[gene] current_avrg = np.mean( np.sum(current_matrix, 1) / current_matrix.shape[1] ) mm_df.loc[counter] = [gene, current_avrg] counter += 1 return mm_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_avg_score(df, score):\n avg_score = (df.groupby(['condition', 'gene_a', 'gene_b'])\n .agg({score: 'mean'})\n .reset_index())\n return avg_score", "def compute_ave_score_w_sample(genes, samples):\n\n scores = np.zeros(len(genes), dtype=np.uint32)\n\n for i, v in...
[ "0.6643462", "0.6452144", "0.62165403", "0.6171676", "0.60874385", "0.60116637", "0.5967243", "0.59489244", "0.5940046", "0.5897672", "0.5886396", "0.5863131", "0.584357", "0.58260745", "0.58122647", "0.58121586", "0.5747593", "0.5738639", "0.5724953", "0.5721815", "0.5719977...
0.0
-1
Determine relevant entries in crkeng.xml and build a smaller xml file for testing.
def build_test_xml(): crkeng_file_path = find_latest_xml_file(shared_res_dir / "dictionaries") print(f"Building test dictionary files using {crkeng_file_path.name}") crkeng_root = ET.parse(str(crkeng_file_path)).getroot() # relevant entries in crkeng.xml file we want to determine relevant_xml_ls: Set[str] = set() xml_ls: Set[str] = set() crkeng_entries = crkeng_root.findall(".//e") for element in crkeng_entries: xml_l = extract_l_str(element) xml_ls.add(xml_l) test_words = get_test_words() print(f"Analyzing xml l elements and test words") word_to_analyses = morphodict.analysis.relaxed_analyzer().bulk_lookup( xml_ls | test_words ) print("Analysis done") test_word_lemmas: Set[str] = set() for test_word in test_words: for analysis in word_to_analyses[test_word]: lemma = fst_analysis_parser.extract_lemma(analysis) if lemma is None: logger.warn( "Skipping test word: %s. " "Could not extract lemma from its analysis: %s", test_word, analysis, ) continue test_word_lemmas.add(lemma) for xml_l in tqdm(xml_ls, desc="screening relevant entries in crkeng.xml"): if xml_l in test_words: relevant_xml_ls.add(xml_l) continue for xml_l_analysis in word_to_analyses[xml_l]: xml_lemma = partition_analysis(xml_l_analysis)[1] for test_word_lemma in test_word_lemmas: if test_word_lemma == xml_lemma: relevant_xml_ls.add(xml_l) break relevant_crkeng_entries = [] for element in crkeng_entries: xml_l = extract_l_str(element) if xml_l in relevant_xml_ls: relevant_crkeng_entries.append(element) crkeng_xml_utils.write_xml_from_elements( list(crkeng_root.findall(".//source")) + relevant_crkeng_entries, shared_res_dir / "test_dictionaries" / "crkeng.xml", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture...
[ "0.61072654", "0.5717132", "0.5615994", "0.5543528", "0.55028045", "0.53621995", "0.5347263", "0.5297309", "0.5269815", "0.5254691", "0.52032775", "0.5177183", "0.5130147", "0.5118681", "0.51131713", "0.51111734", "0.5094071", "0.50852966", "0.5085046", "0.50843346", "0.50589...
0.72865844
0
r"""The constructor for Config class Initializes the Config class
def __init__(self, config_file_name="config.json"): with open(config_file_name, "r") as config: f = dict(json.load(config)) for key, value in f.items(): setattr(self, key, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n self._cfg = ConfigDict() # current configuration\r\n self._default_config = ConfigDict() # default configuration\r\n self._temp_config = OrderedDict() # temporary configuration\r\n self._path = Path() # current c...
[ "0.8203434", "0.7962474", "0.78200513", "0.77837414", "0.77558804", "0.77334917", "0.77334917", "0.7729926", "0.7703412", "0.7703412", "0.7703412", "0.7694449", "0.7644489", "0.7622834", "0.75704795", "0.7559002", "0.754742", "0.75234354", "0.75190806", "0.74873096", "0.74624...
0.69338214
52
r"""Class constructor for Config
def __init__(self, config_file_name="config.json"): self.config_file_name = config_file_name self._config = self._open_config_file()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, config: Dict[str, Any]) -> None:\n self.config = config", "def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n self._cfg = ConfigDict() # current configuration\r\n self._default_config = ConfigDict() # default configuration\r\n self._temp_config...
[ "0.8146151", "0.81249666", "0.7914005", "0.7902959", "0.7902959", "0.78517556", "0.7830942", "0.78164166", "0.7816199", "0.78047544", "0.78047544", "0.78047544", "0.7794701", "0.7631528", "0.7613544", "0.76106983", "0.7570277", "0.7570083", "0.7505082", "0.7465383", "0.745413...
0.7287139
22
Load the config file
def _open_config_file(self): try: with open(self.config_file_name,encoding='utf-8') as json_data_file: conf = json.load(json_data_file) return conf except FileNotFoundError: with open(self.config_file_name, 'w',encoding='utf-8') as json_data_file: json.dump({},json_data_file,indent=2) return {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_config(self):\n pass", "def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)", "def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n...
[ "0.8379018", "0.8353927", "0.8132022", "0.8111965", "0.8104528", "0.79837865", "0.7953077", "0.78238404", "0.7770456", "0.77264285", "0.77023524", "0.7679735", "0.7665702", "0.7665702", "0.76194495", "0.76163685", "0.7611368", "0.75783414", "0.7574488", "0.7554147", "0.752987...
0.0
-1
Saves the config file
def save_config_file(self): with open(self.config_file_name, 'w',encoding='utf-8') as outfile: json.dump(self._config, outfile,indent=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save_config(self):\n config.save_config(self.config, self.config_file)", ...
[ "0.9018369", "0.89983326", "0.87025833", "0.8437155", "0.83241445", "0.8323876", "0.8261388", "0.82597125", "0.8157242", "0.8059971", "0.80546784", "0.805303", "0.7990608", "0.79761726", "0.79709685", "0.79246825", "0.7923481", "0.7910758", "0.7910608", "0.78848404", "0.78846...
0.8239592
8
Return value stored in config
def get(self, key, default_val=None): if key not in self._config.keys(): # we don't want KeyError return default_val # just return None if not found return self._config[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_value(self, name):\r\n if name in self.config_values:\r\n return self.config_values[name]", "def _get_config_value(self, section, key):\n return config.get(section, key)", "def value(self) -> str:\n return self._config.get('value')", "def get(self, key):\n ...
[ "0.8236148", "0.78571916", "0.78411025", "0.77996963", "0.7762611", "0.7640735", "0.7605088", "0.7578148", "0.7571694", "0.7504212", "0.7446086", "0.7428434", "0.7364911", "0.7358118", "0.7330266", "0.73145777", "0.7290636", "0.72804093", "0.7263993", "0.7168558", "0.7160482"...
0.0
-1
Update the config file
def update(self): self.save_config_file()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conf_update(self):\n pass", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = sel...
[ "0.8264877", "0.7606795", "0.74441475", "0.726482", "0.7236753", "0.72217864", "0.7091417", "0.7060331", "0.70171785", "0.70084494", "0.6937647", "0.69036394", "0.6887495", "0.68652135", "0.6852088", "0.6795112", "0.679408", "0.67330885", "0.6727831", "0.6705917", "0.66979545...
0.88768095
0
FILL COLUMN2 WITH MOST LIKELY VALUES BASED ON COLUMN1
def fillgaps(column1,column2,train,test): ddict={} d1=test[[column1,column2]].dropna().values d2=train[[column1,column2]].dropna().values c1=np.array(d1[:,0].tolist()+d2[:,0].tolist()) c2=np.array(d1[:,1].tolist()+d2[:,1].tolist()) for ic1 in np.unique(c1): ddict[ic1]=(c2[c1==ic1].mean(),c2[c1==ic1].std()) full_data = [train, test] for dataset in full_data: for missing in np.where(np.isnan(dataset[column2]))[0]: m,s=ddict[dataset[column1][missing]] if s<=0: dataset[column2][missing]=m else: dataset[column2][missing]=np.random.normal(loc=m,scale=s,size=1) return (train,test)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fill_col1_val_where_col2_notna(col1, col2, val):\n fill_ser = col1.copy()\n fill_ser[col2.notna()] = val\n return col1.fillna(fill_ser)", "def fill_col(col, x):\n col.append(x)\n return col", "def merge(line):\n #Step1. Putting 0 to the end of the list.\n result = []\n for cell in ...
[ "0.6066896", "0.5588243", "0.5520393", "0.5153865", "0.5142474", "0.5100762", "0.50284475", "0.50032073", "0.4990765", "0.4952544", "0.49398243", "0.49170038", "0.4903504", "0.4887256", "0.48762384", "0.48469424", "0.48462567", "0.48410118", "0.47721502", "0.47698507", "0.475...
0.57042193
1
Returns true if player has 3 of spades in their hand.
def has_3_spades(self): if Card('3', 'spades') in self.hand: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_three_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 3:\n self.rank_per_hand['2'] = \"three of a kind\"\n return True\n return False", "def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n...
[ "0.72666436", "0.7080395", "0.70760804", "0.6595992", "0.65198547", "0.6504807", "0.6470795", "0.6453197", "0.63943964", "0.6391937", "0.6380108", "0.63546914", "0.63495284", "0.63185066", "0.62931806", "0.6260325", "0.61777973", "0.61756945", "0.61107355", "0.6093892", "0.60...
0.89362204
0
Return all components that match the given type and filter
def queryComponent(type=None, filter=None, all=0):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_components(self, filter_type=None):\n\n if filter_type is None:\n out = self.components\n elif isinstance(filter_type, str):\n out = {}\n cls = co.str_to_comp(filter_type)\n for comp in self.get_components():\n if isinstance(self.comp...
[ "0.7252907", "0.6754801", "0.6730381", "0.66894734", "0.6564939", "0.6288751", "0.6273336", "0.62642753", "0.6115402", "0.5971226", "0.59191287", "0.5848749", "0.58480895", "0.5832433", "0.5759755", "0.5755212", "0.56894547", "0.56826574", "0.5639281", "0.56076664", "0.559276...
0.74276376
0
Inform a service component that it is providing a service Called when an immediatelycontaining service manager binds this object to perform the named service.
def bound(name):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service(self, service):\n \n self._service = service", "def register_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_register_service_description()\n self._register(description, \"registering agent's service on the SOEF...
[ "0.6836905", "0.6561478", "0.63746643", "0.62869096", "0.61396533", "0.61276996", "0.6079517", "0.6064858", "0.60322297", "0.60286254", "0.6019076", "0.6012391", "0.60076535", "0.5998224", "0.5997815", "0.5986478", "0.5975217", "0.59336156", "0.59249896", "0.5881597", "0.5861...
0.0
-1
Inform a service component that it is no longer providing a service Called when an immediatelycontaining service manager unbinds this object from performing the named service.
def unbound(name):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_service_departure(self, svc_ref):\n with self._lock:\n if svc_ref is self.reference:\n # Injected service going away...\n service = self._value\n\n # Clear the instance values\n self._current_ranking = None\n self._...
[ "0.770065", "0.717073", "0.69855356", "0.6912879", "0.6827076", "0.67656344", "0.66443497", "0.64091897", "0.6407706", "0.63915473", "0.6383925", "0.63309795", "0.6218311", "0.61347353", "0.612612", "0.6044134", "0.60432863", "0.60367554", "0.60173887", "0.60062134", "0.60048...
0.0
-1
checkKey is used to check for authentication
def checkKey(self): # TO DO for checking API authentication if self.apikey is None: return False else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_auth_publickey(self, username, key):\n return AUTH_FAILED", "def _check_key(self, key):\n raise NotImplementedError", "def api_key_check():\n req_path = request.path\n method_type = request.method\n app.logger.info(\">>> path = {}, method = {}\".format(req_path, method_type))\n...
[ "0.7483722", "0.7159838", "0.7102654", "0.70746124", "0.70519286", "0.69825", "0.69745266", "0.6936464", "0.6857674", "0.6834089", "0.67978585", "0.6719186", "0.661024", "0.65871054", "0.64848256", "0.6452751", "0.6445985", "0.644107", "0.6375559", "0.6362564", "0.6354385", ...
0.76604617
0
This function is used to update API endpoints
def update(self): # TO DO for updating urls if changed pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_api(self) -> None:\n LOG.debug(\"%sTrying to update RestAPI through client\", self.log_prefix)\n response_put = cast(\n Dict,\n self._api_client.put_rest_api(restApiId=self._api_physical_id, mode=\"overwrite\", body=self._swagger_body),\n )\n LOG.debug(...
[ "0.74739033", "0.7205048", "0.71335983", "0.66851205", "0.6498618", "0.6447019", "0.63703203", "0.62712914", "0.6245343", "0.6233306", "0.62273663", "0.6156109", "0.6128922", "0.6088179", "0.6088179", "0.6088179", "0.60405344", "0.60191697", "0.6019072", "0.6019072", "0.60190...
0.6032268
17
getData is used to get satellite json data from the server.
def getSearch(self, satellite: str, startDate: str, endDate: str, latitude: float, longitude: float, minCloudCover=None, maxCloudCover=None, minCoverage=None, maxCoverage=None, ) -> list: if satellite.lower() == 'landsat8': satellite = 'l8' minCloudCover = self.minCloudCover if minCloudCover is None else minCloudCover maxCloudCover = self.maxCloudCover if maxCloudCover is None else maxCloudCover minCoverage = self.minCoverage if minCoverage is None else minCoverage maxCoverage = self.maxCoverage if maxCoverage is None else maxCoverage param = { 'table_name': 'satellite_dataset_prod', 'satellite': satellite.lower(), 'start_date': startDate, 'end_date': endDate, 'min_cloudcover': int(minCloudCover), 'max_cloudcover': int(maxCloudCover), 'min_coverage': int(minCoverage), 'max_coverage': int(maxCoverage), 'x': float(longitude), 'y': float(latitude) } try: response = requests.get(url=self.searchEndpoint, params=param) except Exception as e: raise exceptions( 'Unable to reach to server. \ Make sure url is correct, updated and \ you are connected to Internet. Error : {}'.format(e)) return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n ...
[ "0.7276632", "0.7044919", "0.7044919", "0.7044919", "0.6988719", "0.6904598", "0.67315245", "0.67315245", "0.6711421", "0.6699119", "0.66586465", "0.6641715", "0.65758157", "0.65712667", "0.6570018", "0.6539462", "0.6526975", "0.64456314", "0.64355475", "0.6282369", "0.626784...
0.0
-1
getData is used to get satellite json data from the server.
def getValue(self, url: str, latitude: list, longitude: list, satellite='l8', index='ndvi'): if type(latitude) is not list: latitude = [str(latitude)] else: latitude = [str(l) for l in latitude] if type(longitude) is not list: longitude = [str(longitude)] else: longitude = [str(l) for l in longitude] param = { 'url': url, 'x': ','.join(longitude), 'y': ','.join(latitude), 'satellite': satellite, 'index': index } try: response = requests.get(url=self.valueEndpoint, params=param) except Exception as e: raise exceptions( 'Unable to reach to value endpoint. Error: {}'.format(e)) return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n ...
[ "0.72779673", "0.7046692", "0.7046692", "0.7046692", "0.6990083", "0.6905413", "0.67326444", "0.67326444", "0.67116404", "0.67000073", "0.6659789", "0.66423845", "0.657657", "0.65723705", "0.6571265", "0.6539489", "0.6527483", "0.64475584", "0.6434517", "0.6281322", "0.626736...
0.0
-1
getData is used to get satellite json data from the server.
def getStats(self, url: str, latitude: list, longitude: list, satellite='l8', index='ndvi'): if type(latitude) is not list: latitude = [str(latitude)] else: latitude = [str(l) for l in latitude] if type(longitude) is not list: longitude = [str(longitude)] else: longitude = [str(l) for l in longitude] param = { 'url': url, 'x': ','.join(longitude), 'y': ','.join(latitude), 'satellite': satellite, 'index': index } try: response = requests.get(url=self.statsEndpoint, params=param) except Exception as e: raise exceptions( 'Unable to reach to statistics endpoint. Error: {}'.format(e)) return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n ...
[ "0.72779673", "0.7046692", "0.7046692", "0.7046692", "0.6990083", "0.6905413", "0.67326444", "0.67326444", "0.67116404", "0.67000073", "0.6659789", "0.66423845", "0.657657", "0.65723705", "0.6571265", "0.6539489", "0.6527483", "0.64475584", "0.6434517", "0.6281322", "0.626736...
0.0
-1
getData is used to get satellite json data from the server.
def getTimelineValue(self, url: str, latitude: float, longitude: float, startDate: str, endDate: str, minCloudCover=None, maxCloudCover=None, minCoverage=None, maxCoverage=None, satellite='l8', index='ndvi'): latitude = str(latitude) longitude = str(longitude) minCloudCover = self.minCloudCover if minCloudCover is None else minCloudCover maxCloudCover = self.maxCloudCover if maxCloudCover is None else maxCloudCover minCoverage = self.minCoverage if minCoverage is None else minCoverage maxCoverage = self.maxCoverage if maxCoverage is None else maxCoverage param = { 'url': url, 'x': str(longitude), 'y': str(latitude), 'satellite': satellite.lower(), 'index': index, 'start_date': startDate, 'end_date': endDate, 'min_cloudcover': int(minCloudCover), 'max_cloudcover': int(maxCloudCover), 'min_coverage': int(minCoverage), 'max_coverage': int(maxCoverage), } try: response = requests.get(url=self.timelineValueEndpoint, params=param) except Exception as e: raise exceptions( 'Unable to reach to value endpoint. Error: {}'.format(e)) return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n ...
[ "0.72748417", "0.7042909", "0.7042909", "0.7042909", "0.6986527", "0.69044787", "0.6730157", "0.6730157", "0.67095935", "0.6698002", "0.6657195", "0.66411364", "0.6575214", "0.65689933", "0.6568993", "0.6539387", "0.6526602", "0.64448017", "0.643305", "0.62808186", "0.6266045...
0.0
-1
getData is used to get satellite json data from the server.
def getTimelineStats(self, url: str, latitude: list, longitude: list, startDate: str, endDate: str, minCloudCover=None, maxCloudCover=None, minCoverage=None, maxCoverage=None, satellite='l8', index='ndvi'): if type(latitude) is not list: latitude = [str(latitude)] else: latitude = [str(l) for l in latitude] if type(longitude) is not list: longitude = [str(longitude)] else: longitude = [str(l) for l in longitude] minCloudCover = self.minCloudCover if minCloudCover is None else minCloudCover maxCloudCover = self.maxCloudCover if maxCloudCover is None else maxCloudCover minCoverage = self.minCoverage if minCoverage is None else minCoverage maxCoverage = self.maxCoverage if maxCoverage is None else maxCoverage param = { 'url': url, 'x': ','.join(longitude), 'y': ','.join(latitude), 'satellite': satellite.lower(), 'index': index, 'start_date': startDate, 'end_date': endDate, 'min_cloudcover': int(minCloudCover), 'max_cloudcover': int(maxCloudCover), 'min_coverage': int(minCoverage), 'max_coverage': int(maxCoverage), } try: response = requests.get(url=self.timelineStatsEndpoint, params=param) except Exception as e: raise exceptions( 'Unable to reach to value endpoint. Error: {}'.format(e)) return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n ...
[ "0.7276632", "0.7044919", "0.7044919", "0.7044919", "0.6988719", "0.6904598", "0.67315245", "0.67315245", "0.6711421", "0.6699119", "0.66586465", "0.6641715", "0.65758157", "0.65712667", "0.6570018", "0.6539462", "0.6526975", "0.64456314", "0.64355475", "0.6282369", "0.626784...
0.0
-1
make the cosmos and DES meds files
def make_all_cosmos_des(run, cosmos_config, des_config, catfile, tileid): flist = files.get_cosmos_flist(tileid) cosmos_meds = files.get_meds_file(run, tileid, 'cosmos','i') print('making cosmos MEDS:',cosmos_meds) maker = CosmosMEDSMaker( config_path=cosmos_config, catname=catfile, flistname=flist, ) maker.write(cosmos_meds) for band in ['u','g','r','i','z']: band_flist = files.get_des_flist(band) band_meds = files.get_meds_file(run, tileid, 'des',band) print('making DES MEDS:',band_meds) maker = CosmosMEDSMaker( config_path=des_config, catname=cosmos_meds, flistname=band_flist, ) maker.write(band_meds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_data_raw (mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with g...
[ "0.637841", "0.6184567", "0.6101035", "0.6063938", "0.59966964", "0.5898186", "0.5831032", "0.5792886", "0.56806254", "0.5673659", "0.5642974", "0.56186104", "0.5612226", "0.5589771", "0.55712795", "0.55688566", "0.55426204", "0.5509481", "0.5476108", "0.5453811", "0.5441293"...
0.7661663
0
write compressed meds file
def write(self, filename): assert filename[-3:]=='.fz','name must end in .fz' files.makedir_fromfile(filename) ucfilename=filename[0:-3] bname = os.path.basename(ucfilename) tmp_path = os.path.join( files.get_temp_dir(), bname, ) files.makedir_fromfile(tmp_path) with TempFile(tmp_path) as tfile: super(CosmosMEDSMaker,self).write(tfile.path) self._compress_meds_file(tfile.path, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_compressed(self, filename):\n\n # Define which molecules to use \n # (counting indices of processed data set)\n indices = np.arange(len(self))\n # All charges and position arrays have the same size\n # (the one of the biggest molecule)\n size = np.max( self.num_a...
[ "0.66734606", "0.6449204", "0.61703485", "0.6010072", "0.5976385", "0.5969885", "0.59663653", "0.59331244", "0.5882864", "0.5877286", "0.5830967", "0.5827414", "0.5769662", "0.574796", "0.57416415", "0.5724244", "0.5712031", "0.56908506", "0.5682276", "0.5674419", "0.56734663...
0.6046685
3
build the object data, filling in the stub we read note position offsets appear nowhere in this function
def _build_meds_layout(self):
    """
    Build the object data, filling in the stub that was read in.

    Fills per-cutout positions, start rows/cols, and jacobians for every
    image the object lands in, then resizes the struct and sets up the
    psf layout.  Note position offsets appear nowhere in this function.
    """
    nim = self.image_info.size
    # NOTE(review): nobj is never used below
    nobj = self.obj_data.size

    trim_to_coadd = self.get('trim_to_coadd',False)
    if trim_to_coadd:
        # keep only objects whose position falls within the bounds of the
        # coadd image (file_id 0)
        print(' trimming to coadd')
        coadd_wcs, coadd_pos, coadd_bnds, coadd_q = \
            self._get_pos_and_bounds(self.obj_data, 0)

        in_bnds = coadd_bnds.contains_points(coadd_pos['zrow'], coadd_pos['zcol'])
        w_in_bnds, = np.where(in_bnds == True)
        assert w_in_bnds.size > 0,"none found in coadd"

        # map back to indices into the original obj_data
        w_in_bnds = coadd_q[w_in_bnds]
        self.obj_data = self.obj_data[w_in_bnds]

    self._do_psf_setup()

    # box sizes are even
    half_box_size = self.obj_data['box_size']//2

    for file_id in range(nim):
        # positions and image bounds for all objects in this epoch image
        wcs, pos, bnds, q = self._get_pos_and_bounds(self.obj_data, file_id)

        # do the test
        in_bnds = bnds.contains_points(pos['zrow'], pos['zcol'])
        q_rc, = np.where(in_bnds == True)
        print(' second cut: %6d of %6d objects' % (len(q_rc),len(q)))

        # now make sure everything is there
        if self['check_in_first_image']:
            # by convention the first image is the coadd/detection image, so
            # every object must appear in it
            if file_id == 0 and len(self.obj_data['ra']) != len(q_rc):
                raise MEDSCreationError('Not all objects were found in first image for '
                                        'MEDS making (which is the coadd/detection '
                                        'image by convention).')

        # compose them
        q = q[q_rc]

        # fill in the object_data structure

        # note q_rc since pos was created using obj_data[q]
        qrow = pos['zrow'][q_rc]
        qcol = pos['zcol'][q_rc]

        # icut is the next free cutout slot for each selected object
        icut = self.obj_data['ncutout'][q]
        self.obj_data['file_id'][q,icut] = file_id
        self.obj_data['orig_row'][q,icut] = qrow
        self.obj_data['orig_col'][q,icut] = qcol

        # this results in the object center being close to
        # the natural center (dim-1.)/2.
        ostart_row = qrow.astype('i4') - half_box_size[q] + 1
        ostart_col = qcol.astype('i4') - half_box_size[q] + 1
        crow = qrow - ostart_row
        ccol = qcol - ostart_col

        self.obj_data['orig_start_row'][q,icut] = ostart_row
        self.obj_data['orig_start_col'][q,icut] = ostart_col
        self.obj_data['cutout_row'][q,icut] = crow
        self.obj_data['cutout_col'][q,icut] = ccol

        # do jacobian, in original, not-offset coords
        # note q_rc since pos was created using self.obj_data[q]
        jacob = wcs.get_jacobian(
            x=pos['wcs_col'][q_rc],
            y=pos['wcs_row'][q_rc])

        # jacob is a tuple of arrays
        self.obj_data['dudcol'][q,icut] = jacob[0]
        self.obj_data['dudrow'][q,icut] = jacob[1]
        self.obj_data['dvdcol'][q,icut] = jacob[2]
        self.obj_data['dvdrow'][q,icut] = jacob[3]

        # increment
        self.obj_data['ncutout'][q] += 1

    w,=np.where(self.obj_data['ncutout'] > 0)
    print('%d/%d had ncut > 0' % (w.size, self.obj_data.size))
    #self.obj_data = self.obj_data[w]

    # shrink the per-cutout arrays to the actual maximum ncutout
    self.obj_data = self._make_resized_data(self.obj_data)
    print('setting number field as sequential')
    self.obj_data['number'] = 1+np.arange(self.obj_data.size)

    self._set_start_rows_and_pixel_count()

    # HST-style single psf per object vs per-epoch PSFEx reconstruction
    if self['survey']=='cosmos':
        self._set_psf_layout_hst()
    else:
        self._set_psf_layout_psfex()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_raw_data(self, idx: int):\n info = super().prepare_raw_data(idx)\n if self.cache_reader is not None:\n self.human_data = self.cache_reader.get_item(idx)\n idx = idx % self.cache_reader.slice_size\n\n if 'smplx' in self.human_data:\n smplx_dict = sel...
[ "0.6157046", "0.6072608", "0.58471173", "0.58050644", "0.57371134", "0.5713524", "0.5703575", "0.5679709", "0.56751573", "0.5645145", "0.5609161", "0.5597858", "0.5583448", "0.556738", "0.55536085", "0.5551123", "0.5549294", "0.5537869", "0.5534043", "0.55129117", "0.55097", ...
0.0
-1
write the cutouts for the specified type
def _write_psf_cutouts_hst(self): print('writing psf cutouts') obj_data=self.obj_data psf_data=self.psf_data nfile=self.image_info.size nobj=obj_data.size cutout_hdu = self.fits['psf'] for iobj in range(nobj): if (iobj+1) % 100 == 0: print(' %d/%d' % (iobj+1,obj_data.size)) # HST psf is same for every cutout, in fact ncut should always # be 1 try: psf_im = self.psf_data.get_psf(iobj) except AttributeError: psf_im = None ncut=obj_data['ncutout'][iobj] for icut in range(ncut): if psf_im is None: row = obj_data['orig_row'][iobj, icut] col = obj_data['orig_col'][iobj, icut] file_id = obj_data['file_id'][iobj,icut] p = self.psf_data[file_id] psf_im = p.get_rec(row,col) expected_psf_shape = ( obj_data['psf_row_size'][iobj,icut], obj_data['psf_col_size'][iobj,icut], ) file_id = obj_data['file_id'][iobj, icut] row = obj_data['orig_row'][iobj, icut] col = obj_data['orig_col'][iobj, icut] start_row = obj_data['psf_start_row'][iobj, icut] if psf_im.shape != expected_psf_shape: raise ValueError("psf size mismatch, expected %s " "got %s" % (expected_psf_shape, psf_im.shape)) cutout_hdu.write(psf_im, start=start_row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _write_moleculetype(top_file: IO, mol_name: str, nrexcl: int = 3):\n top_file.write(\"[ moleculetype ]\\n\")\n top_file.write(\"; Name\\tnrexcl\\n\")\n top_file.write(f\"{mol_name}\\t{nrexcl}\\n\\n\")", "def write(self, out):", "def write_output(self):", "def write(self):", "def write(self):",...
[ "0.55175173", "0.5354752", "0.53262687", "0.52530247", "0.52530247", "0.5227527", "0.5203227", "0.5147186", "0.5113114", "0.5088805", "0.5083531", "0.5081513", "0.5079073", "0.5070704", "0.5031757", "0.50116146", "0.49944216", "0.4976218", "0.49679434", "0.4965163", "0.496383...
0.5732155
0
set the box sizes and start row for each psf image
def _set_psf_layout_hst(self): print('setting psf layout for HST') obj_data=self.obj_data total_psf_pixels = 0 psf_start_row = 0 for iobj in range(obj_data.size): if (iobj+1) % 100 == 0: print(' %d/%d' % (iobj+1,obj_data.size)) # note assuming same psf for all "epochs" psf_im = self.psf_data.get_psf(iobj) psf_shape = psf_im.shape psf_npix = psf_im.size cen = (np.array(psf_shape)-1.0)/2.0 # we will expand the psfs for icut in range(obj_data['ncutout'][iobj]): obj_data['psf_row_size'][iobj,icut] = psf_shape[0] obj_data['psf_col_size'][iobj,icut] = psf_shape[1] obj_data['psf_cutout_row'][iobj,icut] = cen[0] obj_data['psf_cutout_col'][iobj,icut] = cen[1] obj_data['psf_start_row'][iobj,icut] = psf_start_row psf_start_row += psf_npix total_psf_pixels += psf_npix self.total_psf_pixels = total_psf_pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_psf_layout_psfex(self):\n\n print('setting psf layout for PSFEx')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n total_psf_pixels = 0\n\n #psf_npix = psf_size*psf_size\n\n psf_start_row = 0\n for iobj in range(obj_data.size):\n for icut ...
[ "0.6761654", "0.6085298", "0.6009164", "0.58949316", "0.58541226", "0.58446145", "0.5767578", "0.5712032", "0.5706543", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5668548", "0.5650026", "0.5615289", "0.5615289", "0.5593645", "0.55755764", "0.55713177"...
0.62900573
1
set the box sizes and start row for each psf image
def _set_psf_layout_psfex(self): print('setting psf layout for PSFEx') obj_data=self.obj_data psf_data=self.psf_data total_psf_pixels = 0 #psf_npix = psf_size*psf_size psf_start_row = 0 for iobj in range(obj_data.size): for icut in range(obj_data['ncutout'][iobj]): row = obj_data['orig_row'][iobj, icut] col = obj_data['orig_col'][iobj, icut] file_id = obj_data['file_id'][iobj,icut] p = psf_data[file_id] pim = p.get_rec(row,col) cen = p.get_center(row,col) psf_shape = pim.shape psf_npix = pim.size obj_data['psf_row_size'][iobj,icut] = psf_shape[0] obj_data['psf_col_size'][iobj,icut] = psf_shape[1] obj_data['psf_cutout_row'][iobj,icut] = cen[0] obj_data['psf_cutout_col'][iobj,icut] = cen[1] obj_data['psf_start_row'][iobj,icut] = psf_start_row psf_start_row += psf_npix total_psf_pixels += psf_npix self.total_psf_pixels = total_psf_pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_psf_layout_hst(self):\n\n print('setting psf layout for HST')\n obj_data=self.obj_data\n\n total_psf_pixels = 0\n psf_start_row = 0\n\n for iobj in range(obj_data.size):\n if (iobj+1) % 100 == 0:\n print(' %d/%d' % (iobj+1,obj_data.size))\n\n...
[ "0.62900573", "0.6085298", "0.6009164", "0.58949316", "0.58541226", "0.58446145", "0.5767578", "0.5712032", "0.5706543", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5668548", "0.5650026", "0.5615289", "0.5615289", "0.5593645", "0.55755764", "0.55713177...
0.6761654
0
read the cosmos catalog
def _read_catalog(self, catname):
    """
    Read the cosmos catalog and apply the initial match cut.

    parameters
    ----------
    catname: string
        path to a FITS catalog; may itself be a MEDS file, in which
        case the 'object_data' extension is read

    returns
    -------
    the catalog rows matched to the galsim cosmos catalog
    (gscosmos_index >= 0)
    """
    print('loading catalog:',catname)
    with fitsio.FITS(catname, lower=True) as fits:
        #cat = fits[1][100000:110000]
        if 'object_data' in fits:
            # the input is itself a MEDS file
            print('reading from MEDS object data')
            hdu = 'object_data'
        else:
            hdu = 1
        cat = fits[hdu][:]

    # one cut here based on if we matched to the galsim cat
    #(cat['mu_class'] < 3) &
    #(cat['mask']==0) &
    keep, = np.where(cat['gscosmos_index'] >= 0)

    frac = keep.size/cat.size*100
    print('initial cuts %d/%d %g%%' % (keep.size, cat.size, frac))
    return cat[keep]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_catalog(catalog):\n with open(catalog, \"r\") as f:\n header = f.readline()\n if header.startswith('#EventID | Time | Latitude | Longitude | Depth/km'):\n catalog = _read_iris(f)\n elif header.startswith('time, latitude, longitude, depth, depthUnits, magnitude'):\n ...
[ "0.69564354", "0.63602346", "0.6289939", "0.6274737", "0.6120884", "0.60243875", "0.5962401", "0.59330785", "0.5929398", "0.59037805", "0.5888229", "0.58801526", "0.58801526", "0.587579", "0.586561", "0.582441", "0.582154", "0.57935977", "0.57935977", "0.57935977", "0.5793597...
0.67307436
1
add fields from the cat; some will not be in the odata but some will. When copy is True we will copy over the ones that are in both, in some cases
def _add_cat_fields(self, odata, copy=True):
    """
    Add catalog fields to the output struct.

    Fields present in the input catalog but not in odata are appended.
    When copy is True, the required fields (id, ra, dec) and the newly
    appended fields are copied from the catalog; fields already present
    in odata keep their default values.

    parameters
    ----------
    odata: structured array
        the stub output data
    copy: bool
        whether to copy catalog values into the result

    returns
    -------
    a new structured array with the combined fields
    """
    # these are required fields from get_meds_output_dtype that the
    # input catalog always provides
    required = [
        'id',
        'ra',
        'dec',
    ]

    cat = self.cat_orig

    # descriptors in the catalog that odata lacks
    extra_descr = [d for d in cat.dtype.descr if d[0] not in odata.dtype.names]

    out = eu.numpy_util.add_fields(
        odata,
        extra_descr,
    )

    if copy:
        for name in required:
            out[name] = cat[name]

        for d in extra_descr:
            name = d[0]
            if name in required:
                continue
            # don't clobber things that should be left at their
            # default values
            if name not in odata.dtype.names:
                out[name] = cat[name]

    return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(oth...
[ "0.60299045", "0.5626615", "0.55989486", "0.55208635", "0.54832995", "0.54745245", "0.5468035", "0.54594445", "0.5416989", "0.54133993", "0.53944564", "0.5360663", "0.5277778", "0.5271018", "0.52541333", "0.5244835", "0.5185914", "0.518226", "0.5180149", "0.51750094", "0.5171...
0.7400884
0
make a new struct with ncutout-sized arrays based on the actual maximum ncutout
def _make_resized_data(self, odata):
    """
    Make a new struct whose per-cutout arrays are sized to the actual
    maximum ncutout (never less than 2), copying over all fields that
    both structs share.

    parameters
    ----------
    odata: structured array
        the over-allocated object data

    returns
    -------
    a new structured array with right-sized per-cutout fields
    """
    new_nmax = odata['ncutout'].max()
    if new_nmax < 2:
        new_nmax = 2

    nobj = odata.size
    resized = meds.util.get_meds_output_struct(
        nobj,
        new_nmax,
        extra_fields=self._get_fields(new_nmax),
    )
    resized = self._add_cat_fields(resized, copy=False)

    for name in resized.dtype.names:
        if name not in odata.dtype.names:
            continue
        shape = resized[name].shape
        # per-cutout fields get truncated; everything else copies whole
        if len(shape) > 1 and shape[1] == new_nmax:
            resized[name][:, :] = odata[name][:, 0:new_nmax]
        else:
            resized[name][:] = odata[name][:]

    return resized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_maxcut_data_model():\n n = 5\n V = np.arange(0, n, 1)\n E = [(0, 1, 3.0), (1, 2, 2.0), (2, 3, 2.0), (3, 4, 3.0), (4, 0, 1.0), (0, 3, 3.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n return G", "def expanding_max_nb(a, minp=1):\n out = np.empty_like(a...
[ "0.5717705", "0.5661527", "0.5603735", "0.5556727", "0.5533146", "0.54946077", "0.54890066", "0.5484546", "0.54764545", "0.5465916", "0.54387826", "0.54167676", "0.5408915", "0.5392852", "0.5391776", "0.53902745", "0.5386068", "0.53750616", "0.5368087", "0.5362609", "0.535723...
0.58375233
0