content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k)
def gettree(number, count=3):
    """Build a directory-tree path from a number."""
    result = []
    newline = str(number)
    while len(newline) % count:
        newline = '0' + newline
    for i in range(0, len(newline) // count):
        result.append(newline[i * count:i * count + count])
    return result
55fcec36ef3a50a949ed4f2d12103374fcfd13b0
705,121
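A quick illustrative check of gettree above (not part of the dataset; values verified by hand):

# gettree left-pads the number with zeros up to a multiple of `count`,
# then slices it into equal-sized chunks.
assert gettree(12345) == ['012', '345']
assert gettree(7, count=3) == ['007']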
def mergeSort(nums):
    """Merge sort."""
    if len(nums) <= 1:
        return nums
    mid = len(nums) // 2
    # Recursively sort the left and right halves.
    left_nums = mergeSort(nums[:mid])
    right_nums = mergeSort(nums[mid:])
    left_pointer, right_pointer = 0, 0
    result = []
    while left_pointer < len(left_nums) and right_pointer < len(right_nums):
        if left_nums[left_pointer] <= right_nums[right_pointer]:
            result.append(left_nums[left_pointer])
            left_pointer += 1
        else:
            result.append(right_nums[right_pointer])
            right_pointer += 1
    result += left_nums[left_pointer:]
    result += right_nums[right_pointer:]
    return result
708166485cf3e916bbde12edec7057c404ee830d
705,122
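An illustrative usage sketch for mergeSort above (hand-checked inputs, not from the dataset):

# The two halves are sorted recursively, then merged in linear time.
assert mergeSort([5, 2, 4, 1]) == [1, 2, 4, 5]
assert mergeSort([]) == []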
import json


def generate_api_queries(input_container_sas_url, file_list_sas_urls, request_name_base, caller):
    """
    Generate .json-formatted API input from input parameters.

    file_list_sas_urls is a list of SAS URLs to individual file lists (all
    relative to the same container).

    request_name_base is a request name for the set; if the base name is
    'blah', individual requests will get request names of 'blah_chunk000',
    'blah_chunk001', etc.

    Returns both strings and Python dicts:

        return request_strings, request_dicts
    """
    assert isinstance(file_list_sas_urls, list)
    request_dicts = []
    request_strings = []
    # i_url = 0; file_list_sas_url = file_list_sas_urls[0]
    for i_url, file_list_sas_url in enumerate(file_list_sas_urls):
        d = {}
        d['input_container_sas'] = input_container_sas_url
        d['images_requested_json_sas'] = file_list_sas_url
        if len(file_list_sas_urls) > 1:
            chunk_id = '_chunk{0:0>3d}'.format(i_url)
            request_name = request_name_base + chunk_id
        else:
            request_name = request_name_base
        d['request_name'] = request_name
        d['caller'] = caller
        request_dicts.append(d)
        request_strings.append(json.dumps(d, indent=1))
    return request_strings, request_dicts
fa6ba9bbbfa26af9a7d1c6e6aa03d0e53e16f630
705,123
def add_version(match):
    """Return a dict from the version number."""
    return {'VERSION': match.group(1).replace(" ", "").replace(",", ".")}
101578396425aceaacc2827ef6f362c382aaa89b
705,124
def getValueBetweenKey1AndKey2(string, key1, key2):
    """Get the value between key1 and key2.

    Args:
        string: a string containing key1 and key2
        key1: the first keyword
        key2: the second keyword
    Return:
        The value between key1 ... key2 (with surrounding whitespace stripped)
    """
    offset = len(key1)
    start = string.find(key1) + offset
    end = string.find(key2)
    value = string[start:end]
    return value.strip()
02337550db4b9e230261e325443fdeadf90664ee
705,125
def crop_image(image, crop_box):
    """Crop image.

    # Arguments
        image: Numpy array.
        crop_box: List of four ints.

    # Returns
        Numpy array.
    """
    cropped_image = image[crop_box[0]:crop_box[2], crop_box[1]:crop_box[3], :]
    return cropped_image
03ddb9927b82ddfe3ab3a36ec3329b5a980fe209
705,126
import warnings


def reorder(names, faname):
    """Format the string of author names and return a string.

    Adapted from one of the `customization` functions in `bibtexparser`.

    INPUT:
    names -- string of names to be formatted. The names from BibTeX are
             formatted in the style "Last, First Middle and Last, First
             Middle and Last, First Middle" and this is the expected style
             here.
    faname -- string of the initialized name of the author to whom
              formatting will be applied

    OUTPUT:
    nameout -- string of formatted names. The current format is
               "F.M. Last, F.M. Last, and F.M. Last".
    """
    # Set the format tag for the website's owner, to highlight where on
    # the author list the website owner is. Default is **
    my_name_format_tag = '**'
    # Convert the input string to a list by splitting the string at the
    # "and " and strip out any remaining whitespace.
    nameslist = [i.strip() for i in names.replace('\n', ' ').split("and ")]
    # Initialize a list to store the names after they've been tidied up.
    tidynames = []
    # Loop through each name in the list.
    for namestring in nameslist:
        # Strip whitespace from the string
        namestring = namestring.strip()
        # If, for some reason, we've gotten a blank name, skip it
        if len(namestring) < 1:
            continue
        # Split the `namestring` at the comma, but only perform the split once.
        namesplit = namestring.rsplit(',', 1)
        if len(namesplit) == 1:
            namesplit = namestring.rsplit(' ', 1)
            last = namesplit[-1].strip().strip('{}')
            firsts = namesplit[:-1]
        else:
            last = namesplit[0].strip().strip('{}')
            firsts = [i.strip().strip('.') for i in namesplit[1].split()]
        # Now that all the first name edge cases are sorted out, we want to
        # initialize all the first names. Set the variable initials to an
        # empty string so we can add to it. Then loop through each of the
        # items in the list of first names. Take the first element of each
        # item and append a period, but no space.
        initials = ''
        for item in firsts:
            initials += item[0] + '.'
        # Stick all of the parts of the name together in `tidynames`
        tidynames.append(initials + ' ' + last)
    # Find the case of the website author and set the format for that name
    if faname is not None:
        try:
            i = tidynames.index(faname)
            tidynames[i] = my_name_format_tag + tidynames[i] + my_name_format_tag
        except ValueError:
            warnings.warn("Couldn't find {} in the names list. Sorry!".format(faname))
    # Handle the various cases of number of authors and how they should be
    # joined. Convert the elements of `tidynames` to a string.
    if len(tidynames) > 2:
        tidynames[-1] = 'and ' + tidynames[-1]
        nameout = ', '.join(tidynames)
    elif len(tidynames) == 2:
        tidynames[-1] = 'and ' + tidynames[-1]
        nameout = ' '.join(tidynames)
    else:
        # If `tidynames` only has one name, we only need to convert it to a
        # string. The first way that came to mind was to join the list to an
        # empty string.
        nameout = ''.join(tidynames)
    # Return `nameout`, the string of formatted authors
    return nameout
4012add188a3497b582078d7e7e05eeafc95252f
705,127
def array_xy_offsets(test_geo, test_xy):
    """Return upper left array coordinates of test_xy in test_geo

    Args:
        test_geo (): GDAL Geotransform used to calculate the offset
        test_xy (): x/y coordinates in the same projection as test_geo,
            passed as a list or tuple

    Returns:
        x_offset: x coordinate of the upper left of the array
        y_offset: y coordinate of the upper left of the array
    """
    x_offset = int((test_xy[0] - test_geo[0]) / test_geo[1])
    y_offset = int((test_xy[1] - test_geo[3]) / test_geo[5])
    return x_offset, y_offset
5fa67b7df833459f3fc59951a056316f249acc69
705,128
def _sort2D(signal):
    """Revert the operation of _sort.

    Args:
        signal: an instance of numpy.ndarray of two dimensions

    Returns:
        An instance of numpy.ndarray
    """
    to = signal.shape[1]
    for i in range(1, to // 2 + 1, 1):
        temp = signal[:, i].copy()
        signal[:, i:to - 1] = signal[:, i + 1:to]
        signal[:, -1] = temp
    return signal
566b2bbfcee7741cdb01451d6b0250c0fe21b4b5
705,129
import random


def generate_rand_num(n):
    """
    Create n 3-digit random numbers
    :param n:
    :return:
    """
    nums = []
    for i in range(n):
        r = random.randint(100, 999)
        nums.append(r)
    return nums
8e6ef674479767ce45b73807ee90c2d3adaf65ce
705,131
def decode_reponse(response):
    """Return utf-8 string."""
    return response.data.decode("utf-8", "ignore")
3c2c91f08c44db4705feaea525c9c58837fa6d6c
705,132
import copy


def apply_perturbation(X, y, perturbations_info):
    """Application of the perturbations."""
    perturb = perturbations_info[3](X, None, perturbations_info[1], perturbations_info[2])
    X_p, y_p = perturb.apply2features(copy.copy(X)).squeeze(2), copy.copy(y)
    return X_p, y_p
2a7ba4e0286fe81f494f2e2d752532d24e895be4
705,134
def scatterList(z):
    """
    scatterList reshapes the solution vector z of the N-vortex ODE for easy
    2d plotting.
    """
    k = int(len(z) / 2)
    return [z[2 * j] for j in range(k)], [z[2 * j + 1] for j in range(k)]
422bf448ae999f56e92fdc81d05700189122ad0e
705,135
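A small usage sketch for scatterList above, assuming the interleaved layout [x0, y0, x1, y1, ...]:

xs, ys = scatterList([1.0, 2.0, 3.0, 4.0])
assert xs == [1.0, 3.0]  # even indices
assert ys == [2.0, 4.0]  # odd indices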
def provides_facts():
    """
    Returns a dictionary keyed on the facts provided by this module. The
    value of each key is the doc string describing the fact.
    """
    return {
        "switch_style": "A string which indicates the Ethernet "
                        "switching syntax style supported by the device. "
                        "Possible values are: 'BRIDGE_DOMAIN', 'VLAN', "
                        "'VLAN_L2NG', or 'NONE'.",
    }
d41f97df8a24b67d929017fc6c20596a70ba18cd
705,136
import os


def linux_sys_new():
    """Read battery status from /sys/class/power_supply."""
    path = None
    # Some systems seem to have BAT1 but not BAT0, so use the first one we
    # encounter.
    for i in range(0, 4):
        p = '/sys/class/power_supply/BAT{}'.format(i)
        if os.path.exists(p):
            path = p
            break
    if path is None:
        return False
    if not os.path.exists('{}/energy_now'.format(path)):
        return False
    r = lambda f: open('{}/{}'.format(path, f), 'r').read().strip()
    ri = lambda f: int(r(f))
    status = r('status')
    if status == 'Charging':
        ac = True
        charging = True
    elif status == 'Discharging':
        ac = False
        charging = False
    elif status == 'Full':
        ac = True
        charging = False
    else:
        ac = False
        charging = False
    percent = ri('capacity')
    drain_rate = ri('power_now')
    full_capacity = ri('energy_full')
    remaining = ri('energy_now')
    if charging:
        lifetime = (full_capacity - remaining) / drain_rate * 60
    elif drain_rate > 0:
        lifetime = remaining / drain_rate * 60
    else:
        lifetime = -1
    return (1, ac, charging, percent, lifetime)
fabc9d22b0415dc84057e04da3223ac44e747118
705,137
def rgb_to_hex_string(value):
    """Convert from an (R, G, B) tuple to a hex color.

    :param value: The RGB value to convert
    :type value: tuple

    R, G and B should be in the range 0.0 - 1.0
    """
    color = ''.join(['%02x' % x1 for x1 in [int(x * 255) for x in value]])
    return '#%s' % color
6449d5ecf8f3134ca320c784293d8ece44a84148
705,138
def _rgb_to_hex_string(rgb: tuple) -> str:
    """Convert RGB tuple to hex string."""
    def clamp(x):
        return max(0, min(x, 255))
    return "#{0:02x}{1:02x}{2:02x}".format(clamp(rgb[0]), clamp(rgb[1]), clamp(rgb[2]))
eafd166a67ac568cfad3da1fa16bdfcd054a914a
705,139
import math


def update_one_contribute_score(user_total_click_num):
    """
    itemcf: update sim contribution score by user
    """
    return 1 / math.log10(1 + user_total_click_num)
b6dadc87150e33e1ba2d806e18856f10fd43035a
705,140
def default_interest_payment_date():
    """Default values for an interest payment date object."""
    return {
        f'interestPaymentDate{index}': '' for index in range(1, 13)
    }
77d51cd5c7c76347a5c53e3d816985eeac1a568b
705,141
import torch
import copy


def clones(module, N):
    """Produce N identical layers."""
    return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
2def7cf89def4d598253ca48cb04e670ecb54dfd
705,142
import math


def bl2xy(lon: float, lat: float):
    """
    CGCS2000 (China Geodetic Coordinate System 2000): convert geographic
    coordinates to plane coordinates, using 3-degree zones.

    Param:
        lon (float): longitude
        lat (float): latitude
    Returns:
        (x, y): x corresponds to longitude, y corresponds to latitude
    """
    # 3.1415926535898/180.0
    iPI = 0.0174532925199433
    # 3-degree zone width
    zoneWide = 3
    # semi-major axis
    a = 6378137
    # flattening
    f = 1 / 298.257222101
    projNo = int(lon / zoneWide)
    longitude0 = projNo * 3
    longitude0 = longitude0 * iPI
    longitude1 = lon * iPI
    latitude1 = lat * iPI
    e2 = 2 * f - f * f
    ee = e2 * (1.0 - e2)
    NN = a / math.sqrt(1.0 - e2 * math.sin(latitude1) * math.sin(latitude1))
    T = math.tan(latitude1) * math.tan(latitude1)
    C = ee * math.cos(latitude1) * math.cos(latitude1)
    A = (longitude1 - longitude0) * math.cos(latitude1)
    M = a * ((1 - e2 / 4 - 3 * e2 * e2 / 64 - 5 * e2 * e2 * e2 / 256) * latitude1
             - (3 * e2 / 8 + 3 * e2 * e2 / 32 + 45 * e2 * e2 * e2 / 1024) * math.sin(2 * latitude1)
             + (15 * e2 * e2 / 256 + 45 * e2 * e2 * e2 / 1024) * math.sin(4 * latitude1)
             - (35 * e2 * e2 * e2 / 3072) * math.sin(6 * latitude1))
    xval = NN * (A + (1 - T + C) * A * A * A / 6
                 + (5 - 18 * T + T * T + 72 * C - 58 * ee) * A * A * A * A * A / 120)
    yval = M + NN * math.tan(latitude1) * (A * A / 2
                 + (5 - T + 9 * C + 4 * C * C) * A * A * A * A / 24
                 + (61 - 58 * T + T * T + 600 * C - 330 * ee) * A * A * A * A * A * A / 720)
    X0 = 1000000 * projNo + 500000
    Y0 = 0
    xval = xval + X0
    yval = yval + Y0
    return (xval, yval)
4f2166d7878998da5373a4fa6aff5fcee6f32c61
705,143
def woodbury_solve(vector, low_rank_mat, woodbury_factor, shift):
    r"""
    Solves the system of equations :math:`(sigma*I + VV')x = b`
    using the Woodbury formula.

    Input:
        - vector (size n) - right hand side vector b to solve with.
        - woodbury_factor (k x n) - The result of calling woodbury_factor on
          V and the shift, \sigma
        - shift (vector) - shift value sigma
    """
    if vector.ndimension() > 1:
        shift = shift.unsqueeze(-1)
    right = low_rank_mat.transpose(-1, -2).matmul(woodbury_factor.matmul(vector / shift))
    return (vector - right) / shift
92b25fe675671408c560008e4093c1e4b35d3c42
705,144
def get_offset_from_var(var):
    """
    Helper for get_variable_sizes(). Use this to calculate var offset.
    e.g. var_90, __saved_edi --> 144, -1
    """
    instance = False
    i = 0
    # Parse string
    i = var.rfind(' ') + 1
    tmp = var[i:-1]
    # Parse var
    if tmp[0] == 'v':
        tmp = tmp[4:]
        j = tmp.find('_')
        # Handles SSA var instances (var_14_1) and converts c, 58, 88 --> 12, 88, 136
        if j != -1:
            tmp = tmp[:j]
            instance = True
        else:
            instance = False
        try:
            tmp = int(tmp, 16)
        except:
            tmp = -1  # -1 for non vars
    else:
        tmp = -1
    return tmp, instance
6cf58d6dc2ffcb7a78d98ed83c2dbcf05933af76
705,145
def build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):
    """
    Takes as input a set of characters alphabet and three scores diag_score,
    off_diag_score, and dash_score. The function returns a dictionary of
    dictionaries whose entries are indexed by pairs of characters in alphabet
    plus '-'. The score for any entry indexed by one or more dashes is
    dash_score. The score for the remaining diagonal entries is diag_score.
    Finally, the score for the remaining off-diagonal entries is
    off_diag_score.
    """
    alphabet.add('-')
    scoring_matrix = {}
    for first_ltr in alphabet:
        temp = {}
        for sec_ltr in alphabet:
            if first_ltr == sec_ltr and first_ltr != '-':
                temp[sec_ltr] = diag_score
            elif first_ltr == '-' or sec_ltr == '-':
                temp[sec_ltr] = dash_score
            else:
                temp[sec_ltr] = off_diag_score
        scoring_matrix[first_ltr] = temp
    return scoring_matrix
703c3ef7fb6899a46a26d55dae740705b6953adb
705,146
import os


def get_user_pysit_path():
    """Returns the full path to the user's .pysit directory and creates it
    if it does not exist."""
    path = os.path.join(os.path.expanduser('~'), '.pysit')
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
a37f762642ce986dbf3d488bcf5760512d5a0b6c
705,147
import os


def filter_directory(directory: str, extension: str = '.py') -> str:
    """
    Delete all files within the given directory with filenames not ending in
    the given extension
    """
    for root, dirs, files in os.walk(directory):
        for fi in files:
            if not fi.endswith(extension):
                os.remove(os.path.join(root, fi))
    return directory
e0ad4853c6ca8c2337dbd3c7b9901c7e6e9ce6a4
705,149
import struct


def set_real(bytearray_: bytearray, byte_index: int, real) -> bytearray:
    """Set Real value

    Notes:
        Datatype `real` is represented in 4 bytes in the PLC.
        The packed representation uses the `IEEE 754 binary32`.

    Args:
        bytearray_: buffer to write to.
        byte_index: byte index to start writing from.
        real: value to be written.

    Returns:
        Buffer with the value written.

    Examples:
        >>> data = bytearray(4)
        >>> snap7.util.set_real(data, 0, 123.321)
        bytearray(b'B\\xf6\\xa4Z')
    """
    real = float(real)
    real = struct.pack('>f', real)
    _bytes = struct.unpack('4B', real)
    for i, b in enumerate(_bytes):
        bytearray_[byte_index + i] = b
    return bytearray_
bda32caab27adeae7c6710d4c26743b93533ccff
705,150
def get_shape(obj):
    """
    Get the shape of a :code:`numpy.ndarray` or of a nested list.

    Parameters:
        obj: The object of which to determine the shape.

    Returns:
        A tuple describing the shape of the :code:`ndarray` or the nested
        list, or :code:`()` if obj is not an instance of either of these
        types.
    """
    if hasattr(obj, "shape"):
        return obj.shape
    elif type(obj) == list:
        if obj == []:
            return (0,)
        else:
            return (len(obj),) + get_shape(obj[0])
    else:
        return ()
d02d755f4b9e4a4dbde6c87ddfe0b5729a8c158e
705,152
from typing import Any
from typing import List


def as_list(x: Any) -> List[Any]:
    """Wrap argument into a list if it is not iterable.

    :param x: a (potential) singleton to wrap in a list.
    :returns: [x] if x is not iterable and x if it is.
    """
    # don't treat strings as iterables.
    if isinstance(x, str):
        return [x]
    try:
        _ = iter(x)
        return x
    except TypeError:
        return [x]
4b1b26857d209a9f5b142908e3a35b1ce7b05be4
705,153
def _sort_destinations(destinations):
    """
    Takes a list of destination tuples and returns the same list, sorted in
    order of the jumps.
    """
    results = []
    on_val = 0
    for dest in destinations:
        if len(results) == 0:
            results.append(dest)
        else:
            while on_val <= len(results):
                if on_val == len(results):
                    results.append(dest)
                    on_val = 0
                    break
                else:
                    if dest[1] > results[on_val][1]:
                        on_val += 1
                    else:
                        results.insert(on_val, dest)
                        on_val = 0
                        break
    return results
302480ef09f4b5a402a5c568c5d35d717db8c851
705,154
import re


def remove_url(text):
    """
    Remove URLs

    :param text: text to transform
    :return: transformed text
    """
    return re.sub(r'http\S+', '', text)
d0f3716808863d5e868da1efc4a7bb16ffa47ac1
705,155
from typing import Dict
import itertools


def format_coco(chip_dfs: Dict, patch_size: int, row_name: str):
    """
    Format train and test chip geometries to COCO json format.

    COCO train and val set have specific ids.
    """
    chip_height, chip_width = patch_size, patch_size
    cocojson = {
        "info": {},
        "licenses": [],
        "categories": [
            {
                "supercategory": "Burned Areas",
                "id": 1,  # id needs to match category_id.
                "name": "agfields_singleclass",
            }
        ],
    }
    for key_idx, key in enumerate(chip_dfs.keys()):
        key_image = {
            "file_name": f"{key}.jpg",
            "id": int(key_idx),
            "height": chip_width,
            "width": chip_height,
        }
        cocojson.setdefault("images", []).append(key_image)
        for row_idx, row in chip_dfs[key]["chip_df"].iterrows():
            # Convert geometry to COCO segmentation format:
            # From shapely POLYGON ((x y, x1 y2, ..)) to COCO [[x, y, x1, y1, ..]].
            # The annotations were encoded by RLE, except for crowd region (iscrowd=1)
            coco_xy = list(
                itertools.chain.from_iterable(
                    (x, y) for x, y in zip(*row.geometry.exterior.coords.xy)
                )
            )
            coco_xy = [round(coords, 2) for coords in coco_xy]
            # Add COCO bbox in format [minx, miny, width, height]
            bounds = row.geometry.bounds  # COCO bbox
            coco_bbox = [
                bounds[0],
                bounds[1],
                bounds[2] - bounds[0],
                bounds[3] - bounds[1],
            ]
            coco_bbox = [round(coords, 2) for coords in coco_bbox]
            key_annotation = {
                "id": key_idx,
                "image_id": int(key_idx),
                "category_id": 1,  # with multiple classes use "category_id": row.reclass_id
                "mycategory_name": "agfields_singleclass",
                "old_multiclass_category_name": row[row_name],
                "bbox": coco_bbox,
                "area": row.geometry.area,
                "iscrowd": 0,
                "segmentation": [coco_xy],
            }
            cocojson.setdefault("annotations", []).append(key_annotation)
    return cocojson
d48c8308a7bc23f737a969c5e4cf55aafb58e74e
705,156
def reversed_complement(string):
    """Find the reverse complement of a DNA string.

    Given: A DNA string Pattern.
    Return: Pattern, the reverse complement of Pattern.
    """
    complements = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return "".join([complements[string[i]] for i in range(len(string))][::-1])
b4ecaf6d2c58a0c14d87122529e316b43082cf54
705,157
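An illustrative check of reversed_complement above (the classic Rosalind example, verified by hand):

# Complement each base, then reverse the whole string.
assert reversed_complement("AAAACCCGGT") == "ACCGGGTTTT"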
def print_subheader(object_type):
    """
    Print out a subheader for a text file.
    """
    return """
#################################################################
# {0}
#################################################################
""".format(object_type)
1ea7185f024ec7dc45a1ccac9f7e2feb6a2a6bf2
705,158
import time


def condor_tables(sqlContext,
                  hdir='hdfs:///project/monitoring/archive/condor/raw/metric',
                  date=None, verbose=False):
    """
    Parse HTCondor records

    Example of HTCondor record on HDFS
    {"data":{"AccountingGroup":"analysis.wverbeke","Badput":0.0,"CMSGroups":"[\"/cms\"]","CMSPrimaryDataTier":"MINIAODSIM","CMSPrimaryPrimaryDataset":"TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8","CMSPrimaryProcessedDataset":"RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext2-v1","CRAB_AsyncDest":"T2_BE_IIHE","CRAB_DataBlock":"/TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext2-v1/MINIAODSIM#291c85fa-aab1-11e6-846b-02163e0184a6","CRAB_ISB":"https://cmsweb.cern.ch/crabcache","CRAB_Id":30,"CRAB_JobArch":"slc6_amd64_gcc530","CRAB_JobSW":"CMSSW_9_2_4","CRAB_JobType":"analysis","CRAB_OutLFNDir":"/store/user/wverbeke/heavyNeutrino/TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/crab_Moriond2017_ext2-v1_ewkinoMCList-v7p1/171111_214448","CRAB_PrimaryDataset":"TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8","CRAB_Publish":false,"CRAB_PublishName":"crab_Moriond2017_ext2-v1_ewkinoMCList-v7p1-00000000000000000000000000000000","CRAB_Retry":0,"CRAB_SaveLogsFlag":true,"CRAB_SiteBlacklist":"[]","CRAB_SiteWhitelist":"[]","CRAB_SubmitterIpAddr":"193.58.172.33","CRAB_TaskEndTime":1513028688,"CRAB_TaskLifetimeDays":30,"CRAB_TaskWorker":"vocms052","CRAB_TransferOutputs":true,"CRAB_UserHN":"wverbeke","CRAB_Workflow":"171111_214448:wverbeke_crab_Moriond2017_ext2-v1_ewkinoMCList-v7p1","Campaign":"crab_wverbeke","ClusterId":20752288,"Cmd":"/data/srv/glidecondor/condor_local/spool/2259/0/cluster20752259.proc0.subproc0/gWMS-CMSRunAnalysis.sh","CommittedCoreHr":0.0,"CommittedSlotTime":0,"CommittedSuspensionTime":0,"CommittedTime":0,"CommittedWallClockHr":0.0,"CoreHr":0.0,"CoreSize":-1,"Country":"Unknown","CpuBadput":0.0,"CpuEff":0.0,"CpuTimeHr":0.0,"CumulativeRemoteSysCpu":0.0,"CumulativeRemoteUserCpu":0.0,"CumulativeSlotTime":0,"CumulativeSuspensionTime":0,"CurrentHosts":0,"DAGNodeName":"Job30","DAGParentNodeNames":"","DESIRED_Archs":"X86_64","DESIRED_CMSDataLocations":"T2_FR_IPHC,T2_CH_CERN_HLT,T1_ES_PIC,T2_DE_DESY,T2_BE_IIHE,T2_CH_CERN,T2_ES_IFCA","DESIRED_CMSDataset":"/TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext2-v1/MINIAODSIM","DESIRED_Overflow_Region":"none,none,none","DESIRED_Sites":["T2_FR_IPHC","T2_CH_CERN_HLT","T1_ES_PIC","T2_DE_DESY","T2_BE_IIHE","T2_CH_CERN","T2_ES_IFCA"],"DataCollection":1510475761000,"DataCollectionDate":1510475761000,"DataLocations":["T2_FR_IPHC","T2_CH_CERN_HLT","T1_ES_PIC","T2_DE_DESY","T2_BE_IIHE","T2_CH_CERN","T2_ES_IFCA"],"DataLocationsCount":7,"DesiredSiteCount":7,"DiskUsage":5032,"DiskUsageGB":0.005032,"EncryptExecuteDirectory":false,"EnteredCurrentStatus":1510436775000,"EstimatedWallTimeMins":1250,"ExecutableSize":9,"ExitBySignal":false,"ExitStatus":0,"GLIDEIN_CMSSite":"Unknown","GlobalJobId":"[email protected]#20752288.0#1510436775","HasSingularity":false,"ImageSize":9,"JOB_CMSSite":"$$(GLIDEIN_CMSSite:Unknown)","JOB_Gatekeeper":"Unknown","JobBatchName":"RunJobs.dag+20752259","JobPrio":10,"JobStatus":1,"JobUniverse":5,"MaxHosts":1,"MaxWallTimeMins":1250,"MemoryMB":0.0,"MinHosts":1,"NumJobCompletions":0,"NumJobStarts":0,"NumRestarts":0,"NumSystemHolds":0,"OVERFLOW_CHECK":false,"Original_DESIRED_Sites":["UNKNOWN"],"OutputFiles":2,"Owner":"cms1315","PostJobPrio1":-1510436758,"PostJobPrio2":0,"PreJobPrio1":1,"ProcId":0,"QDate":1510436775000,"QueueHrs":10.951667712198363,"REQUIRED_OS":"rhel6","Rank":0,"RecordTime":1510475761000,"RemoteSysCpu":0,"RemoteUserCpu":0,"RemoteWallClockTime":0,"RequestCpus":1,"RequestDisk":1,"RequestMemory":2000,"ScheddName":"[email protected]","ShouldTransferFiles":"YES","Site":"Unknown","SpoolOnEvict":false,"Status":"Idle","TaskType":"Analysis","Tier":"Unknown","TotalSubmitProcs":1,"TotalSuspensions":0,"TransferInputSizeMB":4,"Type":"analysis","Universe":"Vanilla","User":"cms1315@cms","VO":"cms","WMAgent_TaskType":"UNKNOWN","WallClockHr":0.0,"WhenToTransferOutput":"ON_EXIT_OR_EVICT","Workflow":"wverbeke_crab_Moriond2017_ext2-v1_ewkinoMCList-v7p1","metadata":{"id":"[email protected]#20752288.0#1510436775","timestamp":1510476202,"uuid":"8aa4b4fe-c785-11e7-ad57-fa163e15539a"},"x509UserProxyEmail":"[email protected]","x509UserProxyFQAN":["/DC=org/DC=terena/DC=tcs/C=BE/O=Universiteit Gent/CN=Willem Verbeke [email protected]","/cms/Role=NULL/Capability=NULL"],"x509UserProxyFirstFQAN":"/cms/Role=NULL/Capability=NULL","x509UserProxyVOName":"cms","x509userproxysubject":"/DC=org/DC=terena/DC=tcs/C=BE/O=Universiteit Gent/CN=Willem Verbeke [email protected]"},"metadata":{"_id":"380721bc-a12c-9b43-b545-740c10d2d0f0","hostname":"monit-amqsource-fafa51de8d.cern.ch","kafka_timestamp":1510476204057,"partition":"1","producer":"condor","timestamp":1510476204022,"topic":"condor_raw_metric","type":"metric","type_prefix":"raw","version":"001"}}
    """
    if not date:
        # by default we read yesterday's data
        date = time.strftime("%Y/%m/%d", time.gmtime(time.time() - 60 * 60 * 24))
    hpath = '%s/%s' % (hdir, date)
    # create new spark DataFrame
    condor_df = sqlContext.read.json(hpath)
    condor_df.registerTempTable('condor_df')
    # condor_df = condor_df.select(unpack_struct("data", condor_df))  # extract data part of JSON records
    condor_df.printSchema()
    tables = {'condor_df': condor_df}
    return tables
8742f240f65755431a5b640e9481f657ce3048d5
705,159
from typing import Callable
from typing import List
from typing import Tuple


def _contiguous_groups(
    length: int, comparator: Callable[[int, int], bool]
) -> List[Tuple[int, int]]:
    """Splits range(length) into approximate equivalence classes.

    Args:
        length: The length of the range to split.
        comparator: Determines if two indices have approximately equal items.

    Returns:
        A list of (inclusive_start, exclusive_end) range endpoints. Each
        corresponds to a run of approximately-equivalent items.
    """
    result = []
    start = 0
    while start < length:
        past = start + 1
        while past < length and comparator(start, past):
            past += 1
        result.append((start, past))
        start = past
    return result
fc25e286a2b6ec9ab7de15146e8b26922ea56e6b
705,160
def datatype_derive(times, series):
    """
    returns series converted to datatype derive

    store only differences between two subsequent values

    parameters:
    series <tuple> of <float>

    returns:
    <list> of <float>
    """
    new_series = [0.0, ]
    for index in range(1, len(series)):
        new_series.append(series[index] - series[index - 1])
    return new_series
4a2689030e1911a8b4ee5777157c61c623e94da0
705,161
def cleanRepl(matchobj):
    """
    Clean up a directory name so that it can be written to a matplotlib
    title without encountering LaTeX escape sequences.

    Replace backslashes with forward slashes; replace underscores
    (subscript) with escaped underscores.
    """
    if matchobj.group(0) == r'\\':
        return '/'
    if matchobj.group(0) == r'_':
        return r'\_'
    if matchobj.group(0) == r'/':
        return '/'
    else:
        return ''
ffe9abb42df66780134e058ad24457a75f873055
705,162
def fibonacci_index(index):
    """
    Returns the Fibonacci sequence with the given index being the last value.

    Raises a TypeError if the given index is a string, float, zero or a
    negative number. Returns a string for given indexes that are 1 and 2.
    """
    try:
        if type(index) == str or type(index) == float or index < 1:
            raise TypeError
        elif index < 3:
            return f"""[0, 1]
For getting better results enter a whole number bigger than 2."""
        else:
            initial_sequence = [0, 1]
            while len(initial_sequence) < index:
                next_value = initial_sequence[-2] + initial_sequence[-1]
                initial_sequence.append(next_value)
            return initial_sequence
    except TypeError:
        raise TypeError('Please enter a positive whole number.')
fe6af59ed30d2559ed3d8822ff3b78d21fee6f65
705,163
from pathlib import Path


def get_history_file_path():
    """Returns path to the training command history file."""
    return Path(__file__).parent / 'history'
b55e6ad58f1b841d22cce1fe421f17740abc32a0
705,164
def center_embeddings(X, Y):
    """
    Copied from Alvarez-Melis & Jaakkola (2018)
    """
    X -= X.mean(axis=0)
    Y -= Y.mean(axis=0)
    return X, Y
a583c400db2e3ddcabc535dc20c8866b432828d6
705,166
def scale_range(x, x_range, y_range=(0.0, 1.0)):
    """
    scale the number x from the range specified by x_range to the range
    specified by y_range

    :param x: the number to scale
    :type x: float
    :param x_range: the number range that x belongs to
    :type x_range: tuple
    :param y_range: the number range to convert x to, defaults to (0.0, 1.0)
    :type y_range: tuple
    :return: the scaled value
    :rtype: float
    """
    x_min, x_max = x_range
    y_min, y_max = y_range
    return (y_max - y_min) * (x - x_min) / (x_max - x_min) + y_min
3e2f5185f1565d70e8d1d699f3b5b1e00d375e21
705,167
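A usage sketch for scale_range above (hand-checked linear interpolation):

# Map 5 from [0, 10] onto the default unit interval.
assert scale_range(5, (0, 10)) == 0.5
# Map 0.25 from [0, 1] onto [0, 100].
assert scale_range(0.25, (0, 1), (0, 100)) == 25.0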
def guardian_join(team):
    """Returns a string of all of the parent guardians on the team joined
    together"""
    guardian_names = []
    for player in team['team_players']:
        guardian_names.extend(player['guardians'])
    guardian_string = ", ".join(guardian_names)
    return guardian_string
5b9c7908598a65bb5e465fae13258de99fbf8597
705,168
from typing import ByteString


def is_prefix_of(prefix: ByteString, label: ByteString) -> bool:
    """Whether label starts with prefix"""
    if len(prefix) > len(label):
        return False
    for (a, b) in zip(prefix, label):
        if a != b:
            return False
    return True
6be10ca432876f7847e2f8513e5205a9ae4d3c16
705,169
def get_worker_list(AnnotationSet):
    """return a list of worker IDs"""
    return list(AnnotationSet.dataframe.columns)[1:]
0f18afa4bb70360e03a1a65c1ab3a5b4bbba7e38
705,171
from typing import Counter
import math


def sentence_bleu(hypothesis, reference, smoothing=True, order=4, **kwargs):
    """
    Compute sentence-level BLEU score between a translation hypothesis and
    a reference.

    :param hypothesis: list of tokens or token ids
    :param reference: list of tokens or token ids
    :param smoothing: apply smoothing (recommended, especially for short sequences)
    :param order: count n-grams up to this value of n.
    :param kwargs: additional (unused) parameters
    :return: BLEU score (float)
    """
    log_score = 0
    if len(hypothesis) == 0:
        return 0
    for i in range(order):
        hyp_ngrams = Counter(zip(*[hypothesis[j:] for j in range(i + 1)]))
        ref_ngrams = Counter(zip(*[reference[j:] for j in range(i + 1)]))
        numerator = sum(min(count, ref_ngrams[bigram]) for bigram, count in hyp_ngrams.items())
        denominator = sum(hyp_ngrams.values())
        if smoothing:
            numerator += 1
            denominator += 1
        score = numerator / denominator
        if score == 0:
            log_score += float('-inf')
        else:
            log_score += math.log(score) / order
    bp = min(1, math.exp(1 - len(reference) / len(hypothesis)))
    return math.exp(log_score) * bp
e3913cebdfe58ca55aa9c02d9faab4d8fc9ef3dd
705,174
import os


def CPU_temperature():
    """Returns the temperature of the Raspberry Pi's CPU."""
    try:
        res = os.popen('vcgencmd measure_temp').readline()
        return res.replace("temp=", "").replace("'C\n", "")
    except:
        pass
98be304b1939f2f1d56c52e03815400b1863468b
705,175
def get_ldap():
    """connects to ldap and returns ldap connection"""
    # if not hasattr(g, 'ldap'):
    #     g.ldap = ldap.initialize(app.config['LDAP_URL'])
    # return g.ldap
    return None
c2cebe614b269d8e68a320c3f6951ca5b5cf26d0
705,176
def needs_update(targ_capacity, curr_capacity, num_up_to_date):
    """Return whether there are more batch updates to do.

    Inputs are the target size for the group, the current size of the group,
    and the number of members that already have the latest definition.
    """
    return not (num_up_to_date >= curr_capacity == targ_capacity)
77981f3fdb57296503f34b0ea955b68b9f98db4c
705,177
def private_invite_code(invite_code_key, invite_code):
    """
    Validate the closed-beta invite code

    :param invite_code_key:
    :param invite_code:
    :return:
    """
    error_dict = 0
    if not invite_code:
        error_dict = {'captcha_not_blank': ['The invite code must not be blank']}
    else:
        # if settings.ENABLE_VERIFY_CAPTCHA:
        #     server_captcha = get_redis_conn().get(captcha_key)
        # else:
        server_invite = '616833686'
        # server_captcha = '123456'
        # if captcha_value != server_captcha:
        #     server_captcha = get_redis_conn().get(captcha_key)
        if server_invite != invite_code:
            error_dict = {'captcha_error': ['The invite code is invalid or has expired']}
        else:
            # get_redis_conn().delete(captcha_key)
            pass
    return error_dict
1403c849670adc21501775003283d3dfd0c32019
705,178
from pathlib import Path


def is_dir_exist(path):
    """Whether the directory exists"""
    path_info = Path(path)
    return path_info.is_dir()
8182e96399d2271bc8e3cd5c1a4201f3e2acd895
705,180
import argparse


def arguments():
    """Parse the arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('SONG_NAME', help="Name of the song to download.",
                        default=None, nargs='?', type=str)
    parser.add_argument('-q', '--quiet',
                        help="Don't ask the user to select songs if there is "
                             "more than one search result. The first result "
                             "in each case will be considered.",
                        action='store_true')
    parser.add_argument('--version', action='version', version='v0.2-r3',
                        help='show the program version number and exit')
    parser.add_argument('--url', help="Youtube song link.")
    parser.add_argument('-s', '--setup', help='Setup the config file',
                        action='store_true')
    parser.add_argument('--nolocal',
                        help="Don't search locally for the song before downloading.",
                        action='store_true')
    args = parser.parse_args()
    return args
4c2d18544b0cd2a0b63e1b0657c264ef131a5787
705,181
import argparse


def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
        A list of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="DeepLabLFOV NetworkEv")
    parser.add_argument("--pred-path", type=str, default='',
                        help="Path to predicted segmentation.")
    parser.add_argument("--gt-path", type=str, default='',
                        help="Path to the groundtruth dir.")
    return parser.parse_args()
12e8f214e5ef97e0a5a5e3aafaa8395a9e845601
705,182
def _normalize_dataframe(dataframe, index):
    """Take a pandas DataFrame and count the elements present in the given
    columns; return a hierarchical index on those columns
    """
    # groupby the given keys, extract the same columns and count the
    # elements, then collapse them with a mean
    data = dataframe[index].dropna()
    grouped = data.groupby(index, sort=False)
    counted = grouped[index].count()
    averaged = counted.mean(axis=1)
    # Fill empty missing with 0, see GH5639
    averaged = averaged.fillna(0.0)
    return averaged
fdc49912f538694048560f1c1453714791a7c6e4
705,183
from datetime import datetime


def to_date(string, format="%d/%m/%Y"):
    """Converts a string to datetime

    :param string: String containing the date.
    :type string: str
    :param format: The date format. Use %Y for year, %m for months and %d for
        days, defaults to "%d/%m/%Y"
    :type format: str, optional
    :return: The date present in the string
    :rtype: `datetime`
    """
    return datetime.strptime(string, format)
83fa8e8a0cdfae9546c7a83e55ddcf84ec667646
705,184
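A usage sketch for to_date above (standard strptime behavior):

from datetime import datetime
# The day comes first in the default "%d/%m/%Y" format.
assert to_date("01/02/2003") == datetime(2003, 2, 1)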
def pad_word_array(word_array, MAX_SEQUENCE_LENGTH, padding='pre', truncating='pre'):
    """Return a word array that is of length MAX_SEQUENCE_LENGTH by
    truncating the original array or padding it

    Args:
        word_array:
        MAX_SEQUENCE_LENGTH:
        padding:
        truncating:

    Returns:
    """
    if len(word_array) > MAX_SEQUENCE_LENGTH:
        word_array = (word_array[:MAX_SEQUENCE_LENGTH] if truncating == 'pre'
                      else word_array[len(word_array) - MAX_SEQUENCE_LENGTH:])
    else:
        if padding == 'pre':
            word_array = word_array + ['<pad>'] * (MAX_SEQUENCE_LENGTH - len(word_array))
        else:
            word_array = ['<pad>'] * (MAX_SEQUENCE_LENGTH - len(word_array)) + word_array
    return word_array
33d33284eb347f9f4b242932c42b7b8b68219135
705,185
def nbconvert(code):
    """Create Jupyter Notebook code

    Return dict in ipynb format

    Arguments:
    code -- code string separated by \\n
    """
    cells = []
    for cell in code.split("\n# <codecell>\n"):
        cells.append({
            "cell_type": "code",
            "execution_count": None,
            "metadata": {
                "collapsed": True,
            },
            "outputs": [],
            "source": [cell]
        })
    result = {
        "cells": cells,
        "nbformat": 4,
        "nbformat_minor": 0,
        "metadata": {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 2
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython2",
                "version": "2.7.6"
            }
        }
    }
    return result
f7e895e107f07850652e762a4b382ec299e6d352
705,186
def mock_user_moira_lists(mocker):
    """Return a fake moira client"""
    mocked = mocker.patch("ui.utils.user_moira_lists")
    mocked.return_value = set()
    return mocked
8dedab7071deae4f1e5fa3ffc7b79149fc49e795
705,188
def permutate(array: list, permutation: list):
    """
    Permutate a fixed array with a given permutation list

    Args:
        array: An array of random elements
        permutation: The permutation of the given array

    Returns:
    """
    _swapped_array = []
    _counter = 0
    for i in permutation:
        if _counter == i or i in _swapped_array:
            _counter += 1
            continue
        _temp = i - 1
        _swap = array[_temp]
        _sub = array[_counter]
        array[_temp] = _sub
        array[_counter] = _swap
        _swapped_array.append(_counter)
        _counter += 1
    return array
5b4f603c030276dcd78b6334ec00c901ca003c63
705,189
def outformathtml(pandasdf):
    """Change a few formatting things to prettify and make it match"""
    pandas_table = pandasdf.to_html()
    pandas_table = pandas_table.replace(""" border="1" """, " ")
    pandas_table = pandas_table.replace("""<tr style="text-align: right;">\n <th></th>\n <th></th>\n </tr>\n""", "")
    pandas_table = pandas_table.replace(
        """<thead>""",
        """<thead style="text-align: left; color: #094D92; font-size: 30px"> """)
    return pandas_table
5f8abf88a2aead4f095f52c1f49ab4ad609c04a5
705,190
import math


def distance():
    """
    Calculate the distance between two points.

    return: Distance.
    """
    return lambda a, b: math.sqrt((a.x - b.x) * (a.x - b.x)
                                  + (a.y - b.y) * (a.y - b.y)
                                  + (a.z - b.z) * (a.z - b.z))
ab3a14e7033afab66db7c283aefda745158bad65
705,192
import numpy


def rational_sum(numerator, denominator, *argv):
    """Sum of rational numbers."""
    if len(argv) < 2:
        gcd = numpy.gcd(numerator, denominator)
        num_out, den_out = numerator // gcd, denominator // gcd
    else:
        num_2 = argv[0]
        den_2 = argv[1]
        num_3 = numerator * den_2 + num_2 * denominator
        den_3 = denominator * den_2
        gcd = numpy.gcd(num_3, den_3)
        # Unpack the remaining arguments so the recursion sees them as
        # individual numerator/denominator values.
        num_out, den_out = rational_sum(num_3 // gcd, den_3 // gcd, *argv[2:])
    return num_out, den_out
b34d01ea2bcfd072430501828401d5b492b2bae5
705,193
import time


def nonce() -> str:
    """Return a nonce counter (monotonic clock).

    References:
        * https://support.kraken.com/hc/en-us/articles/360000906023-What-is-a-nonce-
    """
    # pylint: disable=line-too-long
    return str(time.monotonic_ns())
fb6221fef4c2c8af66200c4c9da8f6253854b186
705,194
import string


def base62_encode(number):
    """Encode a number in base62 (all digits + a-z + A-Z)."""
    base62chars = string.digits + string.ascii_letters
    l = []
    while number > 0:
        remainder = number % 62
        number = number // 62
        l.insert(0, base62chars[remainder])
    return ''.join(l) or '0'
b1f10fe69b6263d54f2e00a32b8260cbb3c42747
705,196
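An illustrative check of base62_encode above (digits come first in the alphabet, so indices 0-9 map to '0'-'9'; hand-verified):

assert base62_encode(0) == '0'
assert base62_encode(125) == '21'  # 2*62 + 1 = 125
assert base62_encode(61) == 'Z'    # last symbol of the alphabet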
import random


def random_swap(o_a, o_b):
    """
    Randomly swap elements of two observation vectors and return new vectors.

    :param o_a: observation vector a
    :param o_b: observation vector b
    :return: shuffled vectors
    """
    X, Y = [], []
    tf = [True, False]
    for x, y in zip(o_a, o_b):
        if random.choice(tf):
            x, y = y, x
        X.append(x)
        Y.append(y)
    return X, Y
f243e91e5b281c682601fdb8df49bd7e6209274c
705,198
from functools import reduce


def gcd(numbers):
    """Return greatest common divisor of integer numbers.

    Using Euclid's algorithm.

    Examples
    --------
    >>> gcd([4])
    4
    >>> gcd([3, 6])
    3
    >>> gcd([6, 7])
    1
    """
    def _gcd(a, b):
        """Return greatest common divisor of two integer numbers."""
        while b:
            a, b = b, a % b
        return a
    return reduce(_gcd, numbers)
da7ae2a24649bc05e233533735baf850a37dcc5a
705,199
import os


def Write_data(data_name, parameters, metrics_name_list, length_input,
               metrics_mean_list, metrics_std_dev_list):
    """Writes the metrics of a given simulation in a datasheet in .txt format.

    Parameters
    ----------
    data_name: string
        Desired name of the datasheet archive.
    parameters: string
        Header line describing the simulation parameters.
    metrics_name_list: list of strings
        Names of used metrics
    length_input: int
        Size of sigma_input_list.
    metrics_mean_list: array
        Array with the mean values of each metric in each input.
    metrics_std_dev_list: array
        Array with the standard deviation of each metric in each input.

    Return
    ------

    References
    ----------
    """
    os.chdir('Data')
    data = open(data_name + '.txt', 'w')
    num_metrics = len(metrics_name_list)
    variables_name_list = ['Sigma_mean', 'Sigma_std_dev']
    for metrics_name in metrics_name_list:
        variables_name_list.append(metrics_name + '_mean')
        variables_name_list.append(metrics_name + '_std_dev')
    data.write(parameters + '\n')
    variables = '#'
    variables += '\t'.join(variables_name_list)
    variables += '\n'
    data.write(variables)
    data_line = ''
    for j in range(length_input):
        for k in range(num_metrics + 1):
            m, s = metrics_mean_list[j][k], metrics_std_dev_list[j][k]
            data_line += '{}\t{}'.format(m, s)
            data_line += '\t'
        data_line += '\n'
    data.write(data_line)
    data.close()
    os.chdir('..')
    return None
83478d88f45203b22373dfb42ae0e4a1338b2355
705,201
import argparse


def setup_options():
    """
    Add logging options to the cmd-line, but suppress them, so that they
    don't clobber up the help-messages
    """
    my_argparser = argparse.ArgumentParser(add_help=False)
    my_argparser.add_argument("--LogLevel_Model", default="",
                              help=argparse.SUPPRESS)
    return my_argparser
dfbd38e14720f1bbe8806b1d9dab47694c7a0610
705,203
def isAnomaly(lowBand, highBand, value):
    """Condition for anomaly on a certain row"""
    if value < lowBand or value > highBand:
        return True
    return False
552513115ec9c98c40cd6487b6a33f497c562c87
705,204
import numpy


def _create_globio_lulc_op(
        lulc_array, potential_vegetation_array, pasture_array, ffqi,
        globio_nodata, pasture_threshold, primary_threshold):
    """Construct GLOBIO lulc given relevant biophysical parameters."""
    result = numpy.empty_like(lulc_array, dtype=numpy.int16)
    result[:] = globio_nodata
    valid_mask = lulc_array != globio_nodata
    valid_result = result[valid_mask]

    # Split Shrublands and grasslands into primary vegetations, livestock
    # grazing areas, and man-made pastures. Landcover 131 represents
    # grassland/shrubland in the GLOBIO classification.
    grass_shrub_mask = lulc_array[valid_mask] == 131
    grass_shrub_result = valid_result[grass_shrub_mask]
    # fill with livestock grazing, then re-assign to pasture, primary veg.
    grass_shrub_result[:] = 5
    # man-made pasture
    valid_pasture_mask = potential_vegetation_array[valid_mask][grass_shrub_mask] <= 8
    grass_shrub_result[valid_pasture_mask] = 6
    # primary vegetation
    valid_primary_veg_mask = ~valid_pasture_mask & (
        pasture_array[valid_mask][grass_shrub_mask] < pasture_threshold)
    grass_shrub_result[valid_primary_veg_mask] = 1
    valid_result[grass_shrub_mask] = grass_shrub_result
    # Outside of the grass/shrub categories, carry over the original codes:
    valid_result[~grass_shrub_mask] = lulc_array[valid_mask][~grass_shrub_mask]

    # Step 1.4a: Split Forests into Primary, Secondary
    # 1 is primary forest
    # 3 is secondary forest
    valid_modis_forest_mask = lulc_array[valid_mask] == 130
    forest_result = valid_result[valid_modis_forest_mask]
    forest_result[:] = 1
    forest_result[
        ffqi[valid_mask][valid_modis_forest_mask] < primary_threshold] = 3
    valid_result[valid_modis_forest_mask] = forest_result

    # Classify all ag classes as a new LULC value "12" per our custom design
    # of agriculture. Landcover 132 represents agriculture landcover types
    # in the GLOBIO classification scheme.
    valid_ag_mask = lulc_array[valid_mask] == 132
    valid_result[valid_ag_mask] = 12
    result[valid_mask] = valid_result
    return result
1a6ca959e8da30767a264bc529f16ab6f2626851
705,205
def pair_keys_to_items(items, key):
    """
    Convert the list of key:value dicts (nics or disks) into a dict.

    The key for the new dict is one value of the current dict identified by
    the key parameter. If it does not exist, then the key is the order
    number in the list.
    """
    new_items = {}
    for i, item in enumerate(items):
        new_items[item.get(key, i)] = item
    return new_items
92c66bfbb298e767b3fedbfcfd48ad87ac1162ef
705,206
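A usage sketch for pair_keys_to_items above (hypothetical nic dicts; the list index is used when the key is missing):

nics = [{'name': 'eth0', 'ip': '10.0.0.1'}, {'ip': '10.0.0.2'}]
assert pair_keys_to_items(nics, 'name') == {
    'eth0': {'name': 'eth0', 'ip': '10.0.0.1'},
    1: {'ip': '10.0.0.2'},
}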
def get_human_size(size):
    """Return a string describing the size in bytes"""
    if size < 1024:
        return '{} B'.format(size)
    if size < 1024 * 1024:
        return '{:.2f} KB'.format(float(size) / 1024)
    if size < 1024 * 1024 * 1024:
        return '{:.2f} MB'.format(float(size) / (1024 * 1024))
    if size < 1024 * 1024 * 1024 * 1024:
        return '{:.2f} GB'.format(float(size) / (1024 * 1024 * 1024))
    return '{:.2f} TB'.format(float(size) / (1024 * 1024 * 1024 * 1024))
48cee8ca55717d6fb48c5c1dc06becff71c58f0e
705,207
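Illustrative values for get_human_size above (hand-checked against the 1024 thresholds):

assert get_human_size(512) == '512 B'
assert get_human_size(2048) == '2.00 KB'
assert get_human_size(5 * 1024 ** 2) == '5.00 MB'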
def empty_when_none(_string=None):
    """If _string is None, return an empty string, otherwise return the
    string."""
    if _string is None:
        return ""
    else:
        return str(_string)
402186ee7b4ba9c3968f81bee23134067d0f260e
705,208
import hashlib


def file_hashes(f, bufsize=16000000):
    """
    Computes md5, sha1, sha256 from a file obj. Intended for large files.

    Returns a 3-tuple of hexstrings.
    """
    md5 = hashlib.md5()
    sha1 = hashlib.sha1()
    sha256 = hashlib.sha256()
    while True:
        buf = f.read(bufsize)
        if len(buf) == 0:
            break
        md5.update(buf)
        sha1.update(buf)
        sha256.update(buf)
    return (md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest())
4e23a0d99cda07325ba3a14675bfb515c12d2950
705,209
from typing import List
import json


def get_edfi_payloads(context, dbt_run_result, table_reference: str) -> List:
    """
    Extract a BigQuery table and return the resulting JSON as a list of
    record dicts.
    """
    df = context.resources.warehouse.download_table(table_reference)
    df_json = df.to_json(orient="records", date_format="iso")
    df_dict = json.loads(df_json)
    return df_dict
c2ad0026ad4e56a256a824a4c1fae0762aaa51b7
705,211
def add_rnn_encoder_arguments(group):
    """Define arguments for RNN encoder."""
    group.add_argument(
        "--elayers",
        default=4,
        type=int,
        help="Number of encoder layers (for shared recognition part "
        "in multi-speaker asr mode)",
    )
    group.add_argument(
        "--eunits",
        "-u",
        default=300,
        type=int,
        help="Number of encoder hidden units",
    )
    group.add_argument(
        "--eprojs", default=320, type=int, help="Number of encoder projection units"
    )
    group.add_argument(
        "--subsample",
        default="1",
        type=str,
        help="Subsample input frames x_y_z means subsample every x frame "
        "at 1st layer, every y frame at 2nd layer etc.",
    )
    return group
64a65bd496402dedfe98c4bd0d5bbc516c87a398
705,212
def sections(parsed):
    """Calculates the number of every type of section"""
    num_small_sections = 0
    num_medium_sections = 0
    num_big_sections = 0
    for fence in parsed.fences:
        if not fence.isRemoval:
            num_big_sections += (fence.length / 12) // 8
            if (fence.length / 12) % 8 < 6 and (fence.length / 12) % 8 > 0:
                num_small_sections += 1
            if (fence.length / 12) % 8 > 6:
                num_medium_sections += 1
    num_sections = num_small_sections + num_medium_sections + num_big_sections
    return num_small_sections, num_medium_sections, num_big_sections, num_sections
67bf9328af627234d7dd2fc4bf6dfb11911f9985
705,213
def is_power(num, return_decomposition=False):
    """
    Check if num is a perfect power in O(n^3) time, n=ceil(logN)
    """
    b = 2
    while (2 ** b) <= num:
        a = 1
        c = num
        while (c - a) >= 2:
            m = int((a + c) / 2)
            if (m ** b) < (num + 1):
                p = int(m ** b)
            else:
                p = int(num + 1)
            if int(p) == int(num):
                if return_decomposition:
                    return True, int(m), int(b)
                else:
                    return True
            if p < num:
                a = int(m)
            else:
                c = int(m)
        b = b + 1
    if return_decomposition:
        return False, num, 1
    else:
        return False
f12a3d5559e68eb72d8a920ee1e3fdfb9c813d3f
705,214
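A sanity check of is_power above (hand-verified; the decomposition returns base and exponent):

assert is_power(8, return_decomposition=True) == (True, 2, 3)  # 2**3
assert is_power(10) is False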
import re


def validate_bucket_name(bucket_name):
    """
    Validate bucket name

    Bucket name must be compatible with DNS name (RFC 1123):
        - Less than 63 characters
        - Valid character set [a-z0-9-]
        - Can not begin and end with "-"

    Returns True if valid, False otherwise
    """
    if len(bucket_name) < 6 or len(bucket_name) > 63:
        return False
    if bucket_name.startswith("-") or bucket_name.endswith("-"):
        return False
    pattern = re.compile("^[0-9a-z]([0-9a-z-]{0,61})[0-9a-z]$")
    if not pattern.match(bucket_name):
        return False
    return True
1d759408d097143b93b0af172bf8e73fe02e283a
705,215
def format_gro_box(box):
    """Print a line corresponding to the box vector in accordance with the
    .gro file format

    @param[in] box Box NamedTuple
    """
    if box.alpha == 90.0 and box.beta == 90.0 and box.gamma == 90.0:
        return ' '.join(["% 13.9f" % (i / 10) for i in [box.a, box.b, box.c]])
    else:
        return ' '.join(["% 13.9f" % (i / 10) for i in
                         [box.A[0], box.B[1], box.C[2],
                          box.A[1], box.A[2], box.B[0],
                          box.B[2], box.C[0], box.C[1]]])
61fd32e7bc9eb9a81b8276afd3e35eb1b32150a5
705,216
def name2link(name: str):
    """Used for hyperlink anchors"""
    if not isinstance(name, str):
        name = str(name)
    return "-".join([s.lower() for s in name.split(" ")])
357496a291dcb16a86f830551350ff77ca9de81c
705,217
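A usage sketch for name2link above:

# Lowercase each word and join with hyphens, GitHub-anchor style.
assert name2link("Getting Started Guide") == "getting-started-guide"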
def get_neighbors(grid, structure_num, proximity):
    """
    Given a grid of structures, returns the closest proximity neighbors to
    the given structure

    params:
        - grid: 2D numpy array
        - structure_num: int
        - proximity: int
    :returns
        - A list of neighboring structures to the current structure_num
    """
    # Get the number of columns for ease of access
    width = len(grid)
    height = len(grid[0])
    # We'll make it a set initially to avoid duplicate neighbors
    neighbors = set()
    for i in range(-proximity, proximity + 1):
        for j in range(-proximity, proximity + 1):
            if not (i == 0 and j == 0):
                x = min(max((structure_num // height) - i, 0), width - 1)
                y = min(max((structure_num % height) - j, 0), height - 1)
                if grid[x][y] != structure_num:
                    neighbors.add(grid[x][y])
    return list(neighbors)
4f62fb8f01beaeea32b8ae0b496e4e972e4cc74b
705,218
def split_data_target(element, device, logger=None):
    """Split elements in dataloader according to pre-defined rules."""
    if not (isinstance(element, list) or isinstance(element, tuple)):
        msg = (
            "Invalid dataloader, please check if the input dataloader is valid."
        )
        if logger:
            logger.error(msg)
        raise ValueError(msg)
    if len(element) == 2:
        # Dataloader with one input and one target
        data, target = element[0], element[1]
        return [data.to(device)], target.to(device)  # tensor -> list
    elif len(element) > 2:
        # Dataloader with multiple inputs and one target
        data, target = element[:-1], element[-1]
        data_device = [tensor.to(device) for tensor in data]
        return data_device, target.to(device)
    else:
        # Dataloader with invalid input
        msg = (
            "The input dataloader should at least contain two tensors - data"
            " and target."
        )
        if logger:
            logger.error(msg)
        raise ValueError(msg)
2aa0a5c4d80aae2dc237ba9f87c11a7fc7e206fd
705,220
def coerce_types(T1, T2):
    """Coerce types T1 and T2 to a common type.

    Coercion is performed according to this table, where "N/A" means
    that a TypeError exception is raised.

    +----------+-----------+-----------+-----------+----------+
    |          |   int     | Fraction  | Decimal   | float    |
    +----------+-----------+-----------+-----------+----------+
    | int      |   int     | Fraction  | Decimal   | float    |
    | Fraction | Fraction  | Fraction  | N/A       | float    |
    | Decimal  | Decimal   | N/A       | Decimal   | float    |
    | float    | float     | float     | float     | float    |
    +----------+-----------+-----------+-----------+----------+

    Subclasses trump their parent class; two subclasses of the same base
    class will be coerced to the second of the two.
    """
    # Get the common/fast cases out of the way first.
    if T1 is T2:
        return T1
    if T1 is int:
        return T2
    if T2 is int:
        return T1
    # Subclasses trump their parent class.
    if issubclass(T2, T1):
        return T2
    if issubclass(T1, T2):
        return T1
    # Floats trump everything else.
    if issubclass(T2, float):
        return T2
    if issubclass(T1, float):
        return T1
    # Subclasses of the same base class give priority to the second.
    if T1.__base__ is T2.__base__:
        return T2
    # Otherwise, just give up.
    raise TypeError('cannot coerce types %r and %r' % (T1, T2))
7d412df0182ca6e1f43bfc6ce8e7c6ce1a738bed
705,221
from typing import OrderedDict


def sort_dict(od, d):
    """Sort parameters (same order as xsd:sequence)"""
    if isinstance(od, dict):
        ret = OrderedDict()
        for k in od.keys():
            v = d.get(k)
            # don't append null tags!
            if v is not None:
                if isinstance(v, dict):
                    v = sort_dict(od[k], v)
                elif isinstance(v, list):
                    v = [sort_dict(od[k][0], v1) for v1 in v]
                ret[k] = v
        return ret
    else:
        return d
6211a98d30e29ac9b5d0dcaeeec3ef76e9c95713
705,222
import itertools


def pad_ends(
    sequence, pad_left=True, left_pad_symbol="<s>", right_pad_symbol="</s>"
):
    """
    Pad sentence ends with start- and end-of-sentence tokens

    In speech recognition, it is important to predict the end of sentence
    and use the start of sentence to condition predictions. Typically this
    is done by adding special tokens (usually <s> and </s>) at the ends of
    each sentence. The <s> token should not be predicted, so some special
    care needs to be taken for unigrams.

    Arguments
    ---------
    sequence : iterator
        The sequence (any iterable type) to pad.
    pad_left : bool
        Whether to pad on the left side as well. True by default.
    left_pad_symbol : any
        The token to use for left side padding. "<s>" by default.
    right_pad_symbol : any
        The token to use for right side padding. "</s>" by default.

    Returns
    -------
    generator
        A generator that yields the padded sequence.

    Example
    -------
    >>> for token in pad_ends(["Speech", "Brain"]):
    ...     print(token)
    <s>
    Speech
    Brain
    </s>
    """
    if pad_left:
        return itertools.chain(
            (left_pad_symbol,), tuple(sequence), (right_pad_symbol,)
        )
    else:
        return itertools.chain(tuple(sequence), (right_pad_symbol,))
e4a341d1e777adab36ec0c0e7996e23203c53478
705,223
import numpy


def split_with_minimum_rt_distance(rts, min_rt_delta=0, random_state=None):
    """
    Sample from a set of retention times, so that the sampled rts have a
    minimum rt difference.

    :param rts:
    :param min_rt_delta:
    :param random_state:
    :return:
    """
    # if min_rt_delta == 0:
    #     return list(range(len(rts)))

    # Store old random state and set random state
    rs_old = numpy.random.get_state()
    numpy.random.seed(random_state)
    last_rt = -numpy.inf
    idc = []
    for rt in numpy.unique(rts):
        if last_rt + min_rt_delta <= rt:
            sel = numpy.where(rts == rt)[0]
            idc.append(sel[numpy.random.randint(0, len(sel))])
            last_rt = rt
    # Restore old random state
    numpy.random.set_state(rs_old)
    return idc
026adc9b8dc7f3be513a93275fb0ef0d4b7de615
705,224
def calc_overlap(row):
    """
    Calculates the overlap between prediction and ground truth and overlap
    percentages used for determining true positives.
    """
    set_pred = set(row.predictionstring_pred.split(' '))
    set_gt = set(row.predictionstring_gt.split(' '))
    # Length of each and intersection
    len_gt = len(set_gt)
    len_pred = len(set_pred)
    inter = len(set_gt.intersection(set_pred))
    overlap_1 = inter / len_gt
    overlap_2 = inter / len_pred
    return [overlap_1, overlap_2]
98e65250f82ab13b23de049fd80a59dea30ccce2
705,225
def get_range(l_list, l_position):
    """Obtaining range of points in list (optionally at position inside of
    list)"""
    l_range = 0
    l_abs_range = 0
    l_max = 0
    l_min = 0
    ll_list = []
    counter = 0
    if l_position is None:
        ll_list = l_list
    else:
        while counter < len(l_list):
            ll_list.append(l_list[counter][l_position])
            counter += 1
    for l_object in ll_list:
        if int(max(l_object)) > l_max:
            l_max = int(max(l_object))
        if int(min(l_object)) < l_min:
            l_min = int(min(l_object))
        if (l_max - l_min) > l_range:
            l_range = (l_max - l_min)
        if abs(l_max - l_min) > l_abs_range:
            l_abs_range = abs(l_max - l_min)
    return l_range, l_abs_range
a98b1d12cd37545b5cb1932cfe273222d9c5e4c0
705,226
def _bucket_from_workspace_name(wname):
    """Try to assert the bucket name from the workspace name.

    E.g. it will answer www.bazel.build if the workspace name is
    build_bazel_www.

    Args:
        wname: workspace name
    Returns:
        the guessed name of the bucket for this workspace.
    """
    revlist = []
    for part in wname.split("_"):
        revlist.insert(0, part)
    return ".".join(revlist)
4cf3f4505a894f63258846abbe41b3b787485d40
705,227
def decay(epoch):
    """This method creates the alpha."""
    # returning a very small constant learning rate
    return 0.001 / (1 + 1 * 30)
b3311fe38557ee18d0e72ce794a3123b04b92c7a
705,228
import functools
import time


def timer_function(function):
    """Print time taken to execute a function"""
    @functools.wraps(function)
    def inner_function(name):
        start = time.perf_counter()
        # Capture and propagate the wrapped function's return value.
        result = function(name)
        end = time.perf_counter()
        total = end - start
        print(start, end)
        print(f"The function finished in {total:.4f}")
        return result
    return inner_function
82981c28e9401581d38c1eed6b4efab30679cec8
705,229
def get_attrib_uri(json_dict, attrib):
    """Get the URI for an attribute."""
    url = None
    if type(json_dict[attrib]) == str:
        url = json_dict[attrib]
    elif type(json_dict[attrib]) == dict:
        if json_dict[attrib].get('id', False):
            url = json_dict[attrib]['id']
        elif json_dict[attrib].get('@id', False):
            url = json_dict[attrib]['@id']
    return url
838b698e3475ebdc877b29de6f3fd446d2be1cdf
705,230
def set_model_params(module, params_list, start_param_idx=0):
    """Set params list into model recursively"""
    param_idx = start_param_idx
    for name, param in module._parameters.items():
        module._parameters[name] = params_list[param_idx]
        param_idx += 1
    for name, child in module._modules.items():
        if child is not None:
            # The recursive call returns the next absolute index, so assign
            # rather than accumulate.
            param_idx = set_model_params(child, params_list, param_idx)
    return param_idx
7ce6edb0c1b83020280cf0b586623d66839b4b0a
705,231
import inspect


def super_class_property(*args, **kwargs):
    """
    A class decorator that adds the class' name in lowercase as a property of
    its superclass with a value constructed using the subclass' constructor
    with the given arguments. So for example:

        class A:
            pass

        @super_class_property(foo=5)
        class B(A):
            def __init__(self, foo=3):
                self.foo = foo

    Effectively results in the following, after the definition of B:

        A.b = B(foo=5)

    Can be used multiple times with different arguments if desired.
    """
    def add_superclass_property(cls):
        nonlocal args, kwargs
        mro = inspect.getmro(cls)
        if len(mro) <= 2:
            raise TypeError(
                (
                    "Class {} can't be a super_class_property because it has "
                    "no super class."
                ).format(cls)
            )
        parent = mro[1]
        instance = cls(*args, **kwargs)
        setattr(parent, cls.__name__.lower(), instance)
        return cls
    return add_superclass_property
ecfd38ba3d7ea96266278ed6be6cf0ba87263d7d
705,232
import os


def hdfs_to_local(hdfs_path, local_path, is_txt=True):
    """Copy an hdfs file to local

    param:
        * hdfs_path: hdfs file or dir
        * local_path: local file or dir
    return:
        * res: result message
    """
    res = ''
    if is_txt:
        f = os.popen("hadoop dfs -text {} > {}".format(hdfs_path, local_path))
        res = f.read()
    else:
        f = os.popen("hadoop dfs -get {} {}".format(hdfs_path, local_path))
        res = f.read()
    if '' == res:
        res = 'ok'
    return res
46ee67069c7c43c1fb23a62a1c1d8fadcf058121
705,233
def set_playbook_config(ctx, **kwargs):
    """
    Set all playbook node instance configuration as runtime properties

    :param _ctx: Cloudify node instance which is instance of CloudifyContext
    :param config: Playbook node configurations
    """
    def _get_secure_values(data, sensitive_keys, parent_hide=False):
        """
        :param data: dict to check against sensitive_keys
        :param sensitive_keys: a list of keys we want to hide the values for
        :param parent_hide: boolean flag to pass if the parent key is in
            sensitive_keys
        """
        for key in data:
            # check if key or its parent {dict value} in sensitive_keys
            hide = parent_hide or (key in sensitive_keys)
            value = data[key]
            # handle dict value in case sensitive_keys was inside another key
            if isinstance(value, dict):
                # call the _get_secure_values function recursively to handle
                # the dict value
                inner_dict = _get_secure_values(value, sensitive_keys, hide)
                data[key] = inner_dict
            else:
                data[key] = '*' * len(value) if hide else value
        return data

    if kwargs and isinstance(kwargs, dict):
        kwargs = _get_secure_values(kwargs, kwargs.get("sensitive_keys", {}))
    for key, value in kwargs.items():
        ctx.instance.runtime_properties[key] = value
    ctx.instance.update()
241642acdcd3b3b37c4b3736b375a03e5bc4cbec
705,234
def merge(sorted1, sorted2):
    """Merge two sorted lists into a single sorted list."""
    if sorted1 == ():
        return sorted2
    elif sorted2 == ():
        return sorted1
    else:
        h1, t1 = sorted1
        h2, t2 = sorted2
        if h1 <= h2:
            return (h1, merge(t1, sorted2))
        else:
            return (h2, merge(sorted1, t2))
7c02b345b3d1e7c67e363e1535c608575a313f75
705,235
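A usage note for merge above: the "lists" here are cons-style nested pairs (head, tail) terminated by the empty tuple, not Python lists. An illustrative, hand-checked call:

# (1, (3, ())) is the pair-encoded [1, 3]; (2, ()) is [2].
assert merge((1, (3, ())), (2, ())) == (1, (2, (3, ())))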
def slice_repr(slice_obj):
    """
    Get the best guess of a minimal representation of a slice, as it would
    be created by indexing.
    """
    slice_items = [slice_obj.start, slice_obj.stop, slice_obj.step]
    if slice_items[-1] is None:
        slice_items.pop()
    if slice_items[-1] is None:
        if slice_items[0] is None:
            return "all"
        else:
            return repr(slice_items[0]) + ":"
    else:
        return ":".join("" if x is None else repr(x) for x in slice_items)
c894f66478ec830a4968d0cfc5d9e146457012b6
705,237
def pad(sequences, max_length, pad_value=0):
    """Pads a list of sequences.

    Args:
        sequences: A list of sequences to be padded.
        max_length: The length to pad to.
        pad_value: The value used for padding.

    Returns:
        A list of padded sequences.
    """
    out = []
    for sequence in sequences:
        # Use pad_value rather than a hard-coded 0 so the argument is honored.
        padded = sequence + [pad_value] * (max_length - len(sequence))
        out.append(padded)
    return out
68d0a8a19352e3e724ef012a396b51c28005ff02
705,238
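An illustrative check of pad above, after wiring pad_value through (hand-verified):

assert pad([[1, 2], [3]], 4, pad_value=9) == [[1, 2, 9, 9], [3, 9, 9, 9]]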