content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k)
from typing import Callable, Dict, List, Tuple


def enum_options(values: List[Dict]) -> Tuple[List[str], int, Callable]:
    """Enumerate options of an enum parameter for display in a select box.

    Returns a 3-tuple containing a list of option values, the list index of
    the default option, and a function that maps option values to their
    names (identifiers).

    Parameters
    ----------
    values: list of dict
        List of enumeration values from the parameter declaration.

    Returns
    -------
    (list, int, callable)
    """
    options = list()
    default_index = 0
    mapping = dict()
    for i, obj in enumerate(values):
        identifier = obj['value']
        options.append(identifier)
        mapping[identifier] = obj['name']
        if obj.get('isDefault', False):
            default_index = i

    def mapfunc(value: str) -> str:
        """Map an option value to its identifier."""
        return mapping[value]

    return options, default_index, mapfunc
c1214f319847b40705f425e529420a5916debe6e
703,994
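A minimal usage sketch for the enum_options entry above; the values list is hypothetical and only illustrates the expected dict shape:

values = [
    {'value': 'a4', 'name': 'A4 paper', 'isDefault': True},
    {'value': 'letter', 'name': 'US Letter'},
]
options, default_index, mapfunc = enum_options(values)
print(options)            # ['a4', 'letter']
print(default_index)      # 0
print(mapfunc('letter'))  # 'US Letter'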
def bits_to_netmask(bits):
    """Convert a prefix length in bits to a dotted-decimal netmask.

    Args:
        bits ('int'): number of network bits to convert, e.g. bits = 32

    Raise:
        None

    Returns:
        Netmask string
    """
    mask = (0xffffffff >> (32 - bits)) << (32 - bits)
    return (str((0xff000000 & mask) >> 24) + '.' +
            str((0x00ff0000 & mask) >> 16) + '.' +
            str((0x0000ff00 & mask) >> 8) + '.' +
            str((0x000000ff & mask)))
33897b357d99027d6f96fc5fb63f38989dbbe930
703,995
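A quick sanity check for bits_to_netmask on common CIDR prefix lengths:

print(bits_to_netmask(24))  # 255.255.255.0
print(bits_to_netmask(32))  # 255.255.255.255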
def find_elements(node, xpath, allow_zero=True, allow_multiple=True):
    """Attempt to find child elements in a node by xpath. Raise exceptions
    if conditions are violated. Return a (possibly empty) list of elements."""
    all_elements = node.findall(xpath)
    if ((len(all_elements) == 0 and not allow_zero)
            or (len(all_elements) > 1 and not allow_multiple)):
        raise AssertionError(
            f'Found {len(all_elements)} instances of {xpath} in {node}, '
            f'which is not allowed')
    return all_elements
2255fcd63f35837c647dd6dca81ab648d59addc8
703,996
def largest_rectangle_area(heights):
    """Return the largest rectangle in a histogram."""
    stack = [-1]
    max_area = 0
    for i in range(len(heights)):
        # We store indexes in the stack, so we compare the height at the last
        # stacked index with the current height to check whether the last
        # element in the stack is at least as tall as the current one.
        while stack[-1] != -1 and heights[stack[-1]] >= heights[i]:
            last_element_index = stack.pop()
            max_area = max(max_area,
                           heights[last_element_index] * (i - stack[-1] - 1))
        stack.append(i)
    # We went through all elements of the heights array;
    # now check whether anything is left on the stack.
    while stack[-1] != -1:
        last_element_index = stack.pop()
        max_area = max(max_area,
                       heights[last_element_index] * (len(heights) - stack[-1] - 1))
    return max_area
fefb750d57a250c322a6464be250fe2cb3a9a2df
703,997
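An illustrative call to largest_rectangle_area on the classic histogram example (the 5 and 6 bars combine into a height-5, width-2 rectangle):

print(largest_rectangle_area([2, 1, 5, 6, 2, 3]))  # 10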
import subprocess


def kill_lines(path):
    """Run sed on all files in path and remove leftovers from commented-out
    cookiecutter code: ` # - ` or ` // - `.
    """
    regex = r'^\s*(#|//) -\s*$'
    # Escape the characters that are special inside the sed address.
    for escape in "()/|":
        regex = regex.replace(escape, fr"\{escape}")
    sed_command = f"/{regex}/d"
    print(f"removing kill lines | {sed_command} | @ {path}")
    return subprocess.check_call(
        ["find", path, "-type", "f",
         "-exec", "sed", "-i", sed_command, "{}", "+"])
19bd97900ff2be10528eed93a0a42f9984d404a6
703,998
def generateMessage(rotation, tiltRight, tiltLeft, eyeLeft, blinkLeft,
                    blinkRight, eyeRight):
    """Pack the tracking values into a list of integers, in send order."""
    messageToSend = []
    for value in (rotation, tiltRight, tiltLeft, eyeLeft,
                  blinkLeft, blinkRight, eyeRight):
        messageToSend.append(int(value))
    return messageToSend
fcda9d537cda1fd7dc6fbc1e888d2a1af598104c
704,000
def app_metadata_json():
    """Create metadata.json content for a test app."""
    return {
        "description": "A test app that does not really exist.",
        "title": "Test App",
        "version": "1.0.0",
        "authors": "aiidalab",
        "logo": "img/logo.png",
        "state": "development"
    }
a07c21259c43980e51e0a8e115e589b02e493d04
704,001
def get_text(original, token, replace):
    """Convenience function for getting the text to use for a match when
    formatting.

    If ``replace`` is False, returns the part of ``original`` between
    ``token.startchar`` and ``token.endchar``. If ``replace`` is True,
    returns ``token.text``.
    """
    if replace:
        return token.text
    else:
        return original[token.startchar:token.endchar]
b0e2e53611cb5b26b04d0e8350f1ae88a6b56056
704,002
def calc_Ti(Te, Tg, n):
    """Calculate the infectious period."""
    return (Tg - Te) * 2.0 * n / (n + 1.0)
7ba0176b821032e4f69ff7fe368393c2773c7d0d
704,003
def _get_indent(line):
    """Return the indentation in front of a line"""
    indent = line.replace(line.lstrip(), '')
    return indent
6a5dd97d4c5702a55b8b1ddaad91c5ecb99458fa
704,004
def _GetRegionalSetRequest(client, health_check_ref, replacement):
    """Returns a request for updating the health check."""
    return (client.apitools_client.regionHealthChecks,
            'Update',
            client.messages.ComputeRegionHealthChecksUpdateRequest(
                healthCheck=health_check_ref.Name(),
                healthCheckResource=replacement,
                project=health_check_ref.project,
                region=health_check_ref.region))
ec47ac791fcf912ba7c05077b34f2f62e0949691
704,005
def problems(mod, msg):
    """Exit the Ansible module with an error."""
    return mod.exit_json(changed=False, failed=True, msg=msg)
3ec1a8c8843ba9b33d47e61d1b775aadb32ab45e
704,006
def to_sql_name(name):
    """
    Ensure ``name`` is a valid SQL name.
    """
    return name.lower().replace(' ', '_')
f3157d9444793d0af05e27317fdab2aa55531b84
704,007
import os
import json


def get_setting(key):
    """Get the secret variable or return an explicit exception."""
    try:
        base_dir = str(os.path.dirname(__file__))
        print("BASE DIR: " + base_dir)
        # Load config.json from the directory containing this module.
        with open(os.path.join(base_dir, "config.json")) as f:
            config_json = json.loads(f.read())
        return config_json[key]
    except KeyError:
        error_msg = "Set the {0} environment variable".format(key)
        raise KeyError(error_msg)
    except Exception as e:
        raise Exception("Some error occurred: ", e)
20cae220942c087f3d372444399f79e68aa49263
704,008
def polygon_to_points(poly):
    """
    Plotting helper, which rearranges polygon vertices into lists of
    X and Y coordinates. The first point is duplicated at the end of each
    list, to make a closed path.

    :Parameters:

    poly: tuple of ((x1,y1),(x2,y2),...)
        The coordinates of the vertices of the polygon.

    :Returns:

    (xlist, ylist): list of 2 tuples
        ((x1, x2, ..., x1), (y1, y2, ..., y1))
    """
    xlist = []
    ylist = []
    for vertex in poly:
        xlist.append(vertex[0])
        ylist.append(vertex[1])
    xlist.append(xlist[0])
    ylist.append(ylist[0])
    return (xlist, ylist)
958621cb7ff2d4fe22c4482e07a6a7ba614f9fc1
704,009
import re


def alphanumerical(string):
    """
    A function to filter a string to only allow alphanumerical characters.
    """
    pattern = re.compile('[^a-zA-Z0-9]+')
    return pattern.sub('', string)
3acd64f629e601c72421ac73955a61c1426d3a9d
704,010
def helper_parse_UniProt_dump_other_functions(list_of_string):
    """
    e.g. input
    [['EMBL; AY548484; AAT09660.1; -; Genomic_DNA.'],
     ['RefSeq; YP_031579.1; NC_005946.1.'],
     ['ProteinModelPortal; Q6GZX4; -.'],
     ['SwissPalm; Q6GZX4; -.'],
     ['GeneID; 2947773; -.'],
     ['KEGG; vg:2947773; -.'],
     ['Proteomes; UP000008770; Genome.'],
     ['GO; GO:0046782; P:regulation of viral transcription; IEA:InterPro.'],
     ['InterPro; IPR007031; Poxvirus_VLTF3.'],
     ['Pfam; PF04947; Pox_VLTF3; 1.']]
    EnsemblPlants; AT3G09880.1; AT3G09880.1; AT3G09880.
    """
    # GO, InterPro, Pfam, KEGG, Reactome, STRING, Proteomes = [], [], [], [], [], [], []
    GO, InterPro, Pfam, Reactome = [], [], [], []
    for row in list_of_string:
        row_split = row.split(";")
        func_type = row_split[0]
        try:
            annotation = row_split[1].strip()
        except IndexError:
            continue
        # if func_type == "KEGG":
        #     KEGG.append(annotation)
        if func_type == "GO":
            GO.append(annotation)
        elif func_type == "InterPro":
            InterPro.append(annotation)
        elif func_type == "Pfam":
            Pfam.append(annotation)
        elif func_type == "Reactome":
            # DR Reactome; R-DME-6799198; Complex I biogenesis.
            if annotation.startswith("R-"):
                # R-DME-6799198 --> DME-6799198
                annotation = annotation[2:]
            Reactome.append(annotation)
        # elif func_type == "STRING":
        #     funcs_2_return = []
        #     try:
        #         for func in [func.strip() for func in row_split[1:]]:
        #             if func.endswith("."):
        #                 func = func[:-1]
        #             if func == "-":
        #                 continue
        #             funcs_2_return.append(func)
        #     except IndexError:
        #         continue
        #     STRING += funcs_2_return
        # elif func_type == "Proteomes":
        #     Proteomes.append(annotation)
    # return [GO, InterPro, Pfam, KEGG, Reactome, STRING, Proteomes]
    return GO, InterPro, Pfam, Reactome
292cd9eca3636ba3cd6813df574de3bdd54aa48f
704,011
import numpy


def normalise_signal_minmax(signal: numpy.ndarray) -> numpy.ndarray:
    """Linearly rescale the signal from its min/max range to [-1, 1]."""
    return numpy.interp(signal, (signal.min(), signal.max()), (-1, 1))
266cb3c86fa5f48c997bb8f18e42cc38b2d05d7e
704,012
import os


def parallel():
    """
    Returns 'mpi' if this code runs with MPI, else returns 'seq'.

    :return:
    """
    return 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'
7a5212a1b563f2bd95130ee6b6381397a4ef2edd
704,013
def decode_dbkey(dbkey):
    """Decodes dbkey and returns tuple ( username, dbkey )"""
    if isinstance(dbkey, str) and ':' in dbkey:
        return dbkey.split(':')
    else:
        return None, dbkey
de2a076b36e4ea82412478384b65845af225b1ab
704,014
def get_case_related_entities(st, related_cases):
    """Returns all the entities featured in the related cases without
    duplicates.

    INPUT: Storage unit, related cases.
    OUTPUT: All the entities and their types contained in the related cases,
    without duplicates.
    """
    existing_ners = {}
    for c in related_cases:
        case_ents = st.get_case_entities(c)
        ents = {}
        for k, v in case_ents.items():
            ents.update(v)
        if not set(existing_ners) & set(ents):
            for k, e in ents.items():
                existing_ners[k] = e
    return existing_ners
7ecc8927720a22d5dc05c948b9abbae458aee426
704,015
import random


def get_documents_words(news_files, corpus_news):
    """
    Given a set of documents it will return the dictionary with their
    respective categories.

    :param news_files: List of raw news file names
    :param corpus_news: PlainTextCorpusReader object
    :return: Dictionary with words and categories associated
    """
    root = corpus_news.root
    news = []
    for file in news_files:
        category = file.split('/')[-1].split('--')[0]
        file_name = file.replace(root, '', 1)
        words = corpus_news.words(file_name[1:])
        news.append((list(words), category))
    random.shuffle(news)
    return news
ad189bf0e00118bebbb21794d959d903242ca10c
704,016
def question_first_sentence(database_connection, question):
    """Return the raw text stored for the given question id."""
    c = database_connection.cursor()
    command = 'select raw from text where question=%i' % question
    c.execute(command)
    for ii, in c:
        return ii
730e3c3825a47f66418a3c9decbd0e38755b6775
704,017
def sum_of_vals(vals):
    """
    :param vals: an iterable such as list
    :return: sum of the values from the iterable
    """
    total = 0
    for val in vals:
        total += val
    return total
aecc28e505acbea02f5ec41aec03b2db7bc3baad
704,018
def time_sa_to_s(t, clock_freq):
    """Convert from time in samples to time in seconds."""
    return float(t / clock_freq)
eabf76cda8529dc9c9ad0acc6466c5037062b295
704,019
def mod(ctx, arg):
    """Return a copy of the last action in ``ctx``; ``arg`` is unused."""
    action = ctx.copy_last_action()
    return action
e9007ffab406df13ddf01c6d65d58deaa713799c
704,020
def kdcompare(r, p, depth):
    """
    Returns the branch to search on a k-d tree

    Input
        r: root
        p: point
        depth: starting depth of search

    Output
        A value of -1 (left branch), or 1 (right)
    """
    k = len(p)
    dim = depth % k
    if p[dim] <= r.point[dim]:
        return -1
    else:
        return 1
c11aa24718b8a2d8d9e39852ca53e09118d3c215
704,021
def _has_class(domElement, className):
    """
    Helper function to test if the provided element has the provided class
    """
    return className in domElement.get_attribute('class').split(" ")
9a20557cc8d3e3dc91ac33764a6d94139b70f6f2
704,022
def format_datestamp(datestamp):
    """Format datestamp to an OAI-PMH compliant format.

    Parameters
    ----------
    datestamp: datetime.datetime
        A datestamp.

    Returns
    -------
    str: Formatted datestamp.
    """
    return datestamp.strftime('%Y-%m-%dT%H:%M:%SZ')
f050dd4f18691034c0414a4d9fa51629b0208d6a
704,023
import base64


def encode_base64(data: bytes) -> bytes:
    """Create a URL-safe base64 representation of the input bytes,
    stripping newlines."""
    return base64.urlsafe_b64encode(data).replace(b"\n", b"")
472ff045dc1df4ad5fe2a0e3001477a4d1c738fd
704,025
def yellow_bold(payload):
    """
    Format payload as bold yellow.
    """
    return '\x1b[33;1m{0}\x1b[39;22m'.format(payload)
2ae528c9dcc5a4f9b5f685f201f4d6696387a256
704,026
def add_trial_name_as_number(warm_up_data_df):
    """add column where trial_name is converted to number and
    before-/after-correction is added"""
    warm_up_data_df.insert(
        warm_up_data_df.shape[1],
        "trial_name_corrected_by_before_and_after",
        float(100)
    )
    offset = 0
    for index, row in warm_up_data_df.iterrows():
        if row.trial_name == "a":
            warm_up_data_df.at[index, "trial_name_number"] = 0 + offset
        elif row.trial_name == "b":
            warm_up_data_df.at[index, "trial_name_number"] = 1 + offset
        elif row.trial_name == "c":
            warm_up_data_df.at[index, "trial_name_number"] = 2 + offset
    return warm_up_data_df
160907c14f377cae0c2067dac1dd898f4e343056
704,027
def normalize(rendered):
    """Return the input string without non-functional spaces or newlines."""
    out = ''.join([line.strip()
                   for line in rendered.splitlines()
                   if line.strip()])
    out = out.replace(', ', ',')
    return out
02a87a7a5e596b45d15bb2559403e92cb69a2f1d
704,028
import aiohttp
import asyncio


async def test_download_speed(session: aiohttp.ClientSession, url: str) -> int:
    """Count the amount of data successfully downloaded."""
    result = 0
    try:
        async with session.get(url) as resp:
            while True:
                chunk = await resp.content.read(56)
                if not chunk:
                    break
                else:
                    result += len(chunk)
    except asyncio.CancelledError:
        pass
    finally:
        return result
c6ca9504f90cbb9091051931054f12f8498b8535
704,029
def compare_nodal_prices(df_dcopf, df_mppdc):
    """Find max absolute difference in nodal prices between DCOPF and MPPDC models

    Parameters
    ----------
    df_dcopf : pandas DataFrame
        Results from DCOPF model

    df_mppdc : pandas DataFrame
        Results from MPPDC model

    Returns
    -------
    max_price_difference : float
        Maximum difference between nodal prices for DCOPF and MPPDC models
        over all nodes and scenarios
    """
    # DCOPF model
    # -----------
    df_tmp_1 = df_dcopf.reset_index().copy()

    # Filter price records
    df_tmp_1 = df_tmp_1[df_tmp_1['index'].str.contains(r'\.POWER_BALANCE\[')]

    # Extract values
    df_tmp_1['Value'] = df_tmp_1.apply(lambda x: x['Constraint']['Dual'], axis=1)

    # Extract node and scenario IDs
    df_tmp_1['NODE_ID'] = df_tmp_1['index'].str.extract(r'\.POWER_BALANCE\[(\d+)\]').astype(int)
    df_tmp_1['SCENARIO_ID'] = df_tmp_1['SCENARIO_ID'].astype(int)

    # Prices at each node for each scenario
    df_dcopf_prices = df_tmp_1.set_index(['SCENARIO_ID', 'NODE_ID'])['Value']

    # MPPDC model
    # -----------
    df_tmp_2 = df_mppdc.reset_index().copy()

    # Filter price records
    df_tmp_2 = df_tmp_2[df_tmp_2['index'].str.contains(r'\.lambda_var\[')]

    # Extract values
    df_tmp_2['Value'] = df_tmp_2.apply(lambda x: x['Variable']['Value'], axis=1)

    # Extract node and scenario IDs
    df_tmp_2['NODE_ID'] = df_tmp_2['index'].str.extract(r'\.lambda_var\[(\d+)\]').astype(int)
    df_tmp_2['SCENARIO_ID'] = df_tmp_2['index'].str.extract(r'LL_DUAL\[(\d+)\]').astype(int)

    # Prices at each node for each scenario
    df_mppdc_prices = df_tmp_2.set_index(['SCENARIO_ID', 'NODE_ID'])['Value']

    # Compute difference between models
    # ---------------------------------
    max_price_difference = df_dcopf_prices.subtract(df_mppdc_prices).abs().max()
    print('Maximum difference between nodal prices over all nodes and scenarios: {0}'.format(max_price_difference))

    return max_price_difference
2368bb7f8534ac466ab7858fa1056e3fe5f48f16
704,030
def add(a, b):
    """A dummy function to add two variables"""
    return a + b
4914b8d73e6808d93e8e8ee98902ad3b093f1ce6
704,031
def truncate(s, eps):
    """
    Find the smallest k such that sum(s[:k]**2) >= 1 - eps.
    """
    mysum = 0.0
    k = -1
    while mysum < 1 - eps:
        k += 1
        mysum += s[k]**2
    return k + 1
fc9b5984316e969961b496fd54425e4f52f025ff
704,032
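A small worked example for truncate, assuming s holds singular values whose squares sum to roughly 1:

s = [0.8, 0.5, 0.3, 0.1]
# 0.8**2 + 0.5**2 = 0.89 < 0.9, so a third value is needed.
print(truncate(s, 0.1))  # 3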
def list_to_str(items):
    """
    :param items: an iterable of strings
    :return: the concatenation of all items into a single string
    """
    mystr = ''
    for item in items:
        mystr += item
    return mystr
6530a33641f261888094d4ecb6fff469a97d6c10
704,033
import torch


def to_minmax_form(boxes):
    """
    :param boxes: (n, 4) tensor, (cx, cy, w, h) form.
    :return: (n, 4) tensor, (xmin, ymin, xmax, ymax) form
    """
    xmin = boxes[:, 0] - boxes[:, 2] / 2 + 0.5
    ymin = boxes[:, 1] - boxes[:, 3] / 2 + 0.5
    xmax = boxes[:, 0] + boxes[:, 2] / 2 - 0.5
    ymax = boxes[:, 1] + boxes[:, 3] / 2 - 0.5
    # Stack along dim=1 so the result is (n, 4) rather than (4, n).
    return torch.stack([xmin, ymin, xmax, ymax], dim=1)
f00f703c78db7926bbea684147facc6fa553ac67
704,034
def compute_mean_nutrient_intake(nutrient_intake):
    """Compute mean nutrient intake"""
    nutrient_totals = nutrient_intake.sum()
    total_count = nutrient_intake.count()
    carb_mean = (nutrient_totals[0] / total_count)
    fiber_mean = (nutrient_totals[1] / total_count)
    fat_mean = (nutrient_totals[2] / total_count)
    prot_mean = (nutrient_totals[3] / total_count)
    nutrients_mean = [carb_mean, fiber_mean, fat_mean, prot_mean]
    return nutrients_mean
ff79f816162a8f555d5deb582eb9647f1cef66b9
704,035
import os


def delete_outputs(config, outcfg):
    """
    Remove pipeline outputs to save memory after running the job

    Parameters
    ----------
    config : dict-like
        Input configuration of job. Uses config["management"]["delete"]
        (list of keys used to index outcfg) to determine which files
        should be deleted
    outcfg : dict-like
        Output configuration of job

    Returns
    -------
    outcfg_cleaned : dict-like
        Output configuration with selected output keys removed.
    """
    # determine keys (corresponding to files) in
    # outcfg that should be deleted
    outkeys = config.get("management", {}).get("delete", None)

    # if no output keys are requested, nothing to do
    if outkeys is None:
        return outcfg

    # go through all flagged files and delete if existing
    for k in outkeys:
        # skip missing keys or ones not defined
        if k not in outcfg or k is None:
            continue

        # delete list of files
        if k.endswith("files"):
            for f in outcfg[k]:
                try:
                    os.remove(f)
                except OSError:
                    pass
            del outcfg[k]
        # delete individual file
        else:
            try:
                os.remove(outcfg[k])
                del outcfg[k]
            except OSError:
                pass

    return outcfg
1edf02ae14a755f77899c6d3be05ff11a2d6bcf3
704,036
import csv


def get_outputs(output_file):
    """
    Parse ``output_file`` which is a csv file and defines the semantics of
    the output of a neural network.

    For example, output neuron 1 means class "0" in the MNIST
    classification task.
    """
    outputs = []
    mode = "rt"
    with open(output_file, mode, newline="", encoding="utf8") as csvfile:
        spamreader = csv.reader(csvfile, delimiter="\n", quotechar="|")
        for row in spamreader:
            outputs.append(row[0])
    return outputs
b001942bc270952c9bb82537d0b5033c9ee968ff
704,037
def get_io_functions_from_lib(lib, load_func_name='load',
                              dump_func_name='dump',
                              load_kwargs={}, dump_kwargs={}):
    """Helper to create loader and dumper functions for libraries"""
    def loader(input_stream, args):
        return getattr(lib, load_func_name)(input_stream, **load_kwargs)

    def dumper(output, output_stream, args):
        return getattr(lib, dump_func_name)(output, output_stream,
                                            **dump_kwargs)

    return loader, dumper
1d81031c7e5421f190f8c8936d92f2a5a2d999bf
704,039
import sys


def is_piped_output():
    """
    Checks for piped output.

    This function checks if this script is being executed with a piped
    output. E.g.:

        python dnsrato.py -d domain.com --format json > outfile.json

    Returns
    -------
    bool
        True if there is a piped output, False otherwise.
    """
    return not sys.stdout.isatty()
4f59a6abc6bfaac47340434afd07c62aaf8a5ddc
704,040
def list_zeroes(line):
    """
    Takes a list of integers and removes all non-zero elements.
    """
    zeroes = []
    for item in line:
        if item == 0:
            zeroes.append(item)
    return zeroes
f10d6a59f8a6f00cb22d5f6ee6ce2087a4969b8e
704,042
def ObjectToDict(obj):
    """Converts an object into a dict."""
    keys = [k for k in dir(obj) if not k.startswith("__")]
    return {k: getattr(obj, k) for k in keys}
f2679daab84d5cee2c7f319d1d34f3c669971cd6
704,043
def ortho(subj_coord, obj_coord, subj_dim, obj_dim):
    """
    It returns a tuple of 3 values: new dim for combined array,
    component of subj_origin in it, component of obj_origin in it.
    """
    if subj_coord > obj_coord:
        return (subj_coord + (obj_dim - obj_coord), 0, subj_coord - obj_coord)
    if subj_coord < obj_coord:
        return (obj_coord + (subj_dim - subj_coord), obj_coord - subj_coord, 0)
    if subj_dim > obj_dim:
        # There is place for obj_mod's tokens in subj_mod,
        # no increase of dims needed: use subj_mod's dims.
        return (subj_dim, 0, 0)
    # There is place for subj_mod's tokens in obj_mod,
    # no increase of dims needed: use obj_mod's dims.
    return (obj_dim, 0, 0)
36a3127b0721ac87e81259432823eab26fc015e9
704,044
def _unravel(nodes, tets, index):
    """Returns a list containing the node coordinates of the tet stored
    in the 'index' position in the 'tets' list."""
    return [nodes[tets[index][0]],
            nodes[tets[index][1]],
            nodes[tets[index][2]],
            nodes[tets[index][3]]]
e8428de351db2a84a4875a81b47d07b03a67efd9
704,045
def visdom_image(vis, image, win_name):
    """
    e.g. visdom_image(vis=vis, image=drawn_image, win_name='image')

    :param vis: created by the setup_visdom function
    :param image: a single image tensor, shape: [n, w, h]
    :param win_name: name of the plotting window; it must be given,
        otherwise a new window is created on every call
    :return:
    """
    vis.image(img=image, win=win_name)
    return True
145ad236c0e07a79195a573b562ffd9a8654be96
704,046
def file_num2size(num_size, h=True):
    """Convert a numeric file size into a human-readable representation.

    :param num_size: file size in bytes
    :param h: whether to render a human-readable value
    :return: {'value': numeric value, 'measure': unit, 'str': display string,
        'org_size': original size}
    """
    measure_list = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    fsize = num_size
    i = 0
    while (fsize >= 1) and (i < len(measure_list)) and h:
        if fsize < 1024:
            break
        else:
            fsize = fsize / 1024.0
            i += 1
    i = min(i, len(measure_list) - 1)
    fsize = round(fsize, 2) if not isinstance(fsize, int) else fsize
    res_info = {'value': fsize,
                'measure': measure_list[i],
                'str': str(fsize) + measure_list[i],
                'org_size': num_size}
    return res_info
f3cf060b4015831381ab5d5b3ed236d8519746cc
704,047
import types
import asyncio


def loop_apply_coroutine(loop, func: types.FunctionType, *args, **kwargs) -> object:
    """
    Call a function with the supplied arguments.
    If the result is a coroutine, use the supplied loop to run it.
    """
    if asyncio.iscoroutinefunction(func):
        future = asyncio.ensure_future(func(*args, **kwargs), loop=loop)
        loop.run_until_complete(future)
        return future.result()
    else:
        return func(*args, **kwargs)
d77a70540237f690e712e30b93b53b363907b678
704,049
def zipmap(keys, vals):
    """
    Return a ``dict`` with the keys mapped to the corresponding ``vals``.
    """
    return dict(zip(keys, vals))
a058e5a4e462416f48d83b3c288a0cd8d6b000ef
704,050
def _create_thingy(sql_entity, session):
    """Internal call that holds the boilerplate for putting a new SQLAlchemy
    object into the database.

    BC suggested this should be a decorator but I don't think that aids
    legibility. Maybe should rename this though.
    """
    session.add(sql_entity)
    # Note that this commit causes the .id to be populated.
    session.commit()
    return sql_entity.id
4d50da3a15606c7adf61e2c9c09a4a3a9898edf9
704,051
def maxsubarray(nums):
    """
    Naive approach to calculating the max subarray, iterating over all
    possible subarrays.

    Complexity (n = list size)
        Time complexity: O(n^2)
        Space complexity: O(1)
    """
    maxStart = 0
    maxEnd = 0
    maxSum = nums[0]
    for i in range(len(nums)):
        currentSum = 0
        for j in range(i, len(nums)):
            currentSum += nums[j]
            if currentSum > maxSum:
                maxSum = currentSum
                maxStart = i
                maxEnd = j
    return (maxSum, maxStart, maxEnd)
71b4a12d02fab45fc14890ae4a34a0dc50d6a7b4
704,052
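For reference, maxsubarray on the standard test case; the maximum-sum window is [4, -1, 2, 1] at indices 3 to 6:

print(maxsubarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # (6, 3, 6)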
from typing import Sequence


def is_sequence(obj):
    """Is the object a *non-str* sequence?

    Checking whether an object is a *non-string* sequence is a bit
    unwieldy. This makes it simple.
    """
    return isinstance(obj, Sequence) and not isinstance(obj, str)
06129c6122fec0290edb34cadc75b68199738435
704,053
def getAxisList(var):
    """
    Returns a list of coordinates from: var
    """
    return [var.coords[key] for key in var.dims]
eae9b971bcbf021ef2203dd6cb21df6343d0f19a
704,054
def skip_nothing(name, dirpath):
    """Always returns :obj:`False`."""
    return False
9e846b7060af43b2c4165e6530fcabc66415615b
704,056
import os
import subprocess
import re


def get_version_number(klocwork_path):
    """This function determines the Klocwork version number.

    Inputs:
        - klocwork_path: Absolute path to the bin directory of the Klocwork
          installation [string]

    Outputs:
        - version_number: The version number of the Klocwork instance being
          tested [string]
    """
    try:
        # Set the path, if necessary
        if klocwork_path == '':
            call_string = 'which kwinject'
            my_env = os.environ.copy()
            subprocess.call(call_string, shell=True, env=my_env)
            proc = subprocess.Popen(call_string, shell=True, env=my_env,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    encoding='utf-8')
            klocwork_path = os.path.dirname(proc.communicate()[0].strip())

        # Get the version number
        call_string = klocwork_path + '/kwinject -v'
        my_env = os.environ.copy()
        proc = subprocess.Popen(call_string, shell=True, env=my_env,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                encoding='utf-8')
        std_out, std_err = proc.communicate()

        # Parse the version number from the output
        version_number = re.split(' ', re.split('\n', std_out)[1])[-1]

        # Truncate the version number if necessary
        version_split = re.split('\\.', version_number)
        if len(version_split) > 3:
            version_number = '.'.join(version_split[0:3])
    except:  # lgtm [py/catch-base-exception]
        version_number = 'Unknown'

    return version_number
08f919420a1f2465895b3cab26d062fc78b58b95
704,057
import os
import sys


def credentials():
    """Retrieves credentials"""
    username = os.environ.get('OS_USERNAME')
    password = os.environ.get('OS_PASSWORD')
    tenant_name = os.environ.get('OS_TENANT_NAME')
    auth_url = os.environ.get('OS_AUTH_URL')
    if not all((username, password, tenant_name, auth_url)):
        sys.stderr.write("ERROR: Unable to get Keystone credentials\n")
        exit(1)
    return {
        'username': username,
        'password': password,
        'tenant_name': tenant_name,
        'auth_url': auth_url
    }
6ac966762db6bb8c9079502d4adcfbf94ec753e8
704,058
def centtoinch(cents):
    """Centimeters to inches."""
    return .3937 * cents
517142a29242246721abd05638c8ecbefcd888cb
704,059
def get_top_element_count(mol, top):
    """
    Returns the element count for the molecule considering only the atom
    indices in ``top``.

    Args:
        mol (Molecule): The molecule to consider.
        top (list): The atom indices to consider.

    Returns:
        dict: The element count, keys are tuples of (element symbol,
        isotope number), values are counts.
    """
    if not isinstance(top, list):
        top = list(top)
    element_count = {}
    for i, atom in enumerate(mol.atoms):
        if i in top:
            key = (atom.element.symbol, atom.element.isotope)
            if key in element_count:
                element_count[key] += 1
            else:
                element_count[key] = 1
    return element_count
f7293c1d154346c955052ceff0ee59483538bdc3
704,060
def at_least_one_shift_each(cur_individual):
    """Checks if there is at least one of each shift: 01, 10, 11."""
    num_entrega = 0
    num_recogida = 0
    num_dual = 0
    while cur_individual:
        shift = cur_individual[:2]
        cur_individual = cur_individual[2:]
        if shift == '01':
            num_entrega += 1
        elif shift == '10':
            num_recogida += 1
        elif shift == '11':
            num_dual += 1
        if num_entrega > 0 and num_recogida > 0 and num_dual > 0:
            return True
    return False
070fe16e779ab30bcee7873ec01876962f30ec91
704,062
from datetime import datetime

import pytz


def get_aware_utc_now():
    """Create a timezone aware UTC datetime object from the system time.

    :returns: an aware UTC datetime object
    :rtype: datetime
    """
    utcnow = datetime.utcnow()
    utcnow = pytz.UTC.localize(utcnow)
    return utcnow
b0b102b8d7d49e0d7d4041a476502cdd683dc8af
704,063
from typing import Any, Callable, Iterator, Optional
import inspect


def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
    """Iterates a response JSON yielding data point by point.

    Can be used with both regular and chunked responses. By default,
    returns just a plain list of values representing each point, without
    column names, or other metadata.

    In case a specific format is needed, an optional ``parser`` argument
    can be passed. ``parser`` is a function/callable that takes data point
    values and, optionally, a ``meta`` keyword argument: a dictionary
    containing all or a subset of the following:
    ``{'columns', 'name', 'tags', 'statement_id'}``.

    Sample parser functions:

    .. code:: python

        # Function with optional meta argument
        def parser(*x, meta):
            return dict(zip(meta['columns'], x))

        # Namedtuple (callable)
        from collections import namedtuple
        parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])

    :param resp: Dictionary containing parsed JSON
        (output from InfluxDBClient.query)
    :param parser: Optional parser function/callable
    :return: Generator object
    """
    for statement in resp['results']:
        if 'series' not in statement:
            continue
        for series in statement['series']:
            if parser is None:
                return (x for x in series['values'])
            elif 'meta' in inspect.signature(parser).parameters:
                meta = {k: series[k] for k in series if k != 'values'}
                meta['statement_id'] = statement['statement_id']
                return (parser(*x, meta=meta) for x in series['values'])
            else:
                return (parser(*x) for x in series['values'])
    return iter([])
000c2c873ab38378bb42945ed3304213b254061a
704,064
def convert_output_key(name):
    """
    Convert output name into IE-like name

    :param name: output name to convert
    :return: IE-like output name
    """
    if not isinstance(name, tuple):
        return name
    if len(name) != 2:
        raise Exception('stats name should be a string name or 2 elements tuple '
                        'with string as the first item and port number the second')
    return '{}.{}'.format(*name)
d5c59766c615e0e7b45f173948692050a7b890e3
704,065
def in_polygon(point, polygon):
    """Simple wrapper on the within method of shapely points

    Params:
        point (POINT) a shapely point object
        polygon (POLYGON) a shapely polygon object (the target overlap area)

    Returns:
        (bool) whether or not the point is within polygon expressed
        as a boolean
    """
    return point.within(polygon)
0a26d023672162a53affddfe23a89361d900d9a0
704,066
import typing


def parse_patter(pats: typing.List[str]) -> typing.Tuple[typing.List[str], typing.List[str]]:
    """
    Parse match patterns.
    ========
    /xx/  # matches folders only under the root directory
    xx/   # matches folders anywhere
    /xx   # matches files only under the root directory
    xx    # matches files anywhere
    !xx   # everything except xx
    =========
    /xx/ => xx + xx/**
    xx/  => xx + xx/** + **/xx + **/xx/**
    /xx  => xx
    xx   => xx + **/xx
    !xx  => everything except xx

    :param pats: list of patterns
    :return: patterns to include, patterns to exclude
    """
    pats_includes = []
    pats_excludes = []
    for pat in pats:
        if pat.startswith('!'):
            pat = pat[1:]
            pats_n = pats_excludes
        else:
            pats_n = pats_includes
        pats_n.append(pat)
        if pat.endswith('/'):
            # folder: xx/
            if pat.startswith('/'):
                # a leading '/' marks an entry under the root directory
                # folder under the root: /xx/ => xx or xx/**
                pats_n.append(pat[1:-1])
                pats_n.append(pat[1:] + '**')
            else:
                # xx/ => xx or xx/** or **/xx or **/xx/**
                pats_n.append(pat[:-1])
                pats_n.append(pat + '**')
                pats_n.append('**/' + pat[:-1])
                pats_n.append('**/' + pat + '**')
        else:
            if pat.startswith('/'):
                # a leading '/' marks an entry under the root directory
                # file under the root: /xx => xx
                pats_n.append(pat[1:])
            else:
                # xx => xx or **/xx
                pats_n.append('**/' + pat)
    return pats_includes, pats_excludes
17eca544b157fe2ad406e1c4385c49acc53ff0d7
704,067
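An illustrative call to parse_patter; the 'docs/' and '!/secret' patterns are hypothetical:

includes, excludes = parse_patter(['docs/', '!/secret'])
print(includes)  # ['docs/', 'docs', 'docs/**', '**/docs', '**/docs/**']
print(excludes)  # ['/secret', 'secret']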
import os


def is_downloaded(folder, path_):
    """Check if the .html file exists.

    Args:
        folder: the folder in which the book is placed
        path_: the path of the .html page

    Returns:
        bool: whether the file at ``path_`` exists
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    return bool(os.path.exists(path_))
1d480fedb13429baa4bc3234f7a42cafe8dba93c
704,068
def set_spines(ax, plot_params):
    """
    Sets spines of the shift graph to be invisible if chosen by the user.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    plot_params: dict
        Dictionary of plotting parameters. Here `invisible_spines` is used
    """
    spines = plot_params["invisible_spines"]
    if spines:
        for spine in spines:
            if spine in {"left", "right", "top", "bottom"}:
                ax.spines[spine].set_visible(False)
            else:
                print("invalid spine argument")
    return ax
4e74ce30f52d465e9470f608cd8c909dfae4d0a5
704,069
def matches(G, queue):
    """
    If the sequence in 'queue' corresponds to a node in 'G', return the
    sequence id, otherwise return False.
    """
    if not queue:
        return False
    seq = 0
    for a in queue:
        try:
            seq = G[seq][a]
        except KeyError:
            return False
    return seq
859f43f6457b4add4cda88495a8dc6c4a559e5d5
704,071
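A sketch of matches, assuming G is a dict-of-dicts transition table keyed by node id and edge label:

G = {0: {'a': 1}, 1: {'b': 2}}
print(matches(G, 'ab'))  # 2
print(matches(G, 'ax'))  # False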
def getPortList(chute):
    """
    Get a list of ports to expose in the format expected by
    create_container.

    Uses the port binding dictionary from the chute host_config section.
    The keys are expected to be integers or strings in one of the
    following formats: "port" or "port/protocol".

    Example:
        port_bindings = {
            "1111/udp": 1111,
            "2222": 2222
        }
        getPortList returns [(1111, 'udp'), (2222, 'tcp')]
    """
    if not hasattr(chute, 'host_config') or chute.host_config is None:
        config = {}
    else:
        config = chute.host_config

    ports = []
    for port in config.get('port_bindings', {}).keys():
        if isinstance(port, int):
            ports.append((port, 'tcp'))
            continue
        parts = port.split('/')
        if len(parts) == 1:
            ports.append((int(parts[0]), 'tcp'))
        else:
            ports.append((int(parts[0]), parts[1]))

    # If the chute is configured to host a web service, check
    # whether there is already an entry in the list for the
    # web port. If not, we should add one.
    web_port = chute.getWebPort()
    if web_port is not None:
        if not any(p[0] == web_port for p in ports):
            ports.append((web_port, 'tcp'))

    return ports
eb97568befca72f6d9cb6c455d8d9ad03b13eebf
704,072
def get_datasets_dbpedia(subset, limit):
    """
    Loads dbpedia data from files, splits the data into words and
    generates labels. Returns split sentences and labels.
    """
    datasets = dict()
    data = []
    target = []
    target_int = []
    target_names = []
    filename = 'data/dbpedia/' + subset + '.csv'
    last_label = ''
    i = 0
    # Load data from files
    with open(filename, 'r') as f:
        for line in f:
            label, header, text = line.split(',')
            # print('.', end='')
            if i >= limit:
                if label == last_label:
                    continue
                else:
                    i = 0  # reset i
            print('Entry : {}, label:{}, header: {}'.format(i, label, header))
            # remove blank spaces from text and insert into list 'data'
            data.append(text.strip())
            target.append(int(label) - 1)
            if label not in target_names:
                target_names.append(label)
            last_label = label
            i += 1
    datasets['data'] = data
    datasets['target'] = target
    datasets['target_names'] = target_names
    return datasets
03f567e814b3ca0227b558177feb8a2165256cfc
704,073
import glob
import sys
import os


def recursive_glob(path):
    """Version-agnostic recursive glob.

    Implements the Python 3.5+ glob module's recursive glob for
    Python 2.7+. Recursive glob emulates the bash shell's globstar
    feature, which is enabled with `shopt -s globstar`.

    Args:
        path: A path that may contain `**` and `*` ("magic").

    Returns:
        The expanded list of paths with magic removed.
    """
    if "*" not in path:
        # Glob isn't needed.
        return [path]
    elif "**" not in path:
        # Recursive glob isn't needed.
        return glob.glob(path)
    elif sys.version_info >= (3, 5):
        # Recursive glob is supported.
        # Pylint doesn't respect the version check.
        # pylint: disable=unexpected-keyword-arg
        return glob.glob(path, recursive=True)
        # pylint: enable=unexpected-keyword-arg

    # Simulate recursive glob with os.walk.
    left, right = path.split("**", 1)
    if not left:
        left = "." + os.sep
    right = right.lstrip(os.sep)
    paths = []
    for d, _, _ in os.walk(left):
        # Don't recurse into hidden directories. Note that the current
        # directory ends with '/', giving it a basename of '', which
        # prevents this check from accidentally skipping it.
        if not os.path.basename(d).startswith("."):
            paths += recursive_glob(os.path.join(d, right))
    return paths
6a738db431a0230c186f14f6739f1beb20e1ff97
704,074
import re


def camelcase_to_snakecase(value: str) -> str:
    """
    Convert a string from camelCase to snake_case.

    >>> camelcase_to_snakecase('')
    ''
    >>> camelcase_to_snakecase('foo')
    'foo'
    >>> camelcase_to_snakecase('fooBarBaz')
    'foo_bar_baz'
    >>> camelcase_to_snakecase('foo_bar_baz')
    'foo_bar_baz'
    >>> camelcase_to_snakecase('_fooBarBaz')
    '_foo_bar_baz'
    >>> camelcase_to_snakecase('__fooBarBaz_')
    '__foo_bar_baz_'
    """
    value = re.sub(r"[\-\.\s]", "_", value)
    if not value:
        return value
    return value[0].lower() + re.sub(
        r"[A-Z]", lambda matched: "_" + matched.group(0).lower(), value[1:]
    )
05fe02739e8152bc64ab35bd842162b5d7c3ab4c
704,075
def best_archiver(random, population, archive, args):
    """Archive only the best individual(s).

    This function archives the best solutions and removes inferior ones.
    If the comparison operators have been overloaded to define Pareto
    preference (as in the ``Pareto`` class), then this archiver will form
    a Pareto archive.

    .. Arguments:
       random -- the random number generator object
       population -- the population of individuals
       archive -- the current archive of individuals
       args -- a dictionary of keyword arguments
    """
    new_archive = archive
    for ind in population:
        if len(new_archive) == 0:
            new_archive.append(ind)
        else:
            should_remove = []
            should_add = True
            for a in new_archive:
                if ind.candidate == a.candidate:
                    should_add = False
                    break
                elif ind < a:
                    should_add = False
                elif ind > a:
                    should_remove.append(a)
            for r in should_remove:
                new_archive.remove(r)
            if should_add:
                new_archive.append(ind)
    return new_archive
20d606f5ea4d76b1c6bd2acfd6bf1017e694f026
704,076
def elk_index(hashDict):
    """Index setup for ELK Stack bulk install"""
    index_tag_full = {}
    index_tag_inner = {}
    index_tag_inner['_index'] = "hash-data"
    index_tag_inner['_id'] = hashDict['hashvalue']
    index_tag_full['index'] = index_tag_inner
    return index_tag_full
9adcc529b88b319180e223ba9e47bda51e628478
704,078
import uuid


def generate_unique_str(allow_dashes=True):
    """Generate a unique string using the uuid package.

    Args:
        allow_dashes (bool, optional): If True use uuid4(), otherwise use
            the hex form, which skips the dashes in names.
            Defaults to True.
    """
    if allow_dashes:
        unique_str = str(uuid.uuid4())
    else:
        unique_str = uuid.uuid4().hex
    return unique_str
9a08364837ea719454b885fcb344631005e7a610
704,079
def import_atm_mass_info():
    """
    Function to load a dictionary storing atomic mass information by atom
    type.

    dict
        dictionary of atomic mass by atom name
    """
    massdict = {
        'H': 1.00797, 'HE': 4.0026, 'LI': 6.941, 'BE': 9.01218,
        'B': 10.81, 'C': 12.011, 'N': 14.0067, 'O': 15.9994,
        'F': 18.998403, 'NE': 20.179, 'NA': 22.98977, 'MG': 24.305,
        'AL': 26.98154, 'SI': 28.0855, 'P': 30.97376, 'S': 32.06,
        'CL': 35.453, 'K': 39.0983, 'AR': 39.948, 'CA': 40.08,
        'SC': 44.9559, 'TI': 47.9, 'V': 50.9415, 'CR': 51.996,
        'MN': 54.938, 'FE': 55.847, 'NI': 58.7, 'CO': 58.9332,
        'CU': 63.546, 'ZN': 65.38, 'GA': 69.72, 'GE': 72.59,
        'AS': 74.9216, 'SE': 78.96, 'BR': 79.904, 'KR': 83.8,
        'RB': 85.4678, 'SR': 87.62, 'Y': 88.9059, 'ZR': 91.22,
        'NB': 92.9064, 'MO': 95.94, 'TC': -98.0, 'RU': 101.07,
        'RH': 102.9055, 'PD': 106.4, 'AG': 107.868, 'CD': 112.41,
        'IN': 114.82, 'SN': 118.69, 'SB': 121.75, 'I': 126.9045,
        'TE': 127.6, 'XE': 131.3, 'CS': 132.9054, 'BA': 137.33,
        'LA': 138.9055, 'CE': 140.12, 'PR': 140.9077, 'ND': 144.24,
        'PM': -145.0, 'SM': 150.4, 'EU': 151.96, 'GD': 157.25,
        'TB': 158.9254, 'DY': 162.5, 'HO': 164.9304, 'ER': 167.26,
        'TM': 168.9342, 'YB': 173.04, 'LU': 174.967, 'HF': 178.49,
        'TA': 180.9479, 'W': 183.85, 'RE': 186.207, 'OS': 190.2,
        'IR': 192.22, 'PT': 195.09, 'AU': 196.9665, 'HG': 200.59,
        'TL': 204.37, 'PB': 207.2, 'BI': 208.9804, 'PO': -209.0,
        'AT': -210.0, 'RN': -222.0, 'FR': -223.0, 'RA': 226.0254,
        'AC': 227.0278, 'PA': 231.0359, 'TH': 232.0381, 'NP': 237.0482,
        'U': 238.029, 'PU': -242.0, 'AM': -243.0, 'BK': -247.0,
        'CM': -247.0, 'NO': -250.0, 'CF': -251.0, 'ES': -252.0,
        'HS': -255.0, 'MT': -256.0, 'FM': -257.0, 'MD': -258.0,
        'LR': -260.0, 'RF': -261.0, 'BH': -262.0, 'DB': -262.0,
        'SG': -263.0, 'UUN': -269.0, 'UUU': -272.0, 'UUB': -277.0,
        '—': 0.0, 'UUQ': 0.0}
    return massdict
c470495f30d77b41642bfd07f52a82b7a060ffb5
704,080
def remove_batch_from_layout(layout):
    """
    The tf-mesh layout splits across batch size; remove it.
    Useful for prediction steps, when you no longer want large batches.

    :param layout: string describing tf-mesh layout
    :return: layout minus batch dimension
    """
    layout = layout.split(',')
    ret_layout = ""
    for i in layout:
        if "batch" in i:
            pass
        else:
            ret_layout += f"{i},"
    return ret_layout[:-1]
44d032504055e1133a6dc97ea040ff44ea2ac327
704,081
def _knapsack01_recur(val, wt, wt_cap, n):
    """0-1 Knapsack Problem by naive recursion.

    Time complexity: O(2^n), where n is the number of items.
    Space complexity: O(n).
    """
    if n < 0 or wt_cap == 0:
        return 0

    if wt[n] > wt_cap:
        # Cannot be put.
        max_val = _knapsack01_recur(val, wt, wt_cap, n - 1)
    else:
        # Can be put: to put or not to put.
        val_in = val[n] + _knapsack01_recur(val, wt, wt_cap - wt[n], n - 1)
        val_ex = _knapsack01_recur(val, wt, wt_cap, n - 1)
        max_val = max(val_in, val_ex)
    return max_val
88f73b2e2f577b5e17a4ba235699ad542dfc7f0d
704,082
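A worked call to _knapsack01_recur on the textbook instance (values 60/100/120, weights 10/20/30, capacity 50):

values = [60, 100, 120]
weights = [10, 20, 30]
print(_knapsack01_recur(values, weights, 50, len(values) - 1))  # 220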
def matchElements(e1, e2, match):
    """
    Test whether two elements have the same attributes. Used to check
    equality of elements beyond the primary key (the first match option)
    """
    isMatch = True
    for matchCondition in match:
        if e1.attrib[matchCondition] != e2.attrib[matchCondition]:
            isMatch = False
            break
    return isMatch
1aa57a3a9a3123445e0234efa506f95616f90ef8
704,083
def auc_step(X, Y):
    """Compute area under curve using step function (in 'post' mode)."""
    if len(X) != len(Y):
        raise ValueError(
            "The length of X and Y should be equal but got " +
            "{} and {} !".format(len(X), len(Y))
        )

    area = 0
    for i in range(len(X) - 1):
        delta_X = X[i + 1] - X[i]
        area += delta_X * Y[i]
    return area
886f410a35a49a7098c1f2dcd145b54d1b54d423
704,084
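A small numeric check for auc_step; in 'post' mode each Y value extends rightward, so the last Y is never used:

X = [0.0, 1.0, 2.0, 4.0]
Y = [1.0, 3.0, 2.0, 5.0]
print(auc_step(X, Y))  # 1*1 + 1*3 + 2*2 = 8.0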
def calculate_num_modules(slot_map):
    """
    Reads the slot map and counts the number of modules we have in total.

    :param slot_map: The slot map containing the number of modules.
    :return: The number of modules counted in the config.
    """
    return sum([len(v) for v in slot_map.values()])
efbb82a54843f093a5527ebb6a1d4c4b75668ebb
704,085
def df2dicts(df):
    """df to dicts list"""
    columns = list(df.columns)
    dicts = []
    for line in df.itertuples():
        dicts.append(dict(zip(columns, list(line)[1:])))
    return dicts
f63520c22766a2454e52f17d539a876d1eea4fa5
704,087
import filecmp


def files_differ(path_a, path_b):
    """
    True if the files at `path_a` and `path_b` have different content.
    """
    # shallow=False forces a byte-by-byte comparison instead of relying
    # on os.stat() signatures.
    return not filecmp.cmp(path_a, path_b, shallow=False)
ea0382e619228cd0fc042a9003c34f33bd53f313
704,089
def m2(topic_srs, topic_vol, sharpe, ref_vol, cum=False, annual_factor=1):
    """Calcs M2 return, which is a port-to-market vol adjusted return measure.

    The Sharpe ratio can be difficult to interpret since it's a ratio, so
    M2 converts a Sharpe to a return number.

    Args:
        topic_srs (Pandas DataFrame of float): The series of interest.
        topic_vol (Pandas DataFrame of float): The volatility of the topic
            series.
        sharpe (Pandas DataFrame of float): The Sharpe ratio of the topic.
        ref_vol (Pandas DataFrame of float): The reference series'
            volatility. The M2 return calculated will be comparable to
            this reference series' return.
        cum (bool, optional): Boolean flag to indicate calculating a
            cumulative value. (default is False)
        annual_factor (float, optional): The factor used to annualize the
            M2 value. (default is 1)

    Returns:
        Pandas DataFrame of float: M2 return.
    """
    return (topic_srs + (sharpe * (ref_vol - topic_vol))) * annual_factor
8b05b0419db895d1de756cfb8751b9311cd43eca
704,090
def helper(n, largest_digit):
    """
    :param n: int, a number
    :param largest_digit: int, the largest digit found so far
    :return: int, the largest digit
    """
    if n == 0:
        # base case
        return largest_digit
    else:
        if n < 0:
            # convert negative n into positive, if any
            n = n * -1
        if n % 10 > largest_digit:
            largest_digit = n % 10
        return helper(int(n / 10), largest_digit)
557a6ce39a31f7edd2438bea43be9d8abfec47c5
704,091
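An illustrative call to helper, started with 0 as the largest digit seen so far:

print(helper(-3472, 0))  # 7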
import networkx


def do_to_networkx(do):
    """Return a networkx representation of do"""
    terms = do.get_terms()
    dox = networkx.MultiDiGraph()
    dox.add_nodes_from(term for term in terms if not term.obsolete)
    for term in dox:
        for typedef, id_, name in term.relationships:
            dox.add_edge(term, do.get_term(id_), key=typedef)
    assert networkx.is_directed_acyclic_graph(dox)
    return dox
97c27a5e6ec3c0467fe42f34325aac5b565f5be3
704,092
def find_largest(line: str) -> int:
    """Return the largest value in line, which is a whitespace-delimited
    string of integers that each end with a '.'.

    >>> find_largest('1. 3. 2. 5. 2.')
    5
    """
    # The largest value seen so far.
    largest = -1
    for value in line.split():
        # Remove the trailing period.
        v = int(value[:-1])
        # If we find a larger value, remember it.
        if v > largest:
            largest = v
    return largest
95ceb1e79812e9ef9c7338f393e9d22224eb5a03
704,093
def _shot_id_to_int(shot_id):
    """
    Returns:
        the shot id converted to an integer
    """
    tokens = shot_id.split(".")
    return int(tokens[0])
59d0ecabf874841d616a72ebea1ebac6e6dc3947
704,094
import ctypes


def get_native_pointer_type(pointer_size: int):
    """
    :return: A type that can represent a pointer.
    """
    return {
        ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
        ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64,
    }[pointer_size]
2364bde2f7bfb7ce2b743d8958551156c847f847
704,095
from typing import List, Union


def trim_name(names: Union[List[str], str]) -> Union[List[str], str]:
    """Trims the name from the web API, specifically from IFTTT
    (removes the extra "the ")."""
    # Single name
    if isinstance(names, str):
        trimmed_name = names.lower().replace("the", "").strip()
        return trimmed_name
    elif isinstance(names, list):
        trimmed_names: List[str] = []
        for name in names:
            trimmed_names.append(str(trim_name(name)))
        return trimmed_names
    return ""
ee0bf0dcb9353fcad1b4af5f50bf2ff208c1dbe2
704,096
def compare(Target, Population):
    """
    This function takes in two picture objects and compares them.

    :param Target: target image
    :type Target: Picture object
    :param Population: The population of the current generations
    :type Population: A list of picture objects
    :return: Two best members of the population
    :rtype: A list of picture objects
    """
    target_rep = Target.get_rep()
    target_len = len(target_rep)
    best_pop = []
    correctness_list = []
    for member in Population:
        correct = 0
        member_rep = member.get_rep()
        if target_len != len(member_rep):
            print("Error: Images must be of the same size")
        else:
            for x in range(0, len(target_rep)):
                if target_rep[x] == member_rep[x]:
                    correct += 1
        correctness_list.append(correct)
    best_idx = correctness_list.index(max(correctness_list))
    best_pop.append(Population[best_idx])
    del correctness_list[best_idx]
    second_best_idx = correctness_list.index(max(correctness_list))
    best_pop.append(Population[second_best_idx])
    return best_pop
4456141d1c980c5ca008d614ced05e1ec2efc062
704,097
def strify(iterable_struct, delimiter=','):
    """
    Convert an iterable structure to a delimiter-separated string.

    :param iterable_struct: an iterable structure
    :param delimiter: separator character, default comma
    :return: a string with delimiter-separated values
    """
    return delimiter.join(map(str, iterable_struct))
3c8337691c9008449a86e1805fe703d6da73a523
704,098
def generate_pattern_eq_ipv4(value):
    """Makes a pattern to check an ip address."""
    return "ipv4-addr:value = '" + value + "'"
36b4a09363512709c3bdf8046ea52f8ba14aa8e7
704,099
import requests
import sys


def print_server_info(ip, user, password):
    """
    Fetch and print server info.

    @params:
        ip       - Required : the ip of the server (Str)
        user     - Required : the administrator username (Str)
        password - Required : the administrator password (Str)
    """
    try:
        r = requests.get(f'https://{ip}:8443/api/v2/server',
                         auth=(user, password), verify=False)
        if r.status_code != 200:
            raise Exception(
                f"Invalid response from plesk api. Response code: {r.status_code}")
        data = r.json()
        return print(
            f"{'='*100}\nServer info: {data['hostname']}, "
            f"platform: {data['platform']}, "
            f"panel version: {data['panel_version']} ({data['panel_revision']})\n"
            f"{'='*100}\n")
    except:
        sys.exit("Error occurred while trying to get server info")
77280b61a71f58f827e879a09f8e72f984d759d9
704,100
def sanitize_df(df, d_round=2, **options):
    """All dataframe cleaning and standardizing logic goes here."""
    for c in df.columns[df.dtypes == float]:
        df[c] = df[c].round(d_round)
    return df
cb411b0019112155311a926ec145becc0f8c4ce9
704,101
import torch


def undo_imagenet_preprocess(image):
    """
    Undo imagenet preprocessing.

    Input:
    - image (pytorch tensor): image after imagenet preprocessing in CPU,
      shape = (3, 224, 224)

    Output:
    - undo_image (pytorch tensor): pixel values in [0, 1]
    """
    mean = torch.Tensor([0.485, 0.456, 0.406]).view((3, 1, 1))
    std = torch.Tensor([0.229, 0.224, 0.225]).view((3, 1, 1))
    undo_image = image * std
    undo_image += mean
    return undo_image
57d4cfc365c4e6c2dcfd37c8a2c500465daa421a
704,102
def collapse(html):
    """Remove any indentation and newlines from the html."""
    return ''.join([line.strip() for line in html.split('\n')]).strip()
a5a55691f2f51401dbd8b933562266cbed90c63d
704,103
def get_pagination_request_params():
    """
    Pagination request params for a @doc decorator in API view.
    """
    return {
        "page": "Page",
        "per_page": "Items per page",
    }
8ceb2f8ead3d9285017b595671f02817d098bc40
704,104