content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Dict
from typing import List
import pathlib
import json


def json_loader(path_to_json_file: str) -> Dict[str, List[str]]:
    """Read a JSON file and return its content as a dictionary.

    Parameters
    ----------
    path_to_json_file: str
        The path to the JSON file.

    Returns
    -------
    Dict[str, List[str]]
        A dictionary of source codes with the corresponding lists of
        instrument symbols of interest for each source.
    """
    # json.load streams from the file object directly instead of
    # reading the whole file into memory first via loads(read()).
    with pathlib.Path(path_to_json_file).open('r') as infile:
        return json.load(infile)
d3f26504078e72e1522981a4d8ca5b60c3b8cf23
706,233
import time
import json


def create_elasticsearch_domain(name, account_id, boto_session, lambda_role, cidr):
    """Create an AWS Elasticsearch domain and block until it is active.

    Access is granted to *lambda_role* and to the IP range *cidr*.

    :param name: domain name to create
    :param account_id: AWS account id used to build the resource ARN
    :param boto_session: boto3 session used to create the 'es' client
    :param lambda_role: principal granted full es:* access
    :param cidr: source IP range granted es:* access
    :return: the domain endpoint, or exits the process on failure

    Fixed: the failure message in the retry loop was a broken string
    literal (a raw newline inside single quotes) that made the module
    un-importable; it is now a proper implicit concatenation with the
    missing space restored.
    """
    boto_elasticsearch = boto_session.client('es')
    total_time = 0
    resource = "arn:aws:es:ap-southeast-2:{0}:domain/{1}/*".format(account_id, name)
    access_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow",
             "Principal": {"AWS": str(lambda_role)},
             "Action": "es:*",
             "Resource": resource},
            {"Effect": "Allow",
             "Principal": {"AWS": "*"},
             "Action": "es:*",
             "Resource": resource,
             "Condition": {"IpAddress": {"aws:SourceIp": "{0}".format(cidr)}}}
        ]}
    endpoint = None
    time.sleep(5)
    try:
        print('Creating elasticsearch domain: {0}'.format(name))
        boto_elasticsearch.create_elasticsearch_domain(
            DomainName=name,
            ElasticsearchVersion='2.3',
            ElasticsearchClusterConfig={
                'InstanceType': 't2.micro.elasticsearch',
                'InstanceCount': 1,
                'DedicatedMasterEnabled': False,
                'ZoneAwarenessEnabled': False
            },
            EBSOptions={
                'EBSEnabled': True,
                'VolumeType': 'gp2',
                'VolumeSize': 20
            }
        )
        time.sleep(10)
        attempts = 1
        while True:
            print('Trying to apply access policies to elasticsearch domain: {0} (attempt: {1})'.format(name, attempts))
            try:
                boto_elasticsearch.update_elasticsearch_domain_config(
                    DomainName=name,
                    AccessPolicies=json.dumps(access_policy)
                )
                break
            except Exception as e:
                attempts += 1
                if attempts > 3:
                    print('Failed to apply access policies. Please run this script again with '
                          '`-a delete -n {0}` and wait approx 20 minutes before trying again'.format(name))
                    print('Full error was: {0}'.format(e))
                    exit(1)
                else:
                    time.sleep(2)
    except Exception as e:
        print('Could not create elasticsearch domain: {0}.'.format(name))
        print('Error was: {0}'.format(e))
        exit(1)
    # Poll until the domain leaves the "Processing" state; the Endpoint
    # key only appears once provisioning is finished.
    while True:
        try:
            es_status = boto_elasticsearch.describe_elasticsearch_domain(DomainName=name)
            processing = es_status['DomainStatus']['Processing']
            if not processing:
                endpoint = es_status['DomainStatus']['Endpoint']
                print('Domain: {0} has been created!'.format(name))
                break
            else:
                print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
                time.sleep(120)
        except Exception:
            print('Domain: {0} is still processing. Waiting for 120 seconds before checking again'.format(name))
            total_time += 120
            if total_time > 1800:
                print('Script has been running for over 30 minutes... This likely means that your elastic search domain'
                      ' has not created successfully. Please check the Elasticsearch Service dashboard in AWS console'
                      ' and delete the domain named {0} if it exists before trying again'.format(name))
                exit(1)
            time.sleep(120)
    return endpoint
5e33bd1454a2b3d1ce3bc1cc181b44497ce6035a
706,234
def interp(x, x1, y1, x2, y2):
    """Linearly interpolate the y-value at ``x`` on the line through
    ``(x1, y1)`` and ``(x2, y2)``."""
    weighted = (x2 - x) * y1 + (x - x1) * y2
    return weighted / (x2 - x1)
3af4575c017a32619a5bb2866a7faea5ff5c760d
706,237
import time


def getLocalUtcTimeStamp():
    """Get the universal timestamp for this machine.

    NOTE(review): time.mktime(time.gmtime()) interprets a UTC
    struct_time as local time, then the local DST flag (in hours) is
    subtracted. The exact offset semantics depend on the host timezone
    configuration — confirm against callers before relying on absolute
    values.
    """
    dst_flag = time.localtime().tm_isdst
    return time.mktime(time.gmtime()) - dst_flag * 3600
54eed0893d03f3b6a76de0d36fc3f1ff5b35f64f
706,238
from typing import List


def extract_text(html_text) -> List[List[str]]:
    """Extract the <h3> texts under div.contentus and split each into words.

    Non-breaking spaces (U+00A0) are removed before splitting; empty
    lines are dropped.

    :param html_text: parsed HTML document exposing find()/findAll()
    :return: list of word lists, one per non-empty <h3>
    """
    container = html_text.find("div", attrs={"class": "contentus"})
    cleaned = (heading.text.replace("\xa0", "") for heading in container.findAll("h3"))
    return [entry.split(" ") for entry in cleaned if entry]
19dfdd513e48f2662dc51661bfeca4b1155762a3
706,239
def set_payout_amount():
    """Return the insurance payout amount in wei.

    NB: must match what was defined in the contract constructor at
    deployment.

    The original returned the float literal ``500000e18``; 5e23 is not
    exactly representable as a double, so an exact integer is returned
    instead — wei amounts must not lose precision.
    """
    return 500_000 * 10 ** 18
30ff7b07cbbe28b3150be2f1f470236875c8d0e3
706,241
def rucklidge(XYZ, t, k=2, a=6.7):
    """Time derivatives of the Rucklidge attractor at state ``XYZ``.

    Suggested initial condition: x0 = (0.1, 0, 0).

    :param XYZ: (x, y, z) state tuple
    :param t: time (unused; present for ODE-solver signatures)
    :param k, a: system parameters
    :return: (dx/dt, dy/dt, dz/dt)
    """
    x, y, z = XYZ
    dx = -k * x + y * (a - z)
    dy = x
    dz = y ** 2 - z
    return dx, dy, dz
9d10aa89fb684a95474d45399ae09a38b507913c
706,242
def add_bold_line(latex: str, index: int) -> str:
    """Return *latex* with the line at *index* set in bold.

    Each ``&``-separated cell of the chosen line is prefixed with
    ``\\bfseries``.
    """
    rows = latex.splitlines()
    bold_cells = rows[index].split("&")
    rows[index] = r'\bfseries ' + r'& \bfseries '.join(bold_cells)
    return '\n'.join(rows)
637338ea9ec576c780ccfa0b37d47a670465cdbb
706,243
def _unwrap_function(func): """Unwrap decorated functions to allow fetching types from them.""" while hasattr(func, "__wrapped__"): func = func.__wrapped__ return func
8c7a9f5b08dc91b9ae2c8387fbd4860bb554d561
706,244
def read_file(repo, name):
    """Read ``<repo>/<name>.txt`` and return its lines with trailing
    whitespace stripped.

    (The original docstring said "Read JSON files", but the function
    reads a plain ``.txt`` file line by line.)
    """
    # The context manager closes the file; the original also called
    # file.close() inside the with-block, which was redundant.
    with open(repo + '/' + name + '.txt') as file:
        return [line.rstrip() for line in file]
4d91e4c68a4f132dc6ebb41cc51df66bd555107a
706,245
def add_end_slash(value: str):
    """Return *value* with a trailing slash appended if missing.

    Non-string inputs are returned unchanged.
    """
    # isinstance is the idiomatic type check (the original used
    # type(value) != str, which also excluded str subclasses).
    if not isinstance(value, str):
        return value
    return value if value.endswith("/") else value + "/"
bc8f41898c50120ad7ca8b814ff03d19c1c64c27
706,246
def _none_or_int_or_list(val): """Input conversion - expecting None, int, or a list of ints""" if val is None: return None elif isinstance(val, list): return list(map(int, val)) else: return int(val)
1958c64175a1cd63f8a42044b40b84d7cf8baed2
706,248
def hour_of_day(datetime_col):
    """Return the hour component of a datetime column.

    :param datetime_col: pandas Series of datetimes (must support the
        ``.dt`` accessor).
    :return: Series of integer hours (0-23).
    """
    return datetime_col.dt.hour
18b2f6e16ccbcb488f3863968466fda14f669d8b
706,249
def sum_naturals(n):
    """Sum the first N natural numbers.

    >>> sum_naturals(5)
    15
    """
    total, current = 0, 1
    while current <= n:
        total, current = total + current, current + 1
    return total
4c59057cd82083d615c72a59f682dd218a657ea0
706,250
def sort_list_files(list_patches, list_masks):
    """Sort patch and mask file name lists by their embedded id.

    :param list_patches: patch file names to sort.
    :param list_masks: mask file names to sort.
    :return: tuple of (sorted patches, sorted masks).

    NOTE(review): the key reads only the character at position 1 of
    each name (``int(x[1])``) — this assumes single-digit ids at that
    position; confirm the naming convention with callers.
    """
    def file_id(filename):
        return int(filename[1])

    return sorted(list_patches, key=file_id), sorted(list_masks, key=file_id)
91557475bf145862ea88ad9f86cef82135eddd6c
706,251
def xor(*args):
    """Return True when exactly one of the arguments is truthy.

    >>> xor(0, 1, 0)
    True
    >>> xor(1, 2, 3)
    False
    >>> xor("", "a", "")
    True
    >>> xor("", "", "")
    False
    """
    return sum(map(bool, args)) == 1
86bbe0350dd18a2508120cec9672661e1aa56ce0
706,252
import copy


def dfa2nfa(dfa):
    """Return a deep copy of *dfa* relabelled as a non-deterministic
    automaton, i.e. with the determinism restriction lifted."""
    automaton = copy.deepcopy(dfa)
    automaton.automaton_type = 'Non-Deterministic Finite Automaton'
    automaton.transitions._deterministic = False
    return automaton
eed8e651a51e71599a38288665604add3d8a0a3d
706,253
def correct_predictions(output_probabilities, targets):
    """Count how many argmax predictions match the target classes.

    Args:
        output_probabilities: tensor of per-class probabilities,
            one row per sample.
        targets: indices of the true target classes.

    Returns:
        int: the number of correct predictions.
    """
    predicted_classes = output_probabilities.max(dim=1)[1]
    hits = (predicted_classes == targets).sum()
    return hits.item()
0e39f3bfa00fc20334cf679aa77d89523a34454c
706,255
import re


def split_value(s, splitters=("/", "&", ",")):
    """Split *s* on the first splitter that matches.

    *s* is first split into lines; the first pattern in *splitters*
    that matches any line is used to split every line, and the
    stripped parts are returned. Subsequent matches are intentionally
    ignored. If no splitter matches, the per-line values are returned
    unchanged.

    Bug fixed: the original guard ``if not filter(spl.search, values)``
    never fired on Python 3 (a filter object is always truthy), so the
    first splitter was used even when it matched nothing. The default
    was also changed from a mutable list to a tuple (never mutated, so
    behavior is unchanged).
    """
    if not splitters:
        return [s.strip()]
    values = s.split("\n")
    for splitter in splitters:
        pattern = re.compile(r"\b\s*%s\s*\b" % re.escape(splitter), re.UNICODE)
        if not any(pattern.search(value) for value in values):
            continue
        new_values = []
        for value in values:
            new_values.extend(part.strip() for part in pattern.split(value))
        return new_values
    return values
a9227a4dcf4c49393e6c784337754d1e2b1d30b4
706,256
from typing import List


def parse_text(text):
    """Parse a raw-text playlist into (artist, track) tuples.

    Each non-empty, non-comment line must hold a single track with
    artist and title separated by a single dash, e.g.
    ``Queen - Bohemian Rhapsody``. Malformed and duplicate lines are
    skipped.

    :param str text:
    :return: A list of tracks
    """
    tracks: List[tuple] = []
    for raw_line in text.split("\n"):
        entry = raw_line.strip()
        if not entry or entry.startswith("#"):
            continue
        pieces = entry.split("-", 1)
        if len(pieces) != 2:
            continue
        artist, track = (piece.strip() for piece in pieces)
        if artist and track and (artist, track) not in tracks:
            tracks.append((artist, track))
    return tracks
1307d7ced966aa388e570456964c5921ac54ccca
706,257
import os


def get_absolute_path(file_name, package_level=True):
    """Resolve *file_name* to an absolute path.

    :param file_name: name of the file to resolve.
    :param package_level: True -> relative to this module's directory
        (inside the ``gmail_api_wrapper`` package); False -> relative
        to the parent directory (outside the package).
    :return: absolute path string.
    """
    base_dir = os.path.dirname(__file__)
    if not package_level:
        base_dir = os.path.join(base_dir, os.pardir)
    return os.path.abspath(os.path.join(base_dir, file_name))
70206d9f8b94603b3efaf89c1b53573e1e01ca4d
706,258
def thanos(planet: dict, finger: int) -> int:
    """Remove every other life from *planet* with a snap of the finger.

    The counter is incremented once per inhabitant; inhabitants seen on
    an odd count are removed in place — roughly half the population.

    Bugs fixed: ``++finger`` is a double unary plus in Python and never
    incremented the counter, and popping from the dict while iterating
    its live key view raised RuntimeError after the first removal.

    :param planet: dict of inhabitants, mutated in place.
    :param finger: starting counter value.
    :return: the final counter value.
    """
    # Snapshot the keys so the dict can be mutated during iteration.
    for key in list(planet.keys()):
        finger += 1
        if finger & 1:
            planet.pop(key)
    return finger
5b6325297cb8f259c27b3eb7fa5618edd1486b9c
706,259
from typing import List


def ordered_list_item_to_percentage(ordered_list: List[str], item: str) -> int:
    """Determine the percentage of an item in an ordered list.

    When using this utility for fan speeds, do not include "off".

    Given the list ["low", "medium", "high", "very_high"], this
    function returns the following when the item is passed in:

        low: 25
        medium: 50
        high: 75
        very_high: 100

    Raises:
        ValueError: if *item* is not in *ordered_list* (the original
            raised a bare ValueError with no message).
    """
    if item not in ordered_list:
        raise ValueError(f"{item!r} is not in the ordered list")
    list_len = len(ordered_list)
    list_position = ordered_list.index(item) + 1
    return (list_position * 100) // list_len
2aa1b0574664e53da6080ae4bc99d1f3c93fad96
706,260
def dirty(graph): """ Return a set of all dirty nodes in the graph. """ # Reverse the edges to get true dependency return {n: v for n, v in graph.node.items() if v.get('build') or v.get('test')}
06835b52d7741716f1c67d951c0ab74758f476b4
706,261
def _ibp_sub(lhs, rhs): """Propagation of IBP bounds through a substraction. Args: lhs: Lefthand side of substraction. rhs: Righthand side of substraction. Returns: out_bounds: IntervalBound. """ return lhs - rhs
45ed06feea14275ddd64e1ec60727123db52a5cd
706,262
import os
import sys


def import_module_from_path(full_path, global_name):
    """Import a module from a file path and return the module object.

    Allows one to import from anywhere, something ``__import__()``
    does not do. The module is registered in ``sys.modules`` as
    *global_name*.

    :param full_path: The absolute path to the module .py file
    :param global_name: The name assigned to the module in sys.modules.
        To avoid confusion, the global_name should be the same as the
        variable to which you're assigning the returned module.
    :raises ImportError: if the module cannot be loaded from *full_path*.

    The original appended to ``sys.path`` and used ``__import__``,
    which could silently return an unrelated, already-imported module
    with the same file name; importlib loads exactly the requested file.
    """
    import importlib.util
    spec = importlib.util.spec_from_file_location(global_name, full_path)
    if spec is None or spec.loader is None:
        raise ImportError('Module could not be imported from %s.' % full_path)
    module = importlib.util.module_from_spec(spec)
    try:
        spec.loader.exec_module(module)
    except FileNotFoundError:
        raise ImportError('Module could not be imported from %s.' % full_path)
    sys.modules[global_name] = module
    return module
d7f73dce4e51715e79a71616cf509f86c8106f27
706,263
def civic_eid26_statement():
    """Create a test fixture for CIViC EID26 statement."""
    # Static fixture mirroring the CIViC evidence item 26 record. The
    # id-like fields ("proposition:001", "civic.vid:65", ...) must stay
    # in sync with the sibling fixtures they reference.
    return {
        "id": "civic.eid:26",
        "description": "In acute myloid leukemia patients, D816 mutation is associated with earlier relapse and poorer prognosis than wildtype KIT.",  # noqa: E501
        "direction": "supports",
        "evidence_level": "civic.evidence_level:B",
        "proposition": "proposition:001",
        "variation_origin": "somatic",
        "variation_descriptor": "civic.vid:65",
        "disease_descriptor": "civic.did:3",
        "method": "method:001",
        "supported_by": ["pmid:16384925"],
        "type": "Statement"
    }
bdad5e8d5f6fe063d43bb600bf4158fadc1f38ca
706,264
import re


def validate_user(username, minlen):
    """Check whether *username* satisfies the account-name rules.

    Rules: at least *minlen* characters, starting with a lowercase
    letter, containing only lowercase letters, digits, dots and
    underscores.

    Raises:
        TypeError: if *username* is not a string.
        ValueError: if *minlen* is smaller than 1.
    """
    if type(username) != str:
        raise TypeError("username must be a string")
    if minlen < 1:
        raise ValueError("minlen must be at least 1")
    if len(username) < minlen:
        return False
    return bool(re.match(r'^[a-z][a-z0-9._]*$', username))
7d7ad86eccba2639158a9f5da9fb093f9f4abff9
706,266
def get_type_name_value(obj):
    """Return the type name of an LLDB value.

    The name carries an asterisk when the object is a pointer.

    :param lldb.SBValue obj: LLDB value object.
    :return: Object type name, or None when *obj* is None.
    :rtype: str | None
    """
    if obj is None:
        return None
    return obj.GetTypeName()
c87a5acf7d8ef794eab97c90b82bbd9574fb0e2b
706,267
def lang_add(cursor, lang, trust):
    """Create a procedural language in the database via *cursor*.

    When *trust* is truthy the language is created as TRUSTED.
    Always returns True (execute raises on failure).
    """
    keyword = 'TRUSTED ' if trust else ''
    cursor.execute('CREATE %sLANGUAGE "%s"' % (keyword, lang))
    return True
f5a1ac9264efca070b4528505ee6bee6892b3e80
706,268
def format_time(time):
    """Format a duration in seconds as 'DDd HHh MMm SSs'.

    Args:
        time: duration in seconds

    Returns:
        a formatted string representing time
    """
    minutes, seconds = divmod(time, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '{:02d}d {:02d}h {:02d}m {:02d}s'.format(
        int(days), int(hours), int(minutes), int(seconds))
67c6404cbc5076358f9e85dc169e1d7b976b7d60
706,269
def percent_list(part_list, whole_list):
    """Return (len(whole_list), complement percentage).

    NOTE(review): despite the original docstring ("return percent of
    the part"), the second element is ``round(100 - part%, 2)`` — i.e.
    the percentage of *whole_list* NOT covered by *part_list*. Confirm
    intent with callers before "fixing". An empty *whole_list* yields
    (0, 0).
    """
    w = len(whole_list)
    if not w:
        return (w, 0)
    p = 100 * float(len(part_list)) / float(w)
    return (w, round(100 - p, 2))
f9b3697c96c04c402972351e73395b7f7ed18350
706,270
def capacity_rule(mod, g, p):
    """Pyomo capacity expression for *gen_ret_bin* projects.

    The capacity of projects of the *gen_ret_bin* capacity type is a
    pre-specified number for each of the project's operational periods
    multiplied with 1 minus the binary retirement variable.

    :param mod: model exposing gen_ret_bin_capacity_mw and
        GenRetBin_Retire indexed by (project, period).
    :param g: project index.
    :param p: period index.
    """
    return mod.gen_ret_bin_capacity_mw[g, p] \
        * (1 - mod.GenRetBin_Retire[g, p])
ba4ccad8d620da084912a65a80793f54fb84b374
706,271
def secure_request(request, ssl: bool):
    """Attach security-hardening HTTP headers to *request* in place.

    :param ssl: when True, also add the HSTS / expect-ct / CSP-upgrade
        headers that only make sense over TLS.
    :param request: object with a mutable ``headers`` mapping.
    :return: the same request object.
    """
    # request.headers['Content-Security-Policy'] = "script-src 'self' cdnjs.cloudflare.com ; "
    base_headers = {
        'Feature-Policy': "geolocation 'none'; microphone 'none'; camera 'self'",
        'Referrer-Policy': 'no-referrer',
        'x-frame-options': 'SAMEORIGIN',
        'X-Content-Type-Options': 'nosniff',
        'X-Permitted-Cross-Domain-Policies': 'none',
        'X-XSS-Protection': '1; mode=block',
    }
    for header, value in base_headers.items():
        request.headers[header] = value
    if ssl:
        request.headers['expect-ct'] = 'max-age=60, enforce'
        request.headers["Content-Security-Policy"] = "upgrade-insecure-requests"
        request.headers['Strict-Transport-Security'] = "max-age=60; includeSubDomains; preload"
    return request
e1c19aa89930e6aeb1c548c24da374859987e090
706,272
def _get_prefixed_values(data, prefix): """Collect lines which start with prefix; with trimming""" matches = [] for line in data.splitlines(): line = line.strip() if line.startswith(prefix): match = line[len(prefix):] match = match.strip() matches.append(match) return matches
d0fe7ff11321ccbf06397963a303f0e79181ebba
706,273
def bytes_to_msg(seq, standard="utf-8"):
    """Decode a byte sequence into text using the *standard* encoding."""
    decoded = seq.decode(standard)
    return decoded
5664d97b3fec5d119daa2171bcb431ca5a4b5f33
706,274
def bonferroni_correction(pvals):
    """Apply the Bonferroni correction to a list of p-values.

    Each p-value is multiplied by the number of tests and clipped at 1.

    Reference: http://en.wikipedia.org/wiki/Bonferroni_correction
    """
    n_tests = len(pvals)
    return [min(p * n_tests, 1.0) for p in pvals]
f57ffd6b77a0a74a61904334604d1cb0eb08f8ff
706,275
from itertools import accumulate, chain, repeat


def make_fib():
    """Returns a function that returns the next Fibonacci number
    every time it is called.

    >>> fib = make_fib()
    >>> fib()
    0
    >>> fib()
    1
    >>> fib()
    1
    >>> fib()
    2
    >>> fib()
    3
    >>> fib2 = make_fib()
    >>> fib() + sum([fib2() for _ in range(5)])
    12
    """
    # accumulate threads the state pair (a, b) -> (b, a + b) through an
    # infinite stream: chain seeds it with (0, 1), and repeat(None)
    # supplies dummy elements that just drive each step. map extracts
    # the first element of each pair (the current Fibonacci number),
    # and .__next__ turns the iterator into a zero-argument
    # "give me the next value" callable.
    return map(lambda x_y: x_y[0],
               accumulate(chain(((0, 1),), repeat(None)),
                          lambda x_y, _: (x_y[1], x_y[0] + x_y[1]))).__next__
e546ce79c4b441418f5325b0ac5d7c3faf6ac35e
706,276
def text_in_bytes(text, binary_data, encoding="utf-8"):
    """Return True if *text* occurs in the decoded *binary_data*."""
    decoded = binary_data.decode(encoding)
    return text in decoded
e416057989c452718fa27b5f84286e347b986117
706,277
def normalize_v(v): """ Normalize velocity to [-1, 1]. Ref: https://github.com/microsoft/AirSim-Drone-Racing-VAE-Imitation/blob/e651be52ff8274c9f595e88b13fe42d51302403d/racing_utils/dataset_utils.py#L20 """ # normalization of velocities from whatever to [-1, 1] range v_x_range = [-1, 7] v_y_range = [-3, 3] v_z_range = [-3, 3] v_yaw_range = [-1, 1] if len(v.shape) == 1: # means that it's a 1D vector of velocities v[0] = 2.0 * (v[0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0 v[1] = 2.0 * (v[1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0 v[2] = 2.0 * (v[2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0 v[3] = 2.0 * (v[3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0 elif len(v.shape) == 2: # means that it's a 2D vector of velocities v[:, 0] = 2.0 * (v[:, 0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0 v[:, 1] = 2.0 * (v[:, 1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0 v[:, 2] = 2.0 * (v[:, 2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0 v[:, 3] = 2.0 * (v[:, 3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0 else: raise Exception('Error in data format of V shape: {}'.format(v.shape)) return v # Note: The version used in Shuang's code base is below, which should be equivalent to the above version. # self.targets[:, 0] = 2. * (self.targets[:, 0] + 1.) / (7. + 1.) - 1. # self.targets[:, 1] = 2. * (self.targets[:, 1] + 3.) / (3. + 3.) - 1. # self.targets[:, 2] = 2. * (self.targets[:, 2] + 3.) / (3. + 3.) - 1. # self.targets[:, 3] = 2. * (self.targets[:, 3] + 1.) / (1. + 1.) - 1.
cd47c8d3498e677a1f566b64199224f23a4b5896
706,278
def second_smallest(numbers):
    """Find the second smallest element of *numbers*.

    Duplicates count separately (e.g. [1, 1] -> 1). With fewer than
    two elements the result is float('inf').
    """
    smallest = runner_up = float('inf')
    for value in numbers:
        if value <= smallest:
            smallest, runner_up = value, smallest
        elif value < runner_up:
            runner_up = value
    return runner_up
0ca7b297da68651e4a8b56377e08f09d4d82cfb7
706,279
from datetime import datetime
import socket


def get_run_name():
    """Build a unique name for each run:
    '<MonDD-HH-MM-SS>_<hostname>'."""
    stamp = datetime.now().strftime('%b%d-%H-%M-%S')
    return '{}_{}'.format(stamp, socket.gethostname())
26f57e72912e896fe192de61b6477ef65051fccd
706,280
def compute_win_state_str_row(n_rows, n_cols, n_connects):
    """Enumerate row-winning board states as 0/1 strings.

    Each win state is a string of length n_rows * n_cols with a run of
    *n_connects* ones inside a single row and zeros elsewhere; it can
    later be interpreted as a base-2 integer. Assuming at most
    n_rows = n_cols = 5, a signed 31-bit integer comfortably holds the
    25-bit string.
    """
    n_cells = n_rows * n_cols
    win_states = []
    # For each row, slide a window of n_connects ones across the
    # row's columns; e.g. if n_connects == n_cols there is exactly one
    # winning state per row.
    for row in range(n_rows):
        row_start = row * n_cols
        for offset in range(n_cols - n_connects + 1):
            start = row_start + offset
            end = start + n_connects
            state = '0' * start + '1' * n_connects + '0' * (n_cells - end)
            win_states.append(state)
    return win_states
b0f0f2846de7506b4b69f90bb8a0b1641a421659
706,281
import hmac
import hashlib


def hmac_sha512(key: bytes, data: bytes) -> bytes:
    """Return the SHA512 HMAC of *data* under the secret *key*.

    Corresponds directly to the "HMAC-SHA512(Key = ..., Data = ...)"
    function in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param key: The secret key used for HMAC calculation.
    :param data: The data for which an HMAC should be calculated.
    :return: The 64-byte HMAC digest.
    """
    return hmac.new(key, data, hashlib.sha512).digest()
64850ea2d5e921138d8e0ebc2d021f8eaf5a7357
706,282
def validate(data):
    """Validate an incoming message payload.

    Args:
        data(dict): the incoming data

    Returns:
        True if the data is valid

    Raises:
        ValueError: the data is not valid
    """
    if not isinstance(data, dict):
        raise ValueError("data should be dict")
    text = data.get("text")
    if not isinstance(text, str) or len(text) < 1:
        raise ValueError("text field is required and should not be empty")
    if "markdown" in data and not isinstance(data["markdown"], bool):
        raise ValueError("markdown field should be bool")
    if "attachments" in data:
        if not isinstance(data["attachments"], list):
            raise ValueError("attachments field should be list")
        for attachment in data["attachments"]:
            # each attachment must carry at least a text or a title
            if "text" not in attachment and "title" not in attachment:
                raise ValueError("text or title is required in attachment")
    return True
ae8b7e74bd7607a7c8f5079014a0f5e3af5bc011
706,283
def stripExtra(name):
    """Remove a trailing parenthesised part from a character name.

    For "Tony (Stark)" this returns "Tony" (the character just before
    '(' is dropped too). Names without '(' — or starting with '(' —
    are returned unchanged.

    Args:
        name (string): character's name

    Returns:
        string: character's name without parenthesis
    """
    paren_index = name.find('(')
    if paren_index > 0:
        return name[:paren_index - 1]
    return name
fd9b8c2d6f513f06d8b1df067520c7f05cff023d
706,284
def sort_car_models(car_db):
    """Return a copy of the cars dict with each value list of models
    sorted alphabetically."""
    return {make: sorted(models) for make, models in car_db.items()}
a478f16ece83058ba411480b91584e4c61026141
706,285
import gc


def at(addr):
    """Look up a GC-tracked object by its id(); None when not found."""
    return next((obj for obj in gc.get_objects() if id(obj) == addr), None)
f408b9a63afad1638f156163c6249e0e8095bff4
706,286
def voltage(raw_value, v_min=0, v_max=10, res=32760, gain=1):
    """Convert a raw ADC reading to a voltage measurement tuple.

    ``V = raw_value / res * (v_max - v_min) * gain``

    :return: (volts, "V")
    """
    volts = float(raw_value) / res * (v_max - v_min) * gain
    return (volts, "V")
b4ea7d2521e1fa856a21b98ace2a9490f8a3b043
706,287
def extract_characteristics_from_string(species_string):
    """Split an SBML-format species string on '_dot_' into a set.

    Species are named for the SBML as
    species_name_dot_characteristic1_dot_characteristic2, i.e. '.' is
    encoded as '_dot_'.

    Parameters:
        species_string (str): species string in MobsPy for SBML format
    """
    parts = species_string.split('_dot_')
    return set(parts)
abfcc0d3e425e8f43d776a02254a04b0e85dc6d1
706,288
def _diff_bearings(bearings, bearing_thresh=40): """ Identify kinked nodes (nodes that change direction of an edge) by diffing Args: bearings (list(tuple)): containing (start_node, end_node, bearing) bearing_thresh (int): threshold for identifying kinked nodes (range 0, 360) Returns: list[str] of kinked nodes """ kinked_nodes = [] # diff bearings nodes = [b[0] for b in bearings] bearings_comp = [b[2] for b in bearings] bearing_diff = [y - x for x, y in zip(bearings_comp, bearings_comp[1:])] node2bearing_diff = list(zip(nodes[1:-1], bearing_diff)) # id nodes to remove for n in node2bearing_diff: # controlling for differences on either side of 360 if min(abs(n[1]), abs(n[1] - 360)) > bearing_thresh: kinked_nodes.append(n[0]) return kinked_nodes
a29c3cdd009065d7a73dd993ae66f81853d5e2bc
706,289
def _convert_steplist_to_string(step_data): """Converts list of step data into a single string. Parameters ---------- step_data : list List of step data Returns ------- str A space delimited string where every 6th value is followed by a newline. """ text = '' for i, datum in enumerate(step_data): if i == 0: text += f'\n{datum}\n' else: if i%6 == 0: text += f'{datum}\n' else: text += f'{datum} ' return text
112495edbafc3db39946d7abeefff6466e2dff94
706,290
import re


def parse_transceiver_dom_sensor(output_lines):
    """Parse transceiver ports from TRANSCEIVER_DOM_SENSOR table keys.

    @param output_lines: DB table TRANSCEIVER_DOM_SENSOR content output
        by the 'redis' command
    @return: list of parsed port names (e.g. "Ethernet0")
    """
    pattern = re.compile(r"TRANSCEIVER_DOM_SENSOR\|(Ethernet\d+)")
    ports = []
    for line in output_lines:
        matched = pattern.match(line)
        assert matched, "Unexpected line %s" % line
        ports.append(matched.group(1))
    return ports
367d6a744add04e7649c971ef8fec3788ed8db88
706,291
def group_node_intro_times(filt, groups, n_sents):
    """Return, per group, the normalized addition times of its nodes.

    The diagonal entry filt[i, i] is taken as node i's introduction
    step and divided by *n_sents* to normalize.
    """
    times_per_group = [[] for _ in range(len(set(groups)))]
    for node_index, group_id in enumerate(groups):
        intro_step = int(filt[node_index, node_index])
        times_per_group[group_id].append(intro_step / n_sents)
    return times_per_group
b5da0e97c76683201a9b81fce1b1f1c7f25e4d6d
706,292
import re


def youku(link):
    """Return the youku embedded-player URL for a video page link,
    or None when the link is not a youku v_show URL."""
    matched = re.match(r'http:\/\/v\.youku\.com\/v_show/id_([\w]+)\.html', link)
    return 'http://player.youku.com/embed/%s' % matched.group(1) if matched else None
efcf1394cc02503a1ae18d91abee34777958e545
706,293
def combinationSum(candidates, target):
    """Return all unique combinations of *candidates* (with
    repetition) that sum to *target*.

    :type candidates: List[int]
    :type target: int
    :rtype: List[List[int]]
    """
    combos = []
    pool = sorted(candidates)

    def backtrack(remaining, chosen):
        # a remainder of zero means `chosen` sums exactly to target
        if remaining == 0:
            combos.append(chosen)
            return
        for value in pool:
            if value > remaining:
                break  # pool is sorted; nothing further can fit
            if chosen and value < chosen[-1]:
                continue  # keep combinations non-descending -> unique
            backtrack(remaining - value, chosen + [value])

    backtrack(target, [])
    return combos
e8739c196c84aa7d15712ba1007e602a330fd625
706,294
def format_time(data, year):
    """Stamp the wave *year* onto every row of *data*.

    Parameters
    ----------
    data : pd.DataFrame
        Data without time formatting.
    year : int
        The `year` of the wave being processed.

    Returns
    -------
    data : pd.DataFrame
        Data with a constant "time" column set to *year*.
    """
    # NOTE(review): the original carried commented-out workarounds for
    # a BHPS September-2008 pidp gap (bumping the year for waves
    # <= 2008); the effective behavior is just the assignment below.
    data["time"] = year
    return data
858d7e48143a16e644d4f1241cd8918385dc7c5f
706,295
def plot(plot, x, y, **kwargs):
    """Add a series to *plot* (drawn as a continuous line by default;
    refer to matplotlib.pyplot.plot() for options).

    Args:
        plot: matplotlib.pyplot — plot to which the series is added.
        x: (float,) — x-coordinates in user data units.
        y: (float,) — y-coordinates in user data units.
    """
    series = plot.plot(x, y, **kwargs)
    return series
1e861243a87b61461fb49dcadf19ec9099fa5a1f
706,296
def interval_to_errors(value, low_bound, hi_bound):
    """Convert an error interval to asymmetric errors.

    :param value: central value
    :param low_bound: interval low bound
    :param hi_bound: interval high bound
    :return: (error minus, error plus)
    """
    return value - low_bound, hi_bound - value
ffee403968ddf5fd976df79a90bdbb62474ede11
706,297
def expsign(sign, exp):
    """Optimization of ``sign ** exp`` for sign in {1, -1}."""
    if sign == 1:
        return 1
    assert sign == -1
    return 1 if exp % 2 == 0 else -1
d770aaa2a4d20c9530a213631047d1d0f9cca3f7
706,299
import re def _newline_to_ret_token(instring): """Replaces newlines with the !RET token. """ return re.sub(r'\n', '!RET', instring)
4fcf60025f79811e99151019a479da04f25ba47c
706,300
def _ComputeLineCounts(old_lines, chunks): """Compute the length of the old and new sides of a diff. Args: old_lines: List of lines representing the original file. chunks: List of chunks as returned by patching.ParsePatchToChunks(). Returns: A tuple (old_len, new_len) representing len(old_lines) and len(new_lines), where new_lines is the list representing the result of applying the patch chunks to old_lines, however, without actually computing new_lines. """ old_len = len(old_lines) new_len = old_len if chunks: (_, old_b), (_, new_b), old_lines, _ = chunks[-1] new_len += new_b - old_b return old_len, new_len
ba99714016b69d87f260c8e7b8793468a2f7b04d
706,301
def _read_int(file_handle, data_size): """ Read a signed integer of defined data_size from file. :param file_handle: The file handle to read from at current position :param data_size: The data size in bytes of the integer to read :returns: The integer read and decoded """ return int.from_bytes(file_handle.read(data_size), byteorder="little", signed=True)
4d2a7e82e9daa828c0e5b180250834f2fa9977d5
706,302
import numpy


def quaternion_to_matrix(quat):
    """Convert a quaternion to a 3x3 rotation matrix.

    :param quat: column vector [[w], [x], [y], [z]].
    :return: numpy 3x3 rotation matrix.
    """
    qw, qx, qy, qz = quat[0][0], quat[1][0], quat[2][0], quat[3][0]
    return numpy.array([
        [1 - 2*qy*qy - 2*qz*qz, 2*qx*qy - 2*qz*qw,     2*qx*qz + 2*qy*qw],
        [2*qx*qy + 2*qz*qw,     1 - 2*qx*qx - 2*qz*qz, 2*qy*qz - 2*qx*qw],
        [2*qx*qz - 2*qy*qw,     2*qy*qz + 2*qx*qw,     1 - 2*qx*qx - 2*qy*qy],
    ])
67f02ea97db1af4a763c3a97957f36de29da0157
706,303
def list_check(lst):
    """Return True if every item in *lst* is a list.

    Replaces the original sum-of-flags counting with the idiomatic
    all(); an empty *lst* yields True in both versions.

    >>> list_check([[1], [2, 3]])
    True
    >>> list_check([[1], "nope"])
    False
    """
    return all(isinstance(item, list) for item in lst)
9e2c55cb6e15f89ff2b73a78d5f15310d3cac672
706,304
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
    """Map predicted and gold label tensors back to label strings.

    input:
        pred_variable (batch_size, sent_len): pred tag result
        gold_variable (batch_size, sent_len): gold result variable
        mask_variable (batch_size, sent_len): mask variable

    Returns (pred_label, gold_label): lists of label strings per
    sentence (sequence labeling) or per sample (sentence
    classification).
    """
    # word_recover holds the permutation that restores the original
    # sample order after length-sorted batching.
    pred_variable = pred_variable[word_recover]
    # print("reordered labels: {}".format(pred_variable))
    gold_variable = gold_variable[word_recover]
    mask_variable = mask_variable[word_recover]
    batch_size = gold_variable.size(0)
    if sentence_classification:
        # One label per sample: convert each index directly.
        pred_tag = pred_variable.cpu().data.numpy().tolist()
        gold_tag = gold_variable.cpu().data.numpy().tolist()
        pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
        gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
    else:
        seq_len = gold_variable.size(1)
        mask = mask_variable.cpu().data.numpy()
        pred_tag = pred_variable.cpu().data.numpy()
        gold_tag = gold_variable.cpu().data.numpy()
        batch_size = mask.shape[0]
        pred_label = []
        gold_label = []
        for idx in range(batch_size):
            # Keep only positions where the mask is non-zero
            # (real tokens, not padding).
            pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
            gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
            assert(len(pred)==len(gold))
            pred_label.append(pred)
            gold_label.append(gold)
    return pred_label, gold_label
7f3efef4a0e9041e329c8d1c0c5641bf0c79ff58
706,305
import json


def all_cells_run(event_str: str, expected_count: int) -> bool:
    """Check an execution event for "all cells have run".

    True when the event is an ``execute_reply`` whose status is "ok"
    and whose ``execution_count`` equals *expected_count* (the number
    of nonempty cells). Events missing the expected structure yield
    False.
    """
    try:
        event = json.loads(event_str)
        content = event["content"]
        msg_type = event["msg_type"]
        execution_count = content["execution_count"]
        status = content["status"]
    except (TypeError, KeyError):
        return False
    return (msg_type == "execute_reply"
            and execution_count == expected_count
            and status == "ok")
c3e1bb23f38ffdd09d4cc2ea3326d40b7cf54034
706,306
def searchInsert(nums, target):
    """Return the index of *target* in sorted *nums*, or the index
    where it would be inserted to keep the list sorted.

    :type nums: List[int]
    :type target: int
    :rtype: int

    The original appended the missing target and re-sorted — mutating
    the caller's list and costing O(n log n); bisect_left gives the
    same index in O(log n) without side effects.
    """
    import bisect  # local import keeps the fix self-contained
    return bisect.bisect_left(nums, target)
56a719b1595502a773c108d26c597fb5ac0201bb
706,307
import json


def jsonify(value):
    """Convert a value into a JSON string usable for JSONB queries in
    Postgres.

    If a string happens to contain the character U+0000, which cannot
    be represented in a PostgreSQL value, the escape sequence for that
    character is removed, effectively stripping it from all strings.
    """
    encoded = json.dumps(value, ensure_ascii=False)
    return encoded.replace("\\u0000", "")
7fff497b302822f8f79f0e68b2576c26458df99c
706,308
import os import json import urllib3 import certifi def add_generated_report_header(report_header): """ Upload report history and return the id of the header that was generated on the server. Parameters ---------- report_header: Required Parmeters: A dictionary of parameters that will be used to describe the report that consist of: - report: Name of the report - executionTimeMS: The number of milliseconds it took to generate the report - scheduled: True if the report was scheduled, false if it was not - note: Any notes to be added to the report - user: An Entity Header (dictionary of id and text) of the user (which could be a system user) that requested the report. - contentType: Mime type of the report, generally this is application/pdf - fileName: name of the file (not including the path) of the report - reportTitle: tile of the report as it was generated Optional Parameters - reportSummary: report summary as returned from the generatred report - reportDate: date of for the report - device: An Entity Header (dictionary of id and text) of the device that this report is for, if this is provided reports for specific devices will be available in the dashboard Returns ------- out: string Returns the id of the generated report that can be used to upload a report. 
""" job_server = os.environ.get('JOB_SERVER_URL') if(job_server is None): raise Exception("Missing environment variable [JOB_SERVER_URL]") headers={'Content-Type':'application/json'} generated_report_json = json.dumps(report_header) url = "%s/api/generatedreport/header" % (job_server) encoded_data = generated_report_json.encode('utf-8') http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) r = http.request('POST', url, headers=headers, preload_content=False, body=encoded_data) responseText = '' responseStatus = r.status for chunk in r.stream(32): responseText += chunk.decode("utf-8") responseJSON = json.loads(responseText) r.release_conn() if responseStatus > 299: print('Failed http call, response code: ' + str(responseStatus)) print('Url: ' + url) print(responseJSON) print('--------------------------------------------------------------------------------') print() raise Exception("Could not upload report header to %s" % url) if(responseJSON["successful"]): return responseJSON["result"] else: raise Exception(responseJSON["errors"][0]["message"])
d1c1923e4a61ae82f4f9319b471a18ed2bcbf562
706,309
def getAsciiFileExtension(proxyType): """ The file extension used for ASCII (non-compiled) proxy source files for the proxies of specified type. """ return '.proxy' if proxyType == 'Proxymeshes' else '.mhclo'
cb2b27956b3066d58c7b39efb511b6335b7f2ad6
706,310
def dist(s1, s2): """Given two strings, return the Hamming distance (int)""" return abs(len(s1) - len(s2)) + sum( map(lambda p: 0 if p[0] == p[1] else 1, zip(s1.lower(), s2.lower())))
ef7b3bf24e24a2e49f0c7acfd7bcb8f23fa9af2e
706,311
import pickle def read_bunch(path): """ read bunch. :param path: :return: """ file = open(path, 'rb') bunch = pickle.load(file) file.close() return bunch
aec87c93e20e44ddeeda6a8dfaf37a61e837c714
706,312
def col_index_list(info, key, value): """Given a list of dicts 'info', return a list of indices corresponding to columns in which info[key] == value. Use to build lists of default columns, non-exportable columns, etc.""" index_list = list() if info != None: for i in range(0, len(info)): if info[i].get(key) == value: index_list.append(i) return index_list
af46b03c2fe5bce2ceb7305fd670ce1f0f52ae38
706,313
def retr_radihill(smax, masscomp, massstar): """ Return the Hill radius of a companion Arguments peri: orbital period rsma: the sum of radii of the two bodies divided by the semi-major axis cosi: cosine of the inclination """ radihill = smax * (masscomp / 3. / massstar)**(1. / 3.) # [AU] return radihill
5010f66026db7e2544b85f70fd1449f732c024b4
706,314
def profiling_csv(stage, phases, durations): """ Dumps the profiling information into a CSV format. For example, with stage: `x` phases: ['a', 'b', 'c'] durations: [1.42, 2.0, 3.4445] The output will be: ``` x,a,1.42 x,b,2.0 x,c,3.444 ``` """ assert all(hasattr(p, "name") for p in phases), "expected to have name attribute." return "\n".join( [f"{stage},{p.name},{round(t, 3)}" for (p, t) in zip(phases, durations)] )
d40ee5601aa201904741870ce75c4b5bfde0f9bc
706,315
def view_filestorage_file(self, request): """ Renders the given filestorage file in the browser. """ return getattr(request.app, self.storage).getsyspath(self.path)
ad65b83b9462c8b8efec7626d4751685df3aba8b
706,316
def fib(n): """Returns the nth Fibonacci number.""" if n == 0: return 1 elif n == 1: return 1 else: return fib(n - 1) + fib(n - 2)
397d5714f45491dde68c13379fe2a6acafe55002
706,318
def split_sample(labels): """ Split the 'Sample' column of a DataFrame into a list. Parameters ---------- labels: DataFrame The Dataframe should contain a 'Sample' column for splitting. Returns ------- DataFrame Updated DataFrame has 'Sample' column with a list of strings. """ sample_names = labels["Sample"].str.split(" ", n=1, expand=False) labels['Sample'] = sample_names return labels
483f1b78e07a2156aa3e48ae6c1f5ce41f5e60fe
706,320
import hashlib def get_text_hexdigest(data): """returns md5 hexadecimal checksum of string/unicode data NOTE ---- The md5 sum of get_text_hexdigest can differ from get_file_hexdigest. This will occur if the line ending character differs from being read in 'rb' versus 'r' modes. """ data_class = data.__class__ # fmt: off if data_class in ("".__class__, u"".__class__): data = data.encode("utf-8") elif data.__class__ != b"".__class__: raise TypeError("can only checksum string, unicode or bytes data") # fmt: on md5 = hashlib.md5() md5.update(data) return md5.hexdigest()
762115178406c0b49080b3076859a3d1c13ad356
706,321
def escape_env_var(varname): """ Convert a string to a form suitable for use as an environment variable. The result will be all uppercase, and will have all invalid characters replaced by an underscore. The result will match the following regex: [a-zA-Z_][a-zA-Z0-9_]* Example: "my.private.registry/cat/image" will become "MY_PRIVATE_REGISTRY_CAT_IMAGE" """ varname = list(varname.upper()) if not varname[0].isalpha(): varname[0] = "_" for i, c in enumerate(varname): if not c.isalnum() and c != "_": varname[i] = "_" return "".join(varname)
c1e57ff3b9648e93a540202f00d0325f91bccde1
706,323
def is_in(a_list): """Returns a *function* that checks if its argument is in list. Avoids recalculation of list at every comparison.""" def check(arg): return arg in a_list return check
34afbc269c164f0e095b1cbbf4e9576bafc7a9e1
706,324
def get_log_record_extra_fields(record): """Taken from `common` repo logging module""" # The list contains all the attributes listed in # http://docs.python.org/library/logging.html#logrecord-attributes skip_list = ( 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module', 'msecs', 'msecs', 'message', 'msg', 'name', 'pathname', 'process', 'processName', 'relativeCreated', 'thread', 'threadName', 'extra', 'stack_info', 'exc_type', 'exc_msg') easy_types = (str, bool, dict, float, int, list, type(None)) fields = {} for key, value in record.__dict__.items(): if key not in skip_list: if isinstance(value, easy_types): fields[key] = value else: fields[key] = repr(value) return fields
95fe6a74cd169c14ac32728f0bb1d16a2aa9e874
706,325
def _dtype(a, b=None): """Utility for getting a dtype""" return getattr(a, 'dtype', getattr(b, 'dtype', None))
c553851231f0c4be544e5f93738b43fa98e65176
706,327
def get_decay_fn(initial_val, final_val, start, stop): """ Returns function handle to use in torch.optim.lr_scheduler.LambdaLR. The returned function supplies the multiplier to decay a value linearly. """ assert stop > start def decay_fn(counter): if counter <= start: return 1 if counter >= stop: return final_val / initial_val time_range = stop - start return 1 - (counter - start) * (1 - final_val / initial_val) / time_range assert decay_fn(start) * initial_val == initial_val assert decay_fn(stop) * initial_val == final_val return decay_fn
d84c0f0305d239834429d83ba4bd5c6d6e945b69
706,328
def _vars_to_add(new_query_variables, current_query_variables): """ Return list of dicts representing Query Variables not yet persisted Keyword Parameters: new_query_variables -- Dict, representing a new inventory of Query Variables, to be associated with a DWSupport Query current_query_variables -- Dict, representing the Query Variables currently associated with the 'new_query_variables' Query mapped by tuple(table_name, column_name) >>> from pprint import pprint >>> test_new_vars = { 'great_fact': ['measure_a', 'measure_b'] ... ,'useful_dim': ['field_one'] ... ,'occasionally_useful_dim': ['field_two']} >>> persisted_vars = { ('great_fact', 'measure_a'): object() #fake ... ,('useful_dim', 'field_one'): object()#objects ... ,('useful_dim', 'field_two'): object()} >>> out = _vars_to_add(test_new_vars, persisted_vars) >>> pprint(out) # check detected additions {'great_fact': ['measure_b'], 'occasionally_useful_dim': ['field_two']} """ additional_fields_by_table_name = {} # Values to return # detect additions for new_variable_table_name, table_columns in new_query_variables.items(): for column_name in table_columns: key = (new_variable_table_name, column_name) #table+column tuple if key not in current_query_variables: # New Query Variable - add variable name to table's list table_variables = additional_fields_by_table_name.setdefault( new_variable_table_name ,list()) #default to new, empty list (if none exists yet) table_variables.append(column_name) return additional_fields_by_table_name
fd5ea2209b374ab9987a05c139ba1f28805f3eff
706,329
def Ak(Y2d, H, k): """ Calculate Ak for Sk(x) Parameters ---------- Y2d : list list of y values with the second derived H : list list of h values from spline k : int index from Y2d and H Returns ------- float Ak from cubic spline """ return (Y2d[k] - Y2d[k - 1]) / (6 * H[k - 1])
baea453b9c7b023b78c1827dc23bacbd8fd6b057
706,330
def cycle_list_next(vlist, current_val): """Return the next element of *current_val* from *vlist*, if approaching the list boundary, starts from begining. """ return vlist[(vlist.index(current_val) + 1) % len(vlist)]
48e2ac31178f51f981eb6a27ecf2b35d44b893b4
706,331
import requests def id_convert(values, idtype=None): """ Get data from the id converter API. https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/ """ base = 'http://www.pubmedcentral.nih.gov/utils/idconv/v1.0/' params = { 'ids': values, 'format': 'json', } if idtype is not None: params['idtype'] = idtype resp = requests.get(base, params=params) raw = resp.json() records = raw.get('records') if records is None: return None status = records[0].get('status') if status == u"error": return None return raw['records'][0]
a60698fb20ba94445bbd06384b8523e92bfb91a3
706,332
def get_secondary_connections(network, user): """ Finds all the secondary connections (i.e. connections of connections) of a given user. Arguments: network: the gamer network data structure. user: a string containing the name of the user. Returns: A list containing the secondary connections (connections of connections). - If the user is not in the network, returns None. - If a user has no primary connections to begin with, returns an empty list. NOTE: It is OK if a user's list of secondary connections includes the user himself/herself. It is also OK if the list contains a user's primary connection that is a secondary connection as well. """ if user not in network: return None if network[user][0] == []: return [] return [person for group in [network[connection][0] for connection in network[user][0]] for person in group]
4e53f6e43f2fb132932381370efa4b3a3cd4793c
706,333
def get_regression_function(model, model_code): """ Method which return prediction function for trained regression model :param model: trained model object :return: regression predictor function """ return model.predict
fca4a0767b1e741952534baf59ac07cece2c9342
706,334
def beam_motion_banding_filter(img, padding=20): """ :param img: numpy.array. 2d projection image or sinogram. The left and right side of the image should be empty. So that `padding` on the left and right will be used to create an beam motion banding image and be normalized from the original image. :param padding: int. The size of on the left and right empty area to be used to find the average value where there is no object. :return img_new: numpy.array Smoothed image. """ nx = img.shape[1] mean_left = img[:, 0:padding].mean(axis=1) mean_right = img[:, -padding:].mean(axis=1) mean_middle = (mean_left + mean_right) / 2 slope = (mean_right - mean_left) / (nx - padding) # Make an image with only bandings. img_banding = img * 0.0 for i in range(img_banding.shape[1]): # iterate cols img_banding[:, i] = mean_middle + (i - nx / 2) * slope # Subtract the banding from the original. img_new = img-img_banding return img_new
5191c1f3022711459ce81cfbf0c4d6c6fb7dcd41
706,335
def log(session): """Clear nicos log handler content""" handler = session.testhandler handler.clear() return handler
086e362c8195b917c826fc8b20d3095210ac82fd
706,336
def _find_additional_age_entities(request, responder): """ If the user has a query such as 'list all employees under 30', the notion of age is implicit rather than explicit in the form of an age entity. Hence, this function is beneficial in capturing the existence such implicit entities. Returns a true/false depending on the existence or lack of the combination of numerical entities and comparators, thereby indicating an implicit age entitiy or lack of it, respectively. """ try: comparator_entity = [e for e in request.entities if e['type'] == 'comparator'][0] num_entity = [float(e['value'][0]['value']) for e in request.entities if e['type'] == 'sys_number'] # if any token in the text query is numeric that was missed by the num_entity, # add it to the list for i in request.text.split(): try: num_entity.append(float(i)) except ValueError: continue except (IndexError, ValueError): comparator_entity = [] num_entity = [] return True if comparator_entity and num_entity else False
971bc0805c607134b6947e0d61ebab6f217c6961
706,337
import argparse def parse_args(): """Process arguments""" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--train', '-t', required=True, type=str, help="Training ProteinNet data") parser.add_argument('--val', '-v', type=str, help="Validation ProteinNet data") parser.add_argument('--no_gpu', '-n', action="store_true", help="Prevent GPU usage for ESM1b even when available") parser.add_argument('--threshold', '-r', default=None, type=float, help="Perform frequency classification at given threshold") parser.add_argument('--model', '-m', default="esm_top_model.pth", help="Path to save model") parser.add_argument('--epochs', '-e', default=3, type=int, help="Epochs to train for") parser.add_argument('--report_batch', '-p', default=1000, type=int, help="Batch multiple to report at") return parser.parse_args()
c37d11327cb0d0baf3049943320c5bed6fb18e18
706,338
def RefundablePayrollTaxCredit(was_plus_sey_p, was_plus_sey_s, RPTC_c, RPTC_rt, rptc_p, rptc_s, rptc): """ Computes refundable payroll tax credit amounts. """ rptc_p = min(was_plus_sey_p * RPTC_rt, RPTC_c) rptc_s = min(was_plus_sey_s * RPTC_rt, RPTC_c) rptc = rptc_p + rptc_s return (rptc_p, rptc_s, rptc)
e282139921045fe8e286abbde6bb4ae44151a50d
706,339
def email_sent_ipn(path: str) -> tuple: """ **email_sent_ipn** Delivered ipn for mailgun :param path: organization_id :return: OK, 200 """ # NOTE: Delivered ipn will end up here if path == "delivered": pass elif path == "clicks": pass elif path == "opens": pass elif path == "failure": pass elif path == "spam": pass elif path == "unsubscribe": pass return "OK", 200
4bbfed4f86916ddc2b68ade0c8739e25a562bbda
706,340
import sys def decode(path: str) -> str: """ utility fct to encode/decode """ return path.encode(sys.stdout.encoding, 'ignore').decode(sys.stdout.encoding)
9e75e04928e7df4646feaed85a799a137693fa77
706,341
def klucb(x, d, div, upperbound, lowerbound=-float("inf"), precision=1e-6): """The generic klUCB index computation. Input args.: x, d, div: KL divergence to be used. upperbound, lowerbound=-float('inf'), precision=1e-6, """ l = max(x, lowerbound) u = upperbound while u - l > precision: m = (l + u) / 2 if div(x, m) > d: u = m else: l = m return (l + u) / 2
82aa51e248568d201e0d9d5621bf043532df8572
706,342
def wavelength_to_energy(wavelength): """ Converts wavelength (A) to photon energy (keV) """ return 12.39842/wavelength
4e2d11f2de8ed4890df5d885801cd492644817d8
706,343