Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k). Each record below lists the code content, its sha1, and its id.
def get_new_lp_file(test_nr):
    """ Get name of new LP file """
    return "test/{0}-new.lp".format(test_nr)
f7527a053a640d080f3aecd9db24623d5563b250
15,833
def tokenize(tokenizer, data, max_length=128):
    """
    Iterate over the data and tokenize it. Sequences longer than max_length are trimmed.
    :param tokenizer: tokenizer to use for tokenization
    :param data: list of sentences
    :param max_length: maximum number of tokens kept per sequence
    :return: a list of the entire tokenized data
    """
    tokenized_data = []
    for sent in data:
        tokens = tokenizer.encode(sent, add_special_tokens=True)
        # trim to max_length tokens (BERT itself caps sequences at 512)
        tokenized_data.append(tokens[:max_length])
    return tokenized_data
30b3786c1299bc42cd2698eae83ce1c6bdc3cfbe
15,834
import logging


def find_log_path(lg):
    """ Find the file paths of the FileHandlers. """
    out = []
    for h in lg.handlers:
        if isinstance(h, logging.FileHandler):
            out.append(h.baseFilename)
    return out
efab0cb7eafd0491e1365224dc83f816c7bb1b51
15,836
import requests


def send_proms_pingback(pingback_target_uri, payload, mimetype='text/turtle'):
    """
    Generates and posts a PROMS pingback message

    :param pingback_target_uri: a URI, to where the pingback is sent
    :param payload: an RDF file, in one of the allowed formats and conformant
        with the PROMS pingback message spec
    :param mimetype: the mimetype of the RDF file being sent
    :return: a two-item list: [True, response content] on success (HTTP 201),
        [False, response content or error message] otherwise
    """
    headers = {'Content-Type': mimetype}
    # send the post
    try:
        r = requests.post(pingback_target_uri, data=payload, headers=headers)
        if r.status_code == 201:
            return [True, r.content]
        return [False, r.content]
    except Exception as e:
        print(str(e))
        return [False, str(e)]
09ecff1835352f08e7fc5f4bce545585798e688c
15,838
def sign(x):
    """ Sign function """
    return 1 if x >= 0 else -1
20d85cf36d183c96e75fa3b795bf7f05f558e3b8
15,844
def dict_hangman(num_of_tries):
    """
    The function returns the "photo" of the hangman.
    :param num_of_tries: the user's number of guesses
    :type num_of_tries: int
    :return: the photo of the hangman
    :rtype: string
    """
    HANGMAN_PHOTOS = {
        '1': """
    x-------x""",
        '2': """
    x-------x
    |
    |
    |
    |
    |""",
        '3': """
    x-------x
    |       |
    |       0
    |
    |
    |""",
        '4': """
    x-------x
    |       |
    |       0
    |       |
    |
    |""",
        '5': """
    x-------x
    |       |
    |       0
    |      /|\\
    |
    |""",
        '6': """
    x-------x
    |       |
    |       0
    |      /|\\
    |      /
    |""",
        '7': """
    x-------x
    |       |
    |       0
    |      /|\\
    |      / \\
    |"""
    }
    return HANGMAN_PHOTOS[str(num_of_tries)]
3864d3072fa0fe9fea6c7e02733e466669335c80
15,847
def split_warnings_errors(output: str):
    """
    Function which splits the given string into warning messages and error
    messages, using a leading 'W' or 'E' in each line.
    Error messages that do not start with 'E' are returned as other.
    The output of a certain pack can include:
    - Fail msgs
    - Fail msgs and warnings msgs
    - Passed msgs
    - Passed msgs and warnings msgs
    - Warning msgs
    Args:
        output(str): string which contains messages from linters.
    Return:
        list of error messages, list of warning messages, list of all undetected messages
    """
    output_lst = output.split('\n')
    # Warnings and errors lists currently relevant for XSOAR Linter
    warnings_list = []
    error_list = []
    # Others list is relevant for mypy and flake8.
    other_msg_list = []
    for msg in output_lst:
        # 'W:' for python2 xsoar linter
        # 'W[0-9]' for python3 xsoar linter
        if (msg.startswith('W') and len(msg) > 1 and msg[1].isdigit()) or 'W:' in msg or 'W90' in msg:
            warnings_list.append(msg)
        elif (msg.startswith('E') and len(msg) > 1 and msg[1].isdigit()) or 'E:' in msg or 'E90' in msg:
            error_list.append(msg)
        else:
            other_msg_list.append(msg)
    return error_list, warnings_list, other_msg_list
aaf0ea05f5d32247f210ae1696ea58824629d075
15,851
def _get_type_and_value(entry):
    """Parse dmidecode entry and return key/value pair"""
    r = {}
    for line in entry.split('\n'):
        s = line.split(':')
        if len(s) != 2:
            continue
        r[s[0].strip()] = s[1].strip()
    return r
e6dd2068f10085c2dac233f1f71512e5874c5adc
15,855
def _replace_strings(obj, old, new, inplace=False):
    """
    Recursively replaces all strings in the given object.

    This function is specifically meant to help with saving and loading
    of config dictionaries and should not be considered a general tool.
    """
    if not inplace:
        obj = obj.copy()
    if isinstance(obj, dict):
        obj_keys = obj.keys()
    elif isinstance(obj, list):
        obj_keys = range(len(obj))
    else:
        raise TypeError('Object must be either a dict or a list.')
    for key in obj_keys:
        if isinstance(obj[key], str):
            obj[key] = obj[key].replace(old, new)
        elif isinstance(obj[key], dict) or isinstance(obj[key], list):
            obj[key] = _replace_strings(obj[key], old, new)
    return obj
3f7661a53ab8cbb836eee68b1bb3d1df9e73c4a5
15,857
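A quick usage sketch for the `_replace_strings` record above, with a hypothetical config dict:

cfg = {"paths": ["/old/data", "/old/logs"], "name": "run1"}
print(_replace_strings(cfg, "/old", "/new"))
# {'paths': ['/new/data', '/new/logs'], 'name': 'run1'}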
def hello_world() -> str:
    """ Say something! """
    return "Hello World!"
22deba02b863355d150653caf744a65950a2fec5
15,858
def range_geometric_row(number, d, r=1.1):
    """Returns a list of numbers with a certain relation to each other.

    The function divides one number into a list of d numbers [n0, n1, ...],
    such that their sum is number and the relation between the numbers is
    defined with n1 = n0 / r, n2 = n1 / r, n3 = n2 / r, ...
    """
    if r <= 0:
        raise ValueError("r must be > 0")
    if r == 1:
        # all d numbers are equal; the geometric-series formula below
        # would otherwise divide by zero
        return [number / d] * d
    n0 = number / ((1 - (1 / r) ** d) / (1 - 1 / r))
    numbers = [n0]
    for _ in range(d - 1):
        numbers.append(numbers[-1] / r)
    return numbers
92e7f9f1b85011323cf5e90002d8f9151ae17e0e
15,859
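A worked example for `range_geometric_row` above, with illustrative values: for number=100, d=3 and r=2, each entry is half the previous one and the entries sum back to 100 (up to floating-point rounding):

row = range_geometric_row(100, 3, r=2.0)
# [57.142857142857146, 28.571428571428573, 14.285714285714286]
# sum(row) == 100.0 (up to floating-point rounding)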
import hashlib


def getIdHash(id):
    """ Return md5 prefix based on id value"""
    m = hashlib.new('md5')
    m.update(id.encode('utf8'))
    hexdigest = m.hexdigest()
    return hexdigest[:5]
a9e8d67fae494cd2eaac41b6258be69ed10b667a
15,860
def findPayload(message, type):
    """
    Find a payload/part that matches a type as closely as possible and decode
    it properly.

    Parameters
    ----------
    message : email.message.Message
        The message to search.
    type : str
        A MIME type.

    Returns
    -------
    (str, str)
        The payload as a string, and the content type it was found under.
    """
    charset = message.get_charset() or "utf-8"
    if message.is_multipart():
        for k in message.walk():
            contenttype = k.get_content_type()
            if contenttype == type:
                return k.get_payload(decode=True).decode(charset), contenttype
        for k in message.walk():
            contenttype = k.get_content_type()
            if k.get_content_type() == message.get_default_type():
                return k.get_payload(decode=True).decode(charset), contenttype
    return message.get_payload(decode=True).decode(charset), message.get_content_type()
265159546f1b0d2a6cc065e2c467528ea74048ef
15,870
import csv


def calculate_number_of_synthetic_data_to_mix(original_data_file, target_ratio):
    """Calculate the number of negative samples that need to be added to
    achieve the target ratio of negative samples.

    Args:
        original_data_file: path to the original data file
        target_ratio: the target ratio of negative samples

    Returns:
        The number of negative samples needed to achieve the target ratio.
    """
    total_number_of_samples = 0
    original_negative_sample_count = 0
    with open(original_data_file) as tsv_file:
        read_tsv = csv.reader(tsv_file, delimiter="\t")
        for line in read_tsv:
            if int(line[-1]) == 0:
                original_negative_sample_count += 1
            total_number_of_samples += 1
    return int(
        (original_negative_sample_count - total_number_of_samples * target_ratio) /
        (target_ratio - 1))
eb83cdbb9af39715f16e412f8ae799222a2a232f
15,871
def get_contact_info_keys(status_update):
    """Returns the contact info method keys (email, sms) used to send a
    notification for a status update if the notification exists.

    Returns [] if there is no notification
    """
    if hasattr(status_update, 'notification'):
        return list(status_update.notification.contact_info.keys())
    else:
        return []
020a9742df99cd65be1433165823c4f364009d85
15,879
def binary_search(input_list, number, min_idx, max_idx):
    """
    Find the index for a given value (number) by searching in a sorted array

    Time complexity: O(log2(n))
    Space complexity: O(1)

    Args:
    - input_list(array): sorted array of numbers to be searched in
    - number(int): number to be searched for

    Returns:
    - position(int): returns array index for the given number
                     returns -1 when the number was not found
    """
    # corner case for when the provided min_idx is higher than the provided max_idx
    if max_idx < min_idx:
        return -1

    # binary search
    while min_idx <= max_idx:
        mid = (min_idx + max_idx) // 2

        # Check if number is present at mid
        if input_list[mid] == number:
            return mid
        # If the guess was too low, set min to be one larger than the guess
        if input_list[mid] < number:
            min_idx = mid + 1
        # If the guess was too high, set max to be one smaller than the guess
        else:
            max_idx = mid - 1

    # if we got here, the number was not found
    return -1
20358c096c529937d57503285150a90a37f62dd1
15,882
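A short usage sketch for `binary_search` above, with illustrative values:

arr = [1, 3, 5, 7, 9]
binary_search(arr, 7, 0, len(arr) - 1)   # -> 3
binary_search(arr, 4, 0, len(arr) - 1)   # -> -1 (not present)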
import torch
import torch.nn.functional as F


def _calculate_ece(logits, labels, n_bins=10):
    """
    Calculates the Expected Calibration Error of a model.
    (This isn't necessary for temperature scaling, just a cool metric).

    The input to this loss is the logits of a model, NOT the softmax scores.

    This divides the confidence outputs into equally-sized interval bins.
    In each bin, we compute the confidence gap:

    bin_gap = | avg_confidence_in_bin - accuracy_in_bin |

    We then return a weighted average of the gaps, based on the number
    of samples in each bin.

    See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht.
    "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI. 2015.
    """
    bin_boundaries = torch.linspace(0, 1, n_bins + 1)
    bin_lowers = bin_boundaries[:-1]
    bin_uppers = bin_boundaries[1:]

    # convert the raw logits to probabilities, as the docstring expects
    softmaxes = F.softmax(logits, dim=1)
    confidences, predictions = torch.max(softmaxes, 1)
    accuracies = predictions.eq(labels)

    ece = torch.zeros(1, device=logits.device)
    for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
        # Calculate |confidence - accuracy| in each bin
        in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin.item() > 0:
            accuracy_in_bin = accuracies[in_bin].float().mean()
            avg_confidence_in_bin = confidences[in_bin].mean()
            ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
    return ece.item()
af311a47b7558b07838a38d736386147804ea109
15,886
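A minimal usage sketch for `_calculate_ece` above, with random (hypothetical) logits and labels:

import torch

torch.manual_seed(0)
logits = torch.randn(100, 5)          # 100 samples, 5 classes
labels = torch.randint(0, 5, (100,))  # random ground-truth labels
ece = _calculate_ece(logits, labels, n_bins=10)
print(ece)  # a float in [0, 1]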
def history_parser(arg):
    """
    :param: arg is a string that contains words separated by spaces
    :return: Returns two strings: the first word of arg, and everything after
             the first space. Returns (None, None) if arg contains no space.
    """
    try:
        v = arg.index(' ')
    except ValueError:
        return None, None

    first_word = arg[0:v]
    remain = arg[v + 1:]
    return first_word, remain
267f0cd8ddfc0bfa9106d18341421d5d4d48ed1f
15,888
import json


def decode(body):
    """decode string to object"""
    if not body:
        return None
    return json.loads(body)
2663a3d742b6f5e17d5b0aed876f136b30fdde1c
15,893
def bintodec(x):
    """Convert Binary to Decimal.

    Input is a string and output is a positive integer."""
    num = 0
    n = len(x)
    for i in range(n):
        num = num + 2 ** i * int(x[n - i - 1])
    return num
e83e3c34c237d5840bd024f49f3d436e6804b427
15,895
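A one-line example for `bintodec` above:

bintodec("1011")  # -> 11, i.e. 1*8 + 0*4 + 1*2 + 1*1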
import collections


def _create_gt_string(cnv_row):
    """
    Creates VCF gt string for a single-sample VCF.
    """
    gt_dict = collections.OrderedDict()
    gt_dict["GT"] = cnv_row.GT
    gt_dict["S"] = cnv_row.S
    gt_dict["NS"] = cnv_row.NS
    gt_dict["LS"] = cnv_row.LS
    gt_dict["LNS"] = cnv_row.LNS
    gt_dict["RS"] = cnv_row.RS
    gt_dict["RNS"] = cnv_row.RNS
    gt_dict["GQ"] = cnv_row.GQ
    gt_dict["AB"] = cnv_row.AB
    gt_dict["SQ"] = cnv_row.SQ
    # join the field values with ':' separators, without a trailing ':'
    return ":".join(str(value) for value in gt_dict.values())
fd866f2ce22cb8b34608dcd6161be5d11f374c82
15,896
def generate_name_Id_map(name, map):
    """
    Given a name and map, return corresponding Id. If name not in map, generate a new Id.
    :param name: session or item name in dataset
    :param map: existing map, a dictionary: map[name]=Id
    :return: Id: allocated new Id of the corresponding name
    """
    if name in map:
        Id = map[name]
    else:
        Id = len(map) + 1
        map[name] = Id
    return Id
8e86daf1a345803b280ad91f40a18eaeaa0cde5f
15,897
def _flatten_task(obj):
    """Flatten the structure of the task into a single dict
    """
    data = {
        'id': obj.id,
        'checkpoint_id': obj.checkpoint_id,
        'policy_id': obj.policy_id,
        'provider_id': obj.provider_id,
        'vault_id': obj.vault_id,
        'vault_name': obj.vault_name,
        'operation_type': obj.operation_type,
        'error_mesage': obj.error_info.message,
        'error_code': obj.error_info.code,
        'created_at': obj.created_at,
        'ended_at': obj.ended_at,
        'started_at': obj.started_at,
        'updated_at': obj.updated_at,
    }
    return data
f10b0db4cefad81818f3195da0cf25339c420823
15,907
import itertools


def prev_this_next(it):
    """
    iterator to gradually return three consecutive elements of another
    iterable. If at the beginning or the end of the iterable, None is
    returned for corresponding elements.
    """
    a, b, c = itertools.tee(it, 3)
    next(c)
    return zip(itertools.chain([None], a), b, itertools.chain(c, [None]))
dc4416ed0b1c06502f3005df418536b9a92b4481
15,911
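A short example for `prev_this_next` above:

list(prev_this_next("abc"))
# [(None, 'a', 'b'), ('a', 'b', 'c'), ('b', 'c', None)]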
def parse_unity_results(output):
    """Read output from Unity and parse the results into 5-tuples:
    (file, lineno, name, result, message)"""
    result = []
    lines = output.split('\n')
    for line in lines:
        if line == '':
            break
        parts = line.split(':', maxsplit=4)
        if len(parts) == 4:
            parts.append(None)
        else:
            # strip the leading space from the message
            parts[4] = parts[4][1:]
        result.append(tuple(parts))
    return result
488f98d69434b2abdb9200353e51c12805694297
15,913
def airports_codes_from_city(name, airports_list, airport_type):
    """
    Find the codes of all airports in a given city or state.
    :param name: name of the airport to check
    :param airports_list: list of all airports
    :param airport_type: key to match :param name: against - 'code', 'city', 'state'
    :return: list of airport codes
    """
    temp = []
    for airport in airports_list:
        if name.lower() == airport[airport_type].lower():
            temp.append(airport['code'])
    return temp
995bab1eca1633c5e52cfdfe5b1a92b7738fbbb5
15,916
def _get_iterator(to_iter, progress):
    """
    Create an iterator.

    Args:
        to_iter (:py:attr:`array_like`): The list or array to iterate.
        progress (:py:attr:`bool`): Show progress bar.

    Returns:
        :py:attr:`range` or :py:class:`tqdm.std.tqdm`: Iterator object.
    """
    iterator = range(len(to_iter))
    if progress:
        try:
            # import here so the fallback is actually reachable when
            # tqdm is not installed
            from tqdm import tqdm
            iterator = tqdm(range(len(to_iter)))
        except ModuleNotFoundError:
            print(
                "For the progress bar, you need to have the tqdm package "
                "installed. No progress bar will be shown"
            )
    return iterator
c1dd29a430d2c468e3f89536fef593b7477a04ce
15,917
def check_replication(service, service_replication,
                      warn_range, crit_range):
    """Check for sufficient replication of a service

    :param service: A string representing the name of the service
                    this replication check is relevant to.
    :param service_replication: An int representing the number of available
                                service instances
    :param warn_range: A two tuple of integers representing the minimum and
                       maximum allowed replication before entering the WARNING
                       state.
    :param crit_range: A two tuple of integers representing the minimum and
                       maximum allowed replication before entering the CRITICAL
                       state.

    Note that all ranges are closed interval. If the replication is outside
    the closed interval for the relevant level (e.g. warning, critical), then
    the error code will change appropriately.

    :returns check_result: A tuple of error code and a human readable error
                           message. The error codes conform to the nagios
                           plugin api.

                           e.g. for an OK service
                           (0, "OK lucy has 1 instance(s)")

                           e.g. for a CRITICAL service
                           (2, "CRITICAL lucy has 0 instance(s), expected value in [1, 1e18]")
    """
    code, status, interval = 0, 'OK', None
    if not (crit_range[0] <= service_replication <= crit_range[1]):
        code, status, interval = 2, 'CRITICAL', crit_range
    elif not (warn_range[0] <= service_replication <= warn_range[1]):
        code, status, interval = 1, 'WARNING', warn_range

    expected_message = ""
    if interval is not None:
        expected_message = ", expected value in {0}".format(interval)

    message = "{0} {1} has {2} instance(s){3}".format(
        status, service, service_replication, expected_message
    )

    return code, message
ac515bd0881431eebd0e14d0578f0bb1c07835f3
15,919
import json


def load_metadata(filename):
    """Read json from file and return json object."""
    with open(filename, encoding="utf-8") as fd:
        return json.load(fd)
9a9fbccaf4a7e64d2aef4b427a68226e2b41c181
15,920
def get_section_number(data):
    """Gets the section number from the given section data

    Parses the given array of section data bytes and returns the section
    number. SI tables come in sections. Each section is numbered and this
    function will return the number of the given section.
    """
    return data[6]
5d7b9c51d614f627e3765b683216905ad124598b
15,922
def denumpyfy(tuple_list_dict_number):
    """A nested structure of tuples, lists, dicts and the lowest level numpy
    values gets converted to an object with the same structure but all being
    corresponding native python numbers.

    Parameters
    ----------
    tuple_list_dict_number : tuple, list, dict, number
        The object that should be converted.

    Returns
    -------
    tuple, list, dict, native number (float, int)
        The object with the same structure but only native python numbers.
    """
    if isinstance(tuple_list_dict_number, tuple):
        return tuple(denumpyfy(elem) for elem in tuple_list_dict_number)
    if isinstance(tuple_list_dict_number, list):
        return [denumpyfy(elem) for elem in tuple_list_dict_number]
    if isinstance(tuple_list_dict_number, dict):
        return {denumpyfy(k): denumpyfy(tuple_list_dict_number[k])
                for k in tuple_list_dict_number}
    if isinstance(tuple_list_dict_number, float):
        return float(tuple_list_dict_number)
    if isinstance(tuple_list_dict_number, int):
        return int(tuple_list_dict_number)
    return tuple_list_dict_number
70558250e3875cde2c66fe6680fd6a6ace498602
15,924
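A short example for `denumpyfy` above; it relies on `np.float64` being a subclass of Python's `float`, so the isinstance checks catch it:

import numpy as np

denumpyfy({"loss": np.float64(0.25), "curve": (np.float64(1.0), np.float64(2.0))})
# {'loss': 0.25, 'curve': (1.0, 2.0)} -- all values are now plain Python floats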
def _get_nn_for_timestamp(kd_tree, X, timestep, aso_idx, k, radius):
    """Returns the nearest ASOs to the provided `aso_idx` ASO. If a `radius`
    is provided then the results are all the ASOs within that given radius,
    otherwise the results are the `k` nearest ASOs

    :param kd_tree: The KD-tree built for the prediction timestep
    :type kd_tree: sklearn.neighbors.KDTree

    :param X: The numpy array of orbital predictions for each ASO for the
        prediction timestep
    :type X: numpy.array

    :param timestep: The orbital prediction timestep that the `X` array
        represents
    :type timestep: int

    :param aso_idx: The index in `X` of the ASO to find nearest ASOs for
    :type aso_idx: int

    :param k: The number of nearest ASOs to return. Not used if `radius` is
        passed
    :type k: int

    :param radius: The radius, in meters, to use in determining what is a
        near ASO
    :type radius: float

    :return: A list of tuples representing all ASOs that match the provided
        query where the first value is the index in `X` of the matching ASO,
        the second value is the timestep where this match occurred, and the
        third value is the distance from the query ASO to the matching ASO.
    :rtype: [(int, int, float)]
    """
    query_point = X[aso_idx].reshape(1, -1)
    if radius:
        result_idxs, dists = kd_tree.query_radius(query_point,
                                                  r=radius,
                                                  return_distance=True)
    else:
        dists, result_idxs = kd_tree.query(query_point, k=k + 1)
    idx_dists = zip(result_idxs[0], dists[0])
    if radius:
        # Only return results that have non-zero distance
        result = [(int(i), int(timestep), float(d))
                  for i, d in idx_dists
                  if d > 0]
    else:
        # Remove query object from results
        result = [(int(i), int(timestep), float(d))
                  for i, d in idx_dists
                  if i != aso_idx]
    return result
d2dc4f7912aafc903782c21ec374bdd4d24475bc
15,925
def _poynting(field):
    """Computes poynting vector from the field vector"""
    tmp1 = (field[0].real * field[1].real + field[0].imag * field[1].imag)
    tmp2 = (field[2].real * field[3].real + field[2].imag * field[3].imag)
    return tmp1 - tmp2
55c334b5c2e5df87d13ad0c000e5e599d6e8b948
15,929
import socket


def tcp_port_reachable(addr, port, timeout=5):
    """
    Return 'True' if we could establish a TCP connection with the given
    addr:port tuple and 'False' otherwise.

    Use the optional third argument to determine the timeout for the
    connect() call.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((addr, port))
        s.shutdown(socket.SHUT_RDWR)
        return True
    except OSError:
        return False
    finally:
        # always release the socket, whether or not the connect succeeded
        s.close()
22c530cdbccf6c19ffe60b5e1904e797f7d059ea
15,930
def scale_wind_speed(spd, scale_factor: float):
    """
    Scales wind speed by the scale_factor

    :param spd: Series or data frame or a single value of wind speed to scale
    :param scale_factor: Scaling factor in decimal; a scale factor of 0.8
        returns 0.8 times the wind speed, and 1.2 returns 1.2 times the
        wind speed
    :return: Series or data frame with scaled wind speeds
    """
    return spd * scale_factor
a55b76aee3e3ab2718db2714b6be0915c24e1631
15,931
def transform_dict(img):
    """
    Take a raster data source and return a dictionary with geotransform
    values and keys that make sense.

    Parameters
    ----------
    img : gdal.datasource
        The image datasource from which the GeoTransform will be retrieved.

    Returns
    -------
    dict
        A dict with the geotransform values labeled.
    """
    geotrans = img.GetGeoTransform()
    ret_dict = {
        'originX': geotrans[0],
        'pixWidth': geotrans[1],
        'rotation1': geotrans[2],
        'originY': geotrans[3],
        'rotation2': geotrans[4],
        'pixHeight': geotrans[5],
    }
    return ret_dict
8817028adfce28ae7f7ae787d4256d52fee095bc
15,933
def read_list(filename):
    """
    Read file line by line, ignoring lines starting with '#'

    Parameters
    ----------
    filename : str or pathlib.Path

    Returns
    -------
    list
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    return [line.rstrip('\n') for line in lines if not line.startswith('#')]
e4957425f1c2ff99e9e16ad8fe22e57ffa6e38a9
15,936
def get_trunc_minute_time(obstime):
    """Truncate obstime to nearest minute"""
    # integer-divide so the result stays an int; in Python 3 '/' would
    # produce a float and not truncate
    return (int(obstime) // 60) * 60
1a1a6ba47573442f0e98ca9aeaa8a5506e7ab081
15,942
def process_proc_output(proc, print_output=True):
    """Print output of process line by line. Returns the whole output."""
    def _print(s):
        if print_output:
            print(s)

    lines = []
    for line in iter(proc.stdout.readline, b''):
        # decode so printing and joining operate on text rather than bytes
        line = line.decode()
        _print('| %s' % line.rstrip())
        lines.append(line)
    return ''.join(lines)
5af5d3355a7d588806120da625894fcbe93bdca0
15,944
def make_subtitle(rho_rms_aurora, rho_rms_emtf,
                  phi_rms_aurora, phi_rms_emtf,
                  matlab_or_fortran, ttl_str=""):
    """
    Parameters
    ----------
    rho_rms_aurora: float
        rho_rms for aurora data differenced against a model. comes from compute_rms
    rho_rms_emtf: float
        rho_rms for emtf data differenced against a model. comes from compute_rms
    phi_rms_aurora: float
        phi_rms for aurora data differenced against a model. comes from compute_rms
    phi_rms_emtf: float
        phi_rms for emtf data differenced against a model. comes from compute_rms
    matlab_or_fortran: str
        "matlab" or "fortran". A specifier for the version of emtf.
    ttl_str: str
        string onto which we add the subtitle

    Returns
    -------
    ttl_str: str
        Figure title with subtitle
    """
    ttl_str += (
        f"\n rho rms_aurora {rho_rms_aurora:.1f} rms_{matlab_or_fortran}"
        f" {rho_rms_emtf:.1f}"
    )
    ttl_str += (
        f"\n phi rms_aurora {phi_rms_aurora:.1f} rms_{matlab_or_fortran}"
        f" {phi_rms_emtf:.1f}"
    )
    return ttl_str
7569e658785e571a4dcd4428e76a13b8b10e3327
15,947
from typing import List


def find_substring_by_pattern(
    strings: List[str], starts_with: str, ends_before: str
) -> str:
    """
    search for a first occurrence of a given pattern in a string list

    >>> some_strings = ["one", "two", "three"]
    >>> find_substring_by_pattern(some_strings, "t", "o")
    'tw'
    >>> find_substring_by_pattern(some_strings, "four", "five")
    Traceback (most recent call last):
       ...
    ValueError: pattern four.*five not found

    :param strings: a list of strings where the pattern is searched for
    :param starts_with: the first letters of a pattern
    :param ends_before: a substring which marks the beginning of something different
    :returns: a pattern which starts with ``starts_with`` and ends before ``ends_before``
    """
    for package_name in strings:
        starting_index = package_name.find(starts_with)
        if starting_index >= 0:
            # look for the end marker only from the start of the match onwards
            ending_index = package_name.find(ends_before, starting_index)
            return package_name[starting_index:ending_index]
    raise ValueError(f"pattern {starts_with}.*{ends_before} not found")
4bc0abe6fcdbf81350b575dd9834b9c646fda81e
15,948
def normalize_data(df):
    """
    Normalizes the data by subtracting the mean and dividing by the max - min.
    :param df: the dataframe that we are normalizing
    :return: the normalized dataframe
    """
    df_normalized = (df - df.mean()) / (df.max() - df.min())
    return df_normalized
13adfc79876f989d6983f74e57c7372c9dc00000
15,952
def filter_packets_by_filter_list(packets, filter_list):
    """
    :param packets: Packet list
    :param filter_list: Filters with respect to packet field
    :type filter_list: list of pyshark_filter_util.PySharkFilter
    :return: Filtered packets as list
    """
    filtered_packets = [packet for packet in packets
                        if all(single_filter.apply_filter_to_packet(packet)
                               for single_filter in filter_list)]
    return filtered_packets
61106178dca039498c4fec1239bd7d251b69f812
15,955
from datetime import datetime


def _get_date(element):
    """
    This function extracts the date the image was taken from the image
    element and converts it to a datetime format.

    Args:
        element: A dict containing all the image attributes.

    Returns:
        Date the image was taken in the format of datetime.
    """
    date_taken = element.get('datetaken')
    datetime_date_taken = datetime.strptime(date_taken, '%Y-%m-%d %H:%M:%S')
    return datetime_date_taken
b42f6b24ce3545571bf1025b5b984386fde20208
15,958
def make_predictions(data, model, weights):
    """Predict the labels of all points in a data set for a given model.

    Args:
        data (array[float]): Input data. A list with shape N x 2
            representing points on a 2D plane.
        model (qml.QNode): A QNode whose output expectation value will be
            used to make predictions of the labels of data.
        weights (array[float]): The trainable model parameters for the QNode.

    Returns:
        array[int]: The array of predictions for each data point made by
        the model QNode.
    """
    preds = []
    for idx in range(len(data)):
        estimated_expval = model(data[idx], weights)
        if estimated_expval > 0:
            preds.append(1)
        else:
            preds.append(-1)
    return preds
4ac2ba85a12d56f0128ba518e8a7d030c0eb5734
15,968
def get_major_minor(stat_inst):
    """get major/minor from a stat instance

    :return: major,minor tuple of ints
    """
    # assumes the traditional 16-bit dev_t layout (8-bit major, 8-bit minor);
    # os.major()/os.minor() handle larger device numbers
    return (stat_inst.st_rdev >> 8) & 0xff, stat_inst.st_rdev & 0xff
ec623deb66d1e95f5ec9744ffbefc03c52ebf6a9
15,970
def _resources_json_version_required() -> str:
    """
    Specifies the version of resources.json to obtain.
    """
    return "develop"
7f9eaff50b3a03ec50501e7ae125f4daab462325
15,971
import hashlib


def get_metadata_hash_for_attachments(attachments):
    """
    Calculate a metadata hash from a collection of attachments. The hash will
    change if any of the attachments changes.
    """
    hashes = [attachment.metadata_hash for attachment in attachments]
    # Sort the hashes to make the hash deterministic regardless of order
    hashes.sort()
    data = b"".join([hash_.encode("utf-8") for hash_ in hashes])
    return hashlib.sha256(data).hexdigest()
fb56306c611a1aa1d87e897650142375a69f26e3
15,976
import torch


def rmspe(pred, true):
    """Computes RMSPE"""
    return torch.mean(((true - pred) / true) ** 2) ** 0.5
2a83c9c10fb0547b4d90c805d94db871eb1b9e11
15,977
def get_xr_resolution(ds):
    """
    Read dataset and get pixel resolution from attributes. If
    attributes don't exist, fall back to rough approach of minus
    one pixel to another.

    Parameters
    ----------
    ds: xarray dataset, dataarray
        A single xarray dataset with variables and x and y dims.

    Returns
    ----------
    res : float
        A float containing the cell resolution of xarray dataset.
    """

    # check if x and y dims exist; either one missing makes the fallbacks unusable
    if 'x' not in list(ds.dims) or 'y' not in list(ds.dims):
        raise ValueError('No x, y dimensions in dataset.')

    # try getting max res option 1
    try:
        res = abs(max(ds.res))
    except Exception:
        res = None

    # try getting max res option 2
    try:
        if not res:
            res = max(ds.geobox.resolution)
    except Exception:
        res = None

    # try getting max res the dirty way
    try:
        if not res:
            x_res = abs(float(ds['x'].isel(x=0))) - abs(float(ds['x'].isel(x=1)))
            y_res = abs(float(ds['y'].isel(y=0))) - abs(float(ds['y'].isel(y=1)))
            res = abs(float(max(x_res, y_res)))
    except Exception:
        res = None

    # check if something exists
    if not res:
        raise ValueError('Could not extract cell resolution from dataset.')

    return res
3d87ff33190078753a496fd5f14854fa98eb1017
15,985
from typing import List
from typing import Dict


def match_relationships(relationships: List):
    """Creates a dict that connects object_id to all objects_ids it has a relationship with.

    Args:
        relationships (List): A list of relationship objects.

    Returns:
        Dict. Connects object_id to all objects_ids it has a relationship with.
        In the form of `id: [related_ids]`
    """
    matches: Dict[str, set] = {}
    for relationship in relationships:
        source = relationship.get('source_ref')
        target = relationship.get('target_ref')
        if not source or not target:
            continue

        matches.setdefault(source, set()).add(target)
        matches.setdefault(target, set()).add(source)

    return matches
870db3b324f340a7f632251ebe22bfae6e693076
15,989
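A short example for `match_relationships` above, with hypothetical STIX-style ids:

rels = [
    {'source_ref': 'indicator--1', 'target_ref': 'malware--9'},
    {'source_ref': 'indicator--1', 'target_ref': 'malware--7'},
]
match_relationships(rels)
# {'indicator--1': {'malware--9', 'malware--7'},
#  'malware--9': {'indicator--1'},
#  'malware--7': {'indicator--1'}}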
def makeChromTiles(db):
    """
    Make a region for each chromosome
    """
    out = []
    for (k, v) in db.chromsTuple:
        out.append([k, 0, v])
    return out
ca887035f05047bf7172c4e120fc7623a0fcb3e5
15,990
def data2mesh(data):
    """
    Extracts from a given torch_geometric Data object the mesh elements

    Parameters
    ----------
    data : Data
        a torch_geometric Data object

    Returns
    -------
    (Tensor, LongTensor, Tensor)
        the points set, the topology and the vertex normals tensor
    """
    return data.pos, data.face, data.norm
781489d95db76d106c910efda9bbc5f348e9d7ed
15,994
def prime_generator(maxi):
    """
    Generate all the prime numbers below maxi. maxi is not included.

    The method uses the sieve of Eratosthenes algorithm.

    >>> prime_generator(10)
    [2, 3, 5, 7]
    """
    if maxi < 2:
        # no primes below 2; also avoids indexing li[1] on a short list
        return []
    li = [1] * maxi
    li[0] = li[1] = 0
    for pos, val in enumerate(li):
        if val:
            for index in range(pos + pos, maxi, pos):
                li[index] = 0
    primes = []
    for pos, val in enumerate(li):
        if val:
            primes.append(pos)
    return primes
f2829ed995f0f289b22960fad706cf3ec0371446
15,998
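A quick check of `prime_generator` above on a slightly larger bound:

prime_generator(30)
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]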
def filter_content(contents, rules, mode=any):
    """
    Filter contents by the given rules.

    Args:
        contents `list`
            List of illust; the content may vary from source to source.
            Be sure you know the data hierarchy of the object.
        rules `list`
            A list of functions that take one content and return a boolean
            value indicating the content is selected.
        mode `any` or `all`
            Choose whether to satisfy all rules or any of them.

    Returns:
        list of filtered contents.

    Raises:
        None
    """
    if mode not in (any, all):
        raise ValueError("Accept only one of 'any' or 'all'.")
    res = []
    for i in contents:
        if mode(r(i) for r in rules):
            res.append(i)
    return res
a875ee3d8523a29043b576c9d19ef8a09589ed91
15,999
def get_dict_key_by_value(source_dict: dict, dict_value):
    """Return the first key of the ``source_dict`` that has ``dict_value`` as value."""
    for k, v in source_dict.items():
        if v == dict_value:
            return k
    return None
0ae198c7d07fe57898779f0b75b75bdb590f5a3d
16,000
from pathlib import Path


def get_cases(f, sep="---"):
    """
    Extracts inputs and outputs for each test/verification case within f,
    where f is a folder.

    Params
    ======
    f: str
        The folder containing the cases to be extracted.
    sep: str
        The substring separating comments from the input from the output in
        each case file.

    Returns
    =======
    cases: []
        Array of dictionaries containing each case
        Each case is a dictionary with the following fields:
        - "filename": The name of the file
        - "comments": Any comments in the file
        - "inputs": The inputs
        - "outputs": The expected outputs

    Raises
    ======
    AssertionError: If the given path is not a folder.
    """
    # Initialise path
    p = Path(f)

    # Assert that target folder is a folder
    assert p.is_dir()

    # List of cases in the folder
    cases = []

    # Loop through all cases within the folder
    for case_path in p.iterdir():
        # Open each case file
        with open(case_path) as fr:
            # Obtain the contents of the case file
            contents = fr.read()

        # Each case file holds the INPUTS, followed by the separator
        # substring, followed by the OUTPUTS, optionally followed by the
        # separator substring and the COMMENTS

        # Instantiate case dictionary
        c = {}

        # Separate the contents by the separator, and then clean each
        # individual element of newline/whitespace
        contents = contents.split(sep)
        contents = [part.strip() for part in contents]

        # Populate dictionary
        c["filename"] = case_path.with_suffix("").name
        c["inputs"] = contents[0]
        c["outputs"] = contents[1]
        if len(contents) == 3:
            c["comments"] = contents[2]

        # Add dictionary to list of cases
        cases.append(c)

    # After all cases have been looped through, return cases
    return cases
bedcc7cedd791505dbed886a539df92aa0fa3f87
16,001
import requests


def create_empty_zenodo_upload(access_token):
    """
    Create an empty upload using Zenodo API.

    :param access_token: Zenodo access token.

    :return: requests.models.Response from Zenodo API
    """
    headers = {"Content-Type": "application/json"}
    r = requests.post('https://zenodo.org/api/deposit/depositions',
                      params={'access_token': access_token},
                      json={},
                      headers=headers)
    return r
14a26a07a08b2dab1ccf55ddea8c3d4cb3d9a2d6
16,002
def slurp(name):
    """
    Read the file

    :param name: read the named file
    :return: the content
    """
    with open(name, "r") as f:
        return f.read()
2edb241b5cbb0c9298dbdc9dd5f1f89786fff036
16,004
def vec_is_void(a):
    """
    Check whether a given vector is empty

    A vector is considered "void" if it is None or has no elements.

    Parameters
    ----------
    a: list[]
        The vector to be checked

    Returns
    -------
    bool
        True if the vector is empty, False otherwise
    """
    return a is None or len(a) == 0
9a9b6f78ec2ddb81990fe54a5b429413c0472742
16,006
import calendar


def _sort_order(count_items):
    """Key for sorting day counts in days of the week order."""
    return list(calendar.day_name).index(count_items[0])
9fa5a3f37ee034a99c2c6ed428655261661210aa
16,012
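A short usage sketch for `_sort_order` above, with hypothetical counts:

counts = {'Wednesday': 2, 'Monday': 5, 'Friday': 1}
sorted(counts.items(), key=_sort_order)
# [('Monday', 5), ('Wednesday', 2), ('Friday', 1)]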
def download_input(storage_provider, parsed_event, input_dir_path):
    """Receives the event where the file information is and the
    input_dir_path where to store the downloaded file.

    Returns the file path where the file is downloaded."""
    return storage_provider.download_file(parsed_event, input_dir_path)
887ca61a40a658d172b4b77833132b73933a14ce
16,015
def get_bit(val: int, bitNo: int) -> int:
    """
    Get bit from int
    """
    return (val >> bitNo) & 1
19d49512387da66e5889fc1bacc014be240be4a9
16,016
def GetChildNodesWhereTrue(node_id, tree, stop_function):
    """walk down in tree and stop where stop_function is true

    The walk finishes at the leaves.

    returns a list of tuples of nodes and distance.
    """
    result = []

    def __getChildNodes(node_id, distance):
        node = tree.node(node_id)
        distance += node.data.branchlength
        if not node.succ:
            result.append((node_id, distance))
        elif stop_function(node_id):
            result.append((node_id, distance))
        else:
            for x in node.succ:
                __getChildNodes(x, distance)

    node = tree.node(node_id)
    __getChildNodes(node_id, -node.data.branchlength)
    return result
9837a8ac294b8202a6e17deecd213d25599f575b
16,017
def get_full_version(package_data):
    """
    Given a mapping of package_data that contains a version and may also have
    an epoch and release, return a complete version.

    For example::
    >>> get_full_version(dict(version='1.2.3'))
    '1.2.3'
    >>> get_full_version(dict(version='1.2.3', epoch='2'))
    '2~1.2.3'
    >>> get_full_version(dict(version='1.2.3', epoch='2', release='23'))
    '2~1.2.3-23'
    """
    version = package_data['version']
    release = package_data.get('release', '')
    if release:
        release = f'-{release}'
    epoch = package_data.get('epoch', '')
    if epoch:
        epoch = f'{epoch}~'
    version = f'{epoch}{version}{release}'
    return version
3a8cc3731da2ef3f99e3e9f203e084c9478f48c8
16,020
def knight_tour(n, path, u, limit):
    """
    Conduct a knight's tour using DFS.

    Args:
        n: current depth of the search tree.
        path: a list of vertices visited up to this point.
        u: the vertex we wish to explore.
        limit: the number of nodes in the path.

    Returns:
        done (bool)
    """
    path.append(u)
    if n < limit:
        nbr_list = list(u.get_connections())
        i = 0
        done = False
        while i < len(nbr_list) and not done:
            # only extend the tour through vertices not already on the path
            if nbr_list[i] not in path:
                done = knight_tour(n + 1, path, nbr_list[i], limit)
            i = i + 1
        if not done:
            # prepare to backtrack
            path.pop()
    else:
        done = True
    return done
f11a2da4e740183a85dbd2f281c65dda77237dad
16,021
def read_tags_and_datablocks(text):
    """ read a file consisting of blocks of numbers which are separated by
    tag lines; separate tag lines from data lines and return two lists

    e.g. for a pp.data file:
        Atomic number and pseudo-charge
          14 4.00
        Energy units (rydberg/hartree/ev):
          rydberg
        Angular momentum of local component (0=s,1=p,2=d..)
          2
        NLRULE override (1) VMC/DMC (2) config gen (0 ==> input/default value)
          0 0
        Number of grid points
          1603
        R(i) in atomic units
          0.000000000000000E+00
          0.719068853804059E-09
          0.144778949458300E-08

    will be parsed into:
        ['Atomic number and pseudo-charge', ...
        ['14 4.00', ...

    Args:
        text (str): file content
    Return:
        tuple: (tags, blocks), both are a list of strings
    """
    lines = text.split('\n')
    tags = []
    blocks = []
    block = ''
    for line in lines:
        try:
            # force evaluation; a bare map() is lazy and would never raise
            [float(token) for token in line.split()]
            block += line
        except ValueError:
            tags.append(line)
            blocks.append(block)
            block = ''
    blocks.append(block)
    return tags, blocks[1:]
372512010198aa401552302d45f0a0745477bec1
16,022
def convert_sensor_type(v):
    """
    converts the sensor type value into something more meaningful.
    """
    if v == 0:
        return "None"
    elif v == 1:
        return "RealPower"
    elif v == 2:
        return "ApparentPower"
    elif v == 3:
        return "Voltage"
    elif v == 4:
        return "Current"
    else:
        return "Unknown"
7f5bd77db7a240d21728b526daae7729b0871143
16,023
import bz2


def bunzip2(data):
    """
    bunzip2 the given bz2-compressed bytes.
    """
    return bz2.decompress(data)
7da3f9b64cd0f6765860678b18be661fd42b813e
16,034
def observatory_from_string(string):
    """If "jwst" or "hst" is in `string`, return it, otherwise return None."""
    if "jwst" in string:
        return "jwst"
    elif "hst" in string:
        return "hst"
    else:
        return None
217cc3cf3c5b802799c0db73563f6d11b7ab4c4d
16,035
def get_letters_pep(peptides_lst):
    """
    Get letters list and letter-index dictionaries
    """
    # build the vocabulary once so both mappings share the same ordering
    letters = ['<PAD>'] + list(set(x for l in peptides_lst for x in l))
    word_to_ix_ = {letter: index for index, letter in enumerate(letters)}
    ix_to_word_ = {index: letter for index, letter in enumerate(letters)}
    return word_to_ix_, ix_to_word_
5c341a9cdd99a874a34c1ed967a5e95ad1c7ed6f
16,037
def _get_lat_lon(df):
    """Get latitude and longitude from geometries."""
    col = df._geometry_column_name
    df["latitude"] = [latlon.y for latlon in df[col]]
    df["longitude"] = [latlon.x for latlon in df[col]]
    return df
9ab4b1cb469ae88444de97ec0cb0cec008643c4a
16,040
def fix_fp(sequence, parallel):
    """
    Fix footprints

    Parameters
    --------------
    sequence
        Sequence
    parallel
        Parallel

    Returns
    -------------
    sequence
        Sequence
    parallel
        Parallel
    """
    sequence = sequence.difference(parallel)
    return sequence, parallel
721021af7d9f4b07ee25861788cde878a31b6135
16,044
def apply_ratio(o_w, o_h, t_w, t_h):
    """Calculate width or height to keep aspect ratio.

    o_w, o_h -- origin width and height
    t_w, t_h -- target width or height, the dimension to be calculated
                must be set to 0.

    Returns: (w, h) -- the new dimensions
    """
    new_w = t_h * o_w / o_h
    new_h = t_w * o_h / o_w
    return new_w + t_w, new_h + t_h
f3143e5a5ad8aeafbb913e73aab40e8e8990ddd6
16,045
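A worked example for `apply_ratio` above: computing the width that keeps a 16:9 ratio for a target height of 600, with the width argument set to 0:

apply_ratio(1920, 1080, 0, 600)
# -> (1066.666..., 600.0)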
def maybe_append(usa_facts, jhu):
    """
    Append dataframes if available, otherwise return USAFacts.

    If both data frames are available, append them and return.

    If only USAFacts is available, return it.

    If USAFacts is not available, return None.
    """
    if usa_facts is None:
        return None
    if jhu is None:
        return usa_facts
    return usa_facts.append(jhu)
4f0831a09ac36caaec6f825036e69d0f5b62b19f
16,050
def evaluate(clauses, sol):
    """
    evaluate the clauses with the solution
    """
    sol_vars = {}  # variable number -> bool
    for i in sol:
        sol_vars[abs(i)] = bool(i > 0)
    return all(any(sol_vars[abs(i)] ^ bool(i < 0) for i in clause)
               for clause in clauses)
be50aa2c8f04b6d1ac76a17aea86beedc7abff4c
16,053
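A short example for `evaluate` above, using the usual DIMACS-style signed-integer encoding of literals:

clauses = [[1, -2], [2, 3]]     # (x1 OR NOT x2) AND (x2 OR x3)
evaluate(clauses, [1, -2, 3])   # x1=True, x2=False, x3=True -> True
evaluate(clauses, [-1, 2, -3])  # x1=False, x2=True, x3=False -> False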
import json


def _load_iam_data(path):
    """Builds a dictionary containing the information about all the
    AWS IAM resources and the actions they relate to (for more information
    look at the README.md in this directory). The keys of the dictionary
    are all the possible IAM policy actions and the values are sets
    containing the resources they allow access to. For instance:
    {'ec2:allocateaddres': {'elastic-ip', 'ipv4pool-ec2'}}"""
    data = None
    with open(path, "r") as file:
        data = json.load(file)

    actions = {}
    for service in data:
        prefix = service["prefix"]
        for privilege in service["privileges"]:
            action = privilege["privilege"].lower()
            action = f"{prefix}:{action}"
            resources = set()
            for resource_type in privilege["resource_types"]:
                if "resource_type" not in resource_type:
                    continue

                resource = resource_type["resource_type"].replace("*", "")
                if resource == "":
                    continue

                # The actions related to S3 can give access to objects,
                # buckets or both (an object is a file in a bucket).
                # Altimeter scans buckets, but not objects. So, for us, if
                # an action gives access to an object, it gives access to
                # the whole bucket.
                if prefix == "s3" and resource == "object":
                    resource = "bucket"

                resources.add(resource)

            actions[action] = resources

    return actions
7b394285f088ade8042207fdccbb9e6dfec78314
16,058
def is_field(x):
    """
    Return whether or not ``x`` is a field.

    Alternatively, one can use ``x in Fields()``.

    EXAMPLES::

        sage: R = PolynomialRing(QQ, 'x')
        sage: F = FractionField(R)
        sage: is_field(F)
        True
    """
    return x.is_field()
87efa719721d72df5c751d734f2f26d6641190c1
16,059
from typing import Optional
from typing import Dict


def create_exclusive_start_key(player_id: str, start_key: Optional[str]) -> Optional[Dict[str, str]]:
    """
    Create the 'ExclusiveStartKey' parameter for the DynamoDB query, based on
    the user-provided 'start_key' parameter to this Lambda function.
    """
    if start_key:
        return {
            'player_id': player_id,
            'slot_name': start_key,
        }
    else:
        return None
7a03434e2d52908eb4f4d68483058183913ac9bb
16,061
def label_id_to_cluster_id(label_id, C, unused_labels):
    """Map the label id to the cluster id according to clustering matrix.

    Args:
        label_id: the label id.
        C: the cluster matrix of shape L x C.
        unused_labels: used to adjust the label id.

    Returns:
        the cluster id.
    """
    # count how many unused labels are smaller than label_id
    offset = sum(l < label_id for l in unused_labels)
    row_id = label_id - offset
    assert C.indptr[row_id] + 1 == C.indptr[row_id + 1]
    cluster_id = C.indices[C.indptr[row_id]]
    return cluster_id
61593eb822dbaf88f101b2948c02de3fc07794d1
16,062
def expand_locations_and_make_variables(ctx, attr, values, targets = []):
    """Expands the `$(location)` placeholders and Make variables in each of the given values.

    Args:
        ctx: The rule context.
        attr: The name of the attribute being expanded, passed through to
            `ctx.expand_make_variables`.
        values: A list of strings, which may contain `$(location)`
            placeholders, and predefined Make variables.
        targets: A list of additional targets (other than the calling rule's
            `deps`) that should be searched for substitutable labels.

    Returns:
        A list of strings with any `$(location)` placeholders and Make
        variables expanded.
    """
    return_values = []
    for value in values:
        expanded_value = ctx.expand_location(
            value,
            targets = targets,
        )
        expanded_value = ctx.expand_make_variables(
            attr,
            expanded_value,
            {},
        )
        return_values.append(expanded_value)
    return return_values
cb426117582161c5f32034df2cc1db29ebe37205
16,065
def get_result_or_raise(future):
    """Returns the ``result`` of *future* if it is available, otherwise raise.
    """
    # calling result() re-raises the future's exception if it failed
    return future.result()
8f6b2b6b6def964d48829f2b63467a6e39e3b853
16,071
def missing_respondents(reported, observed, identified):
    """Fill in missing respondents for the f1_respondent_id table.

    Args:
        reported (iterable): Respondent IDs appearing in f1_respondent_id.
        observed (iterable): Respondent IDs appearing anywhere in the ferc1 DB.
        identified (dict): A {respondent_id: respondent_name} mapping for those
            observed but not reported respondent IDs which we have been able to
            identify based on circumstantial evidence. See also:
            `pudl.extract.ferc1.PUDL_RIDS`

    Returns:
        list: A list of dictionaries representing minimal f1_respondent_id
        table records, of the form {"respondent_id": ID, "respondent_name":
        NAME}. These records are generated only for unreported respondents.
        Identified respondents get the values passed in through ``identified``
        and the other observed but unidentified respondents are named
        "Missing Respondent ID"
    """
    records = []
    for rid in observed:
        if rid in reported:
            continue
        elif rid in identified:
            records.append(
                {
                    "respondent_id": rid,
                    "respondent_name": f"{identified[rid]} (PUDL determined)",
                },
            )
        else:
            records.append(
                {
                    "respondent_id": rid,
                    "respondent_name": f"Missing Respondent {rid}",
                },
            )
    return records
f919a9d398898b06d4442c75cc314a8cb52e1c5f
16,073
def distance(x_0, y_0, x_1, y_1):
    """Return distance between 2 points (x_0, y_0) and (x_1, y_1)
    """
    x_dist = x_0 - x_1
    y_dist = y_0 - y_1
    return (x_dist ** 2 + y_dist ** 2) ** 0.5
06c250b09e2a386f1814fe9c748cad574869a741
16,077
from typing import Callable
from typing import Sequence


def randline(filename: str, randchoice: Callable[[Sequence[str]], str]) -> str:
    """
    return a randomly-selected line from the given file
    """
    with open(filename, "rt", encoding="utf-8") as fh:
        return randchoice(fh.readlines()).rstrip()
6978158b25a8702e99ee6e7f9461cd391873eee4
16,081
async def latency(ctx):
    """Returns my gateway latency."""
    return f'{ctx.client.gateway.latency*1000.:.0f} ms'
f2d088adfa485bfff8da5154ce672232e4d57e1d
16,082
def str2intlist(s, delim=","):
    """
    create a list of ints from a delimited string

    Parameters
    ----------
    s: string
    delim: string

    Returns
    -------
    int_list: list of ints

    Examples
    --------
    >>> str2intlist("1,2,3")
    [1, 2, 3]
    >>> str2intlist("1-3")
    [1, 2, 3]
    >>> str2intlist("2,3-4,6")
    [2, 3, 4, 6]
    >>> str2intlist("a")
    Traceback (most recent call last):
        ...
    TypeError: not a valid list of ints: "a"
    """
    def get_int(n):
        try:
            return int(n)
        except ValueError:
            raise TypeError('not a valid list of ints: "{}"'.format(s))

    return sum(((list(range(*[get_int(j) + k for k, j in enumerate(i.split('-'))]))
                 if '-' in i else [get_int(i)])
                for i in s.split(delim)), [])
ae7a568a9e8b7c55e146515fad4dd810bee4ae46
16,084
def generate_timestamp_format(date_mapper: dict) -> str:
    """
    Description
    -----------
    Generates the time format for day/month/year dates based on each
    field's specified time_format.

    Parameters
    ----------
    date_mapper: dict
        a dictionary for the schema mapping (JSON) for the dataframe
        filtered for "date_type" equal to Day, Month, or Year.

    Output
    ------
    e.g. "%m/%d/%Y"
    """
    day = "%d"
    month = "%m"
    year = "%y"

    for kk, vv in date_mapper.items():
        if vv["date_type"] == "day":
            day = vv["time_format"]
        elif vv["date_type"] == "month":
            month = vv["time_format"]
        elif vv["date_type"] == "year":
            year = vv["time_format"]

    return str.format("{}/{}/{}", month, day, year)
cd535a4fb35917517711cf149430c128e2c46b6d
16,085
def get_command(line, fmt_space):
    """
    Given a header line, get the possible command

    Parameters
    -----------
    line : string
        Line of the header
    fmt_space : boolean
        Yes = Novonix format with spaces in the commands

    Returns
    --------
    command : string
        Instruction in the header line

    Examples
    ---------
    >>> import preparenovonix.novonix_io as prep
    >>> command = prep.get_command('[Open circuit storage]', fmt_space=True)
    >>> print(command)
    Open circuit storage
    """
    command = " "
    fw = line.strip()

    # Find commands ignoring left spaces
    if fmt_space:
        command = fw[1:-1]
    else:
        if ":" in fw:
            command = fw.split(":")[1].strip()
        else:
            command = fw[1:-1]
    return command
78642fd6e98817b85ce8431774a34723ed649473
16,086
import json


def harmonize_credentials(secrets_file=None, cromwell_username=None, cromwell_password=None):
    """
    Takes all of the valid ways of providing authentication to cromwell and
    returns a username and password

    :param str cromwell_password:
    :param str cromwell_username:
    :param str secrets_file: json file containing fields cromwell_user and
        cromwell_password

    :return tuple: (string of cromwell username, string of cromwell password)
    """
    if cromwell_username is None or cromwell_password is None:
        if secrets_file is None:
            raise ValueError('One form of cromwell authentication must be provided, please pass '
                             'either cromwell_user and cromwell_password or a secrets_file.')
        else:
            with open(secrets_file) as f:
                secrets = json.load(f)
            cromwell_username = secrets['cromwell_user']
            cromwell_password = secrets['cromwell_password']
    return cromwell_username, cromwell_password
f0802b3e65ebec76393090f608c77abea312867b
16,087
import pathlib


def lambda_filtered_paths(directory: str):
    """
    Return list of filepaths for lambda layers and functions. Unnecessary
    files are filtered out.
    """
    paths = pathlib.Path(directory).rglob("*")
    return [
        f for f in paths
        if not any(pat in str(f) for pat in ["__pycache__", ".mypy_cache", "~"])
    ]
1638e821a249244fde95e26a027176d1e6d87491
16,089
def rescale(values, old_min, old_max, new_min, new_max):
    """Rescale a set of values into a new min and max
    """
    output = []
    for v in values:
        new_v = (new_max - new_min) * (v - old_min) / (old_max - old_min) + new_min
        output.append(new_v)
    return output
c07173fca2f6ba0d1e1e32c257b9e4f4a39fe5a7
16,091
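A one-line example for `rescale` above:

rescale([0, 5, 10], 0, 10, 0, 100)  # -> [0.0, 50.0, 100.0]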
def KeyValuePairMessagesToMap(key_value_pair_messages):
    """Transform a list of KeyValuePair message to a map.

    Args:
        key_value_pair_messages: a list of KeyValuePair message.

    Returns:
        a map with a string as key and a string as value
    """
    return {msg.key: msg.value for msg in key_value_pair_messages}
7ab0d9a3dea7da762a559efa00ae50247ee8d2d4
16,092
def parse_result(results):
    """
    Given a string, return a dictionary of the different
    key:value pairs separated by semicolons
    """
    if not results:
        return {}

    rlist = results.split(";")
    keyvalpairs = [pair.split(":") for pair in rlist]
    keydict = {
        pair[0].strip(): pair[1].strip() for pair in keyvalpairs if len(pair) == 2
    }
    return keydict
eca808c2baa0b5c95e6fd052f2afabf53b05bd3a
16,096
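A one-line example for `parse_result` above:

parse_result("status: ok; code: 200")  # -> {'status': 'ok', 'code': '200'}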
def case(bin_spec: str, default: str = "nf") -> str:
    """ Return the case specified in the bin_spec string """
    c = default
    if "NF" in bin_spec:
        c = "nf"
    elif "ÞF" in bin_spec:
        c = "þf"
    elif "ÞGF" in bin_spec:
        c = "þgf"
    elif "EF" in bin_spec:
        c = "ef"
    return c
b2fdab5d1a48e1d20c3a561707033970cac55356
16,097
def name_func(testcase_func, _, param):
    """Create a name for the test function."""
    return '{}_{}_{}'.format(testcase_func.__name__, param.args[0],
                             param.args[1].__name__)
804f593850cff07758a61bd0ae2ccd92b2e46b19
16,100
def Convert(string):
    """converts string to list"""
    li = string.split(" ")
    return li
a446d46be5d7c2df7139460a461e0825784f5e89
16,102
import pathlib


def get_paths_to_patient_files(path_to_imgs, append_mask=True):
    """
    Get paths to all data samples, i.e., CT & PET images (and a mask) for each patient.

    Parameters
    ----------
    path_to_imgs : str
        A path to a directory with patients' data. Each folder in the
        directory must correspond to a single patient.
    append_mask : bool
        Used to append a path to a ground truth mask.

    Returns
    -------
    list of tuple
        A list wherein each element is a tuple with two (three)
        `pathlib.Path` objects for a single patient. The first one is the
        path to the CT image, the second one - to the PET image. If
        `append_mask` is True, the path to the ground truth mask is added.
    """
    path_to_imgs = pathlib.Path(path_to_imgs)
    # identify patients by the prefix of their '*_ct*' files
    patients = [f.name.split("_")[0] for f in path_to_imgs.rglob("*_ct*")]

    paths = []
    for p in patients:
        path_to_ct = path_to_imgs / (p + '_ct.nii.gz')
        path_to_pt = path_to_imgs / (p + '_pt.nii.gz')
        if append_mask:
            path_to_mask = path_to_imgs / (p + '_gtvt.nii.gz')
            paths.append((path_to_ct, path_to_pt, path_to_mask))
        else:
            paths.append((path_to_ct, path_to_pt))
    return paths
61480fee3e300d2ca97e819fae875cf4c7a637e1
16,103
def check_chars_in_positions(password, left, right, in_char):
    """
    Check if password is valid based on whether the given char appears in
    exactly one of position left or position right

    returns bool (True = valid password)
    """
    is_in_left = password[left - 1] == in_char
    is_in_right = password[right - 1] == in_char
    # need to xor the two
    return is_in_left != is_in_right
36a80525307ecf359cf631079e128617c2d22bc3
16,104
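A short example for `check_chars_in_positions` above (positions are 1-based):

check_chars_in_positions("abcde", 1, 3, "a")  # True: 'a' at position 1 only
check_chars_in_positions("ababa", 1, 3, "a")  # False: 'a' at both positions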
from typing import Dict


def _create_stats_dict_from_values(
    total_sum_w: float, total_sum_w2: float, total_sum_wx: float, total_sum_wx2: float
) -> Dict[str, float]:
    """Create a statistics dictionary from the provided set of values.

    This is particularly useful for ensuring that the dictionary values are
    created uniformly.

    Args:
        total_sum_w: Total sum of the weights (ie. the frequencies).
        total_sum_w2: Total sum of the weights squared (ie. sum of Sumw2 array).
        total_sum_wx: Total sum of weights * x.
        total_sum_wx2: Total sum of weights * x * x.
    Returns:
        Statistics dict suitable for storing in the metadata.
    """
    return {
        "_total_sum_w": total_sum_w,
        "_total_sum_w2": total_sum_w2,
        "_total_sum_wx": total_sum_wx,
        "_total_sum_wx2": total_sum_wx2,
    }
4ef02ef12b903a4a0a14f3c5fa9ce7edf11f6380
16,107