Dataset schema: content (string, 39 to 14.9k characters), sha1 (string, 40 characters), id (int64, 0 to 710k).
def draw_on_pattern(shape, pattern):
    """ Draws a shape on a pattern.

    >>> draw_on_pattern([(0, 0, 1), (0, 1, 3), (1, 1, 8)], [[0, 0, 0], [0, 0, 0]])
    [[1, 3, 0], [0, 8, 0]]
    """
    y_size = len(pattern)
    x_size = len(pattern[0])
    # A row-wise copy is needed here: pattern.copy() is shallow, so writing to
    # new_pattern[y][x] would also mutate the caller's pattern.
    new_pattern = [row[:] for row in pattern]
    for cell in shape:
        y, x, colour = cell
        if 0 <= y < y_size and 0 <= x < x_size:
            new_pattern[y][x] = colour
    return new_pattern
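A quick check (hypothetical values) that the row-wise copy leaves the caller's pattern untouched:

pattern = [[0, 0, 0], [0, 0, 0]]
result = draw_on_pattern([(0, 0, 1)], pattern)
print(result)   # [[1, 0, 0], [0, 0, 0]]
print(pattern)  # [[0, 0, 0], [0, 0, 0]] -- the original is not mutated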
85c8f8f59b3bc241684798898f1780c7eda475f4
23,879
def image_to_normalized_device_coordinates(image):
    """Map image value from [0, 255] -> [-1, 1]."""
    return (image / 127.5) - 1.0
c92fe3ef499164c957b1b4330cce584a6b6d9f1f
23,884
def first_bad_pair(sequence, k):
    """Return the first index of a pair of elements in sequence[]
    for indices k-1, k+1, k+2, k+3, ... where the earlier element is
    not less than the later element. If no such pair exists, return -1."""
    if 0 < k < len(sequence) - 1:
        if sequence[k-1] >= sequence[k+1]:
            return k - 1
    for i in range(k + 1, len(sequence) - 1):
        if sequence[i] >= sequence[i+1]:
            return i
    return -1
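A small illustration (hypothetical values) of how the element at index k is skipped when scanning for a non-increasing pair:

# Element 9 at index 2 is ignored; the scan compares indices 1 and 3, then 3 and 4.
print(first_bad_pair([1, 3, 9, 4, 5], 2))  # -1: increasing once index 2 is removed
print(first_bad_pair([1, 3, 9, 2, 5], 2))  # 1: sequence[1] = 3 >= sequence[3] = 2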
0fe2957d8feb95fe285088be0c5d85ddf4b657ad
23,886
def stitle(self, nline="", title="", **kwargs):
    """Defines subtitles.

    APDL Command: /STITLE

    Parameters
    ----------
    nline
        Subtitle line number (1 to 4). Defaults to 1.

    title
        Input up to 70 alphanumeric characters. Parameter substitution may
        be forced within the title by enclosing the parameter name or
        parametric expression within percent (%) signs. If Title is blank,
        this subtitle is deleted.

    Notes
    -----
    Subtitles (4 maximum) are displayed in the output along with the main
    title [/TITLE]. Subtitles do not appear in GUI windows or in ANSYS plot
    displays. The first subtitle is also written to various ANSYS files
    along with the main title. Previous subtitles may be overwritten or
    deleted. Issue /STATUS to display titles.

    This command is valid in any processor.
    """
    command = "/STITLE,%s,%s" % (str(nline), str(title))
    return self.run(command, **kwargs)
80775dca1d9de3bd2d1b88fbb9843d047d6d3766
23,888
def getDetailedChannelBoxAttrs(node):
    """
    Return the list of attributes that are included when the
    'detailed channel box' is enabled for a node.
    """
    attrs = [
        # rotate order
        'ro',
        # rotate axis
        'rax', 'ray', 'raz',
        # rotate pivot
        'rpx', 'rpy', 'rpz',
        # scale pivot
        'spx', 'spy', 'spz',
        # rotate pivot translate
        'rptx', 'rpty', 'rptz',
        # scale pivot translate
        'sptx', 'spty', 'sptz',
    ]

    if node.nodeType() == 'joint':
        attrs += [
            # joint orient
            'jox', 'joy', 'joz',
        ]

    return attrs
649eff52fcc43243891ce853732c2cf914ecc60a
23,889
import torch


def get_class_weights(target: torch.Tensor, class_weight_power: float = 1.0) -> torch.Tensor:
    """
    Returns class weights inversely proportional to some power of the number of pixels in each class.

    :param target: one-hot tensor of shape (B, C, Z, X, Y); thus class dimension (of size C) is dimension 1
    :param class_weight_power: power to raise 1/c to, for each class count c
    """
    with torch.no_grad():
        # sum over all dimensions except the class dimension
        class_counts = target.sum([0] + list(range(2, target.dim()))).float()
        class_counts[class_counts == 0.0] = 1.0  # prevent 1/0 when inverting - value doesn't matter if no voxels
        class_weights = class_counts ** (-class_weight_power)
        # Normalize so mean of class weights is 1.0
        class_weights *= class_weights.shape[0] / class_weights.sum()
    return class_weights
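A minimal sketch of the weighting on a toy one-hot target (shapes chosen here purely for illustration):

import torch

# Two classes over a tiny (B=1, C=2, Z=1, X=1, Y=4) volume:
# class 0 has 3 voxels, class 1 has 1.
target = torch.tensor([[[[[1., 1., 1., 0.]]],
                        [[[0., 0., 0., 1.]]]]])
weights = get_class_weights(target, class_weight_power=1.0)
print(weights)  # tensor([0.5000, 1.5000]): the rarer class gets the larger weight; mean is 1.0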
5199722699da87a57c8ddebfbaa5b44c11707393
23,890
import shlex


def split_string(s):
    """split string to list"""
    if s is None:
        return []
    else:
        return shlex.split(s)
e9cbd0c1928d16c673d70cc9aeffc0275de43f30
23,892
def get_collection_id(collection):
    """Return id attribute of the object if it is collection, otherwise return given value."""
    return collection.id if type(collection).__name__ == 'Collection' else collection
5c7b344ff89d28609b21962fda58acc3cd7684a1
23,893
def trainings(states):
    """Creates a dict from training_id to a list of game states."""
    ret_value = {}
    for state in states:
        if state.training_id not in ret_value:
            ret_value[state.training_id] = []
        ret_value[state.training_id].append(state)
    return ret_value
d58b815bcb5176989c581e12901863fb00da31d0
23,894
def lerp(value, start, end, minimum=0.0, maximum=1.0):
    """
    Linear Interpolation between two points
    """
    value = float(value)
    start = float(start)
    end = float(end)
    return minimum + ((1.0 - value) * start + value * end) * maximum
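A worked example with the default minimum/maximum, where the result reduces to the standard (1 - t) * start + t * end blend:

print(lerp(0.0, 10.0, 20.0))  # 10.0: at t=0 the result is the start point
print(lerp(0.5, 10.0, 20.0))  # 15.0: halfway between the two points
print(lerp(1.0, 10.0, 20.0))  # 20.0: at t=1 the result is the end point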
a27ef478a51790c2bb5c449c6d73a15ac0ab02d0
23,895
def get_charset(message):
    """Get the charset defined for the message.

    The charset can be retrieved in two ways. Try the preferred method
    first and, if that fails, try the other method.

    Args:
        message (Message): An email Message object.

    Returns:
        (unicode) The charset that was found or `None` if not found.
    """
    charset = message.get_content_charset()
    if not charset:
        charset = message.get_charset()
    return charset
447fa14e12792737f0c8aeb9a0c80214cb8e04bf
23,897
from typing import Tuple


def is_valid_choice(options):
    """
    Returns a function that tests if the config value is an element of the passed options.

    :param options: The options that are considered as valid choices.
    :return: A function that takes a value and tests if it is within the specified choices.
             This function returns True if the value in the config is in the passed options.
    """
    def validator(value) -> Tuple[bool, str]:
        return value in options, f"Value is not contained in the options {options}"
    return validator
4c1fb6c32a21e068256443c0110fa09a6b31d60b
23,901
def synthesize_thermal(results_df):
    """
    Reduce thermal metrics table to one time-point-like table where, for
    each metric, only the worst metric value of all time-points is recorded.
    """
    filter_cols = ["name", "substation", "feeder", "placement", "sample",
                   "penetration_level", "scenario"]
    df = (
        results_df.groupby(filter_cols)[
            [c for c in results_df.columns
             if (c not in filter_cols) and (c != "time_point")]
        ]
        .max()
        .reset_index()
    )
    return df
88b7c090a635d4410b91e69ff85462f4329e9051
23,902
def Keck_distortion(measured_wave, cutoff=10000.):
    """Telescope dependent distortion function for the Keck sample."""
    slope1 = .0600
    intercept1 = -100
    slope2 = .160
    intercept2 = -1500
    if measured_wave < cutoff:
        return measured_wave * slope1 + intercept1
    else:
        return measured_wave * slope2 + intercept2
b0592bc8e9fde3d3f04234f6627ea8c5f7e7717a
23,904
def wpoffs(self, xoff="", yoff="", zoff="", **kwargs):
    """Offsets the working plane.

    APDL Command: WPOFFS

    Parameters
    ----------
    xoff, yoff, zoff
        Offset increments defined in the working plane coordinate system.
        If only ZOFF is used, the working plane will be redefined parallel
        to the present plane and offset by ZOFF.

    Notes
    -----
    Changes the origin of the working plane by translating the working
    plane along its coordinate system axes.

    This command is valid in any processor.
    """
    command = f"WPOFFS,{xoff},{yoff},{zoff}"
    return self.run(command, **kwargs)
e7382911339938e0c7f89606f8ffd2b66923c955
23,910
def resample_dataframe(df, resolution):
    """
    Resamples a dataframe with a sampling frequency of 'resolution'
    -> Smooths the plots

    :param df: Dataframe to be resampled. Must contain numbers only
    :param resolution: Resolution of the sampling to be done
    :return: Resampled dataframe
    """
    df = df.set_index('timedelta', drop=True)  # set timedelta as new index
    resampled = df.resample(str(resolution) + 'S').mean()
    resampled.reset_index(inplace=True)
    # timedelta was resampled, so we need to do the same with the Time-column
    resampled['Time'] = resampled['timedelta'].apply(lambda time: time.total_seconds())
    return resampled
17a2c555e45a9067e68c97958200508525bef873
23,917
def equal_zip(*args):
    """
    Zip which also verifies that all input lists are of the same length
    """
    # make sure that all lists are of the same length
    assert len(set(map(len, args))) == 1, "lists are of different lengths {}".format(args)
    return zip(*args)
f50ad8586d24516ba641e1bbef5d62879a0f3d6b
23,922
import hashlib

# Number of bits in the identifier space (assumed value; the original code
# mistakenly did `from re import M`, which imports the regex MULTILINE flag
# and only happens to equal 8).
M = 8


def hash_key(integer):
    """Hash the given integer and trim to M/4 hex digits (M bits)

    Arguments:
        integer {Integer}

    Returns:
        Integer -- Hashed Integer Value
    """
    name = str(integer)
    m = hashlib.sha1(name.encode('utf-8'))
    key_hash = m.hexdigest()[:M // 4]
    return int(key_hash, 16)
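A quick demonstration of the resulting key space under the assumed M = 8:

# The hex digest is trimmed to M // 4 == 2 characters, so every key lands in [0, 255].
for n in (1, 2, 42):
    print(n, '->', hash_key(n))  # top 8 bits of SHA-1(str(n))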
d37361d02d270e75758b3268efbe0e18c7723d63
23,925
def _swap_ending(s, ending, delim="_"):
    """
    Replace the ending of a string, delimited into an arbitrary number
    of chunks by `delim`, with the ending provided

    Parameters
    ----------
    s : string
        string to replace endings
    ending : string
        string used to replace ending of `s`
    delim : string
        string that splits s into one or more parts

    Returns
    -------
    new string where the final chunk of `s`, delimited by `delim`, is
    replaced with `ending`.
    """
    parts = [x for x in s.split(delim)[:-1] if x != ""]
    parts.append(ending)
    return delim.join(parts)
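For example:

print(_swap_ending("gini_county", "state"))                   # gini_state
print(_swap_ending("median-income-2010", "2020", delim="-"))  # median-income-2020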
ffce5c55d2f914668a4daa5634ad379785fd3f2a
23,928
def extract_testing_substructures(doc):
    """
    Extract the search space for predicting with the mention pair model.

    The mention ranking model consists in computing the optimal antecedent
    for an anaphor, which corresponds to predicting an edge in the graph.
    This function extracts the search space for each such substructure (one
    substructure corresponds to one antecedent decision for an anaphor).

    The search space is represented as a nested list of mention pairs. The
    mention pairs are candidate arcs in the graph. The ith list contains
    the ith mention pair, where we assume the following order:

    (m_2, m_1), (m_3, m_2), (m_3, m_1), (m_4, m_3), ...

    Args:
        doc (CoNLLDocument): The document to extract substructures from.

    Returns:
        (list(list(Mention, Mention))): The nested list of mention pairs
        describing the search space for the substructures.
    """
    substructures = []

    # iterate over mentions
    for i, ana in enumerate(doc.system_mentions):
        # iterate in reversed order over candidate antecedents
        for ante in sorted(doc.system_mentions[1:i], reverse=True):
            substructures.append([(ana, ante)])

    return substructures
b18e9ef7bf825580618925ff2889b3df53fe4f89
23,933
import six


def explode(_string):
    """Explodes a string into a list of one-character strings."""
    if not _string or not isinstance(_string, six.string_types):
        return _string
    else:
        return list(_string)
c9c46382f2be8362e8f271983e32f1b1d2c2f7cc
23,934
def validation(size, training):
    """
    Obtain the validation set corresponding to the given training set
    """
    result = []
    for i in range(0, size):
        if i not in training:
            result.append(i)
    return result
7d40be54aa5f54139f24c1b4b20695f70586d97f
23,937
from typing import Callable
from typing import Any


def call_safe(cb: Callable, *args, **kwargs) -> Any:
    """Safely call a Callable, returning None if the call fails."""
    try:
        # return the result; the original dropped it and always yielded None
        return cb(*args, **kwargs)
    except TypeError:
        return None
5cdcca9fc5f3b834d161e7a596d20af73e5ed3b5
23,938
from typing import Dict
from typing import Any


def track_data() -> Dict[str, Any]:
    """Get track data to instantiate a Track."""
    return dict(
        title="Python 3.8 new features",
        release_date="2016-07-30",
        podcast_id=1,
    )
61b41f23d25e56bbb33b4179f2f24c4f643ff44e
23,947
def _residual_str(name):
    """Makes a residual symbol."""
    return '\\mathcal{R}(%s)' % name
385c7a0f4f1ea3e9ed37be73978eaa34ded9afa4
23,949
def update_df(df, dirname, filename, coverage, mean, std_dev, segmented_pixel_count):
    """
    Update the coverage, mean, std_dev in a given df for a specific
    segmentation type (dirname).
    """
    df.loc[df['names'] == filename, f'{dirname}_coverage'] = coverage
    df.loc[df['names'] == filename, f'{dirname}_mean'] = mean
    df.loc[df['names'] == filename, f'{dirname}_std_dev'] = std_dev
    df.loc[df['names'] == filename, f'{dirname}_segmentation_count'] = segmented_pixel_count
    return df
d803dc4ab0c6946a3b9ac0e70fa5c1eb327d85a7
23,950
def dist_between(h, seg1, seg2):
    """
    Calculates the distance between two segments. I stole this function
    from a post by Michael Hines on the NEURON forum
    (www.neuron.yale.edu/phpbb/viewtopic.php?f=2&t=2114)
    """
    h.distance(0, seg1.x, sec=seg1.sec)
    return h.distance(seg2.x, sec=seg2.sec)
7c8ca520ea27f6297740eaffacb52a2ceeead287
23,951
def locate(tlsToFind, sets):
    """return
    - the set in which the given traffic light exists
    - the pair in which it was found
    - the index within the pair
    """
    for s in sets:
        for pair in s:
            if tlsToFind == pair.otl:
                return s, pair, 0
            elif tlsToFind == pair.tl:
                return s, pair, 1
    return None, None, None
00956c33433bb34e7a5b8dd8a200a90d36bad801
23,956
def margin_fee(self, **kwargs):
    """Query Cross Margin Fee Data (USER_DATA)

    Get cross margin fee data collection with any vip level or user's
    current specific data as https://www.binance.com/en/margin-fee

    GET /sapi/v1/margin/crossMarginData

    https://binance-docs.github.io/apidocs/spot/en/#query-cross-margin-fee-data-user_data

    Keyword Args:
        vipLevel (int, optional): User's current specific margin data will be returned if vipLevel is omitted
        coin (str, optional)
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    return self.sign_request("GET", "/sapi/v1/margin/crossMarginData", kwargs)
f2ca69c13b77fffadf6aa319f1847004ed5615da
23,958
def list2string(list_of_strings):
    """
    Return a string (OUTPUT) from a list of strings (INPUT).

    E.g., ["I think,", "Therefore, I am."] => "I think, Therefore, I am."
    """
    return " ".join(list_of_strings)
10efd017e6c09a2d17ec7f9135376f0a2f57c789
23,960
def clean_df(df):
    """
    Takes in a Pandas Dataframe from Dreamclinic and cleans it for
    aggregation.
    """
    # remove rows where HrsWorked = 0
    # because they are just used by the front desk staff somehow
    df = df[df['HrsWorked'] != 0]
    # fill NaN values in 'Service_Category' with 'Massage'
    df['Service_Category'] = df['Service_Category'].fillna(value='Massage')
    # remove white space from Therapist names
    df['Therapist'] = df['Therapist'].str.strip()
    # make all therapist names lowercase to avoid typos in data entry
    df['Therapist'] = df['Therapist'].str.lower()
    # find and replace nicknames with domain knowledge
    df = df.replace('abby thomson', 'abigail thomson')
    # drop the Address_City, Address_State, and Invoice_Category columns
    df.drop(['Address_City', 'Address_State', 'Invoice_Category'],
            axis=1,
            inplace=True)
    # drop rows without a clientID
    df = df.dropna()
    return df
0db35bdd25fcba3eb3c366bb1830d8402e019d9c
23,962
def standard_dict(text):
    """Count with standard dict."""
    d = {}
    for key in text:
        d.setdefault(key, 0)
        d[key] += 1
    return d
9cd2a00e889cd28df7133fb0ea29480611bbe851
23,965
def a_send_line(text, ctx):
    """Send text line to the controller followed by `os.linesep`."""
    ctx.ctrl.sendline(text)
    return True
79f40657fe2b346c695d808bb3cdc7650077b76e
23,966
def invertMask(mask):
    """
    Inverts a numpy binary mask.
    """
    # elementwise comparison, so this works on numpy arrays
    # where a plain `not mask` would raise
    return mask == False
f6a668a9b2f0928e2a71dc7e4de4d1e04cf307de
23,978
def _counter(metric):
    """Create a signal handler that counts metrics"""
    def signal(sender, **kwargs):
        metric.inc(job=sender.name)
    return signal
5eac04e45fe3d992d26b576e81b891416bb0bcef
23,979
def get_full_name_from_any_step(step):
    """Gets the full name of a protobuf from a google.protobuf.Any step.

    An any step is of the form (foo.com/bar.Baz). In this case the result
    would be bar.Baz.

    Args:
        step: the string of a step in a path.

    Returns:
        the full name of a protobuf if the step is an any step, or None
        otherwise.
    """
    if not step:
        return None
    if step[0] != "(":
        return None
    if step[-1] != ")":
        return None
    step_without_parens = step[1:-1]
    return step_without_parens.split("/")[-1]
2be9ac75331947b4fa89cdb51bc62cc41c1935e5
23,981
import inspect


def configurable(pickleable: bool = False):
    """Class decorator to allow injection of constructor arguments.

    Example usage:
    >>> @configurable()
    ... class A:
    ...     def __init__(self, b=None, c=2, d='Wow'):
    ...         ...

    >>> set_env_params(A, {'b': 10, 'c': 20})
    >>> a = A()      # b=10, c=20, d='Wow'
    >>> a = A(b=30)  # b=30, c=20, d='Wow'

    Args:
        pickleable: Whether this class is pickleable. If true, causes
            the pickle state to include the constructor arguments.
    """
    # pylint: disable=protected-access,invalid-name
    def cls_decorator(cls):
        assert inspect.isclass(cls)

        # Overwrite the class constructor to pass arguments from the config.
        base_init = cls.__init__

        def __init__(self, *args, **kwargs):
            if pickleable:
                self._pkl_env_args = args
                self._pkl_env_kwargs = kwargs
            base_init(self, *args, **kwargs)

        cls.__init__ = __init__

        # If the class is pickleable, overwrite the state methods to save
        # the constructor arguments
        if pickleable:
            # Use same pickle keys as gym.utils.ezpickle for backwards compat.
            PKL_ARGS_KEY = '_ezpickle_args'
            PKL_KWARGS_KEY = '_ezpickle_kwargs'

            def __getstate__(self):
                return {
                    PKL_ARGS_KEY: self._pkl_env_args,
                    PKL_KWARGS_KEY: self._pkl_env_kwargs,
                }

            cls.__getstate__ = __getstate__

            def __setstate__(self, data):
                saved_args = data[PKL_ARGS_KEY]
                saved_kwargs = data[PKL_KWARGS_KEY]
                inst = type(self)(*saved_args, **saved_kwargs)
                self.__dict__.update(inst.__dict__)

            cls.__setstate__ = __setstate__

        return cls
    # pylint: enable=protected-access,invalid-name
    return cls_decorator
8c037f92f070d36ecb9c1efa3586cb3e4a8a7a09
23,982
def getSchedule(filename):
    """
    Expects a single line of the form (1,4,3,2,6,4,5,0,8)
    """
    with open(filename) as filly:
        line = filly.readlines()[0]
        line = line.replace('(', ' ').replace(')', ' ').replace(',', ' ')
        return [int(x) for x in line.strip().split()]
0435cb08093a262b1e6c2224998b361b3fc77853
23,983
def highest_sum_list(lst: list, n: int) -> int:
    """
    Returns highest sum for n consecutive numbers in a list.
    Uses the sliding window technique, which is O(n) in time complexity.

    Args:
        lst (list): list of ints
        n (int): n, consecutive nums

    Returns:
        highest sum for n consecutive numbers
    """
    current = sum(lst[:n])
    max_num = current
    i = 0
    while i + n < len(lst):
        # Slide the window one step and keep the best sum seen so far.
        # (The original overwrote max_num instead of comparing, so it
        # returned the last window's sum rather than the highest.)
        current = current - lst[i] + lst[i + n]
        max_num = max(max_num, current)
        i += 1
    return max_num
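A quick check of the fixed version against a case where the best window sits in the middle:

print(highest_sum_list([1, 9, 9, 1, 1], 2))  # 18: the window [9, 9], not the final [1, 1]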
fa453d717ffbfbceb47effc6559bd22a4d7542ba
23,985
def writedataset(h5group, name, data, overwrite=True):
    """
    Write a dataset to a hdf5 file.

    The `overwrite` parameter controls the behaviour when the dataset
    already exists.
    """
    if name in h5group:
        if overwrite:
            del h5group[name]
        else:
            return False
    h5group[name] = data
    return True
ae53c15155b79e850bc01a4ddadc6dc23adec7ea
23,987
def get_url_and_type(xml):
    """
    Gets the URL of the audio file, and its type
    """
    attr = xml.find('channel').find('item').find('enclosure').attrib
    return attr['url'], attr['type']
5e6feacb59edaaca0192342e4c1424c8d23f2bfe
23,993
def read_problem(filename):
    """
    Read a 0hh1 puzzle from file <filename>

    Takes a filename, and returns a problem dictionary with the following
    fields:
    - 'size': the number of rows/columns (assumed to be the same; an error
      will be raised if there is a size mismatch)
    - 'variables': the puzzle itself, a dictionary with (row, column) tuples
      as keys and the corresponding value 'r', 'b', or '.'
    - 'state': initially 'unsolved', to be updated by other methods
    """
    with open(filename, 'r') as problem_file:
        problem = [['rb' if c == '.' else c for c in line if c in '.rb']
                   for line in problem_file]
    size = len(problem)
    assert all(len(v) == size for v in problem)
    cells = {(r, c): problem[r][c] for r in range(size) for c in range(size)}
    problem_dict = {'size': size, 'variables': cells, 'state': 'unsolved'}
    return problem_dict
554f97862ddc56337a0ba786052a28955c10b03a
23,997
import stat


def s_isdev(mode):
    """
    Check to see if a file is a device.
    """
    return stat.S_ISBLK(mode) or stat.S_ISCHR(mode)
9caea3ac877f8b745c6c24487ad26f105ef99ca1
23,998
def in_dict(obj, key, default=False):
    """
    Returns true if key exists in obj.__dict__; false if not in.
    If obj.__dict__ is absent, return default.
    """
    return (key in obj.__dict__) if getattr(obj, '__dict__', None) else default
905bc7afe9677f5f1dd70a1c6ec296df608cc6c3
24,000
def normalize_min_max(img, a=0.1, b=0.9):
    """
    Normalize the image with Min-Max scaling to a range of [a, b]

    Args:
        img: The image to be normalized

    Returns:
        Normalized image data
    """
    # Assume image data is in grayscale, with the current values in
    # the range [0, 255] (uint8).
    x_min = 0
    x_max = 255
    # x' = a + ((x - x_min) * (b - a)) / (x_max - x_min)
    normalized_img = a + ((img - x_min) * (b - a)) / (x_max - x_min)
    return normalized_img
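Plugging in the extremes as a sanity check (with the default a=0.1, b=0.9):

print(normalize_min_max(0))      # 0.1: the darkest pixel maps to a
print(normalize_min_max(255))    # ~0.9: the brightest pixel maps to b
print(normalize_min_max(127.5))  # 0.5: mid-gray lands at the midpoint of [a, b]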
0772ce27ca4a7cc75df2ae2d40e37f638b43359f
24,002
import threading


def wait_for_events(completion_queues, deadline):
    """
    Args:
      completion_queues: list of completion queues to wait for events on
      deadline: absolute deadline to wait until

    Returns:
      a sequence of events of length len(completion_queues).
    """
    results = [None] * len(completion_queues)
    lock = threading.Lock()
    threads = []

    def set_ith_result(i, completion_queue):
        result = completion_queue.next(deadline)
        with lock:
            results[i] = result

    for i, completion_queue in enumerate(completion_queues):
        thread = threading.Thread(target=set_ith_result,
                                  args=[i, completion_queue])
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    return results
fd172ade97f78a538bcd9e6c0c95f5ee2102688c
24,010
def analyse(text):
    """
    Return a dictionary with the number of occurrences for each character
    in the given string.
    """
    frequencies = {}
    for c in text:
        if c in frequencies:
            frequencies[c] += 1
        else:
            frequencies[c] = 1
    return frequencies
5f4cabd36ab090ba3f26dab89851947ac8f70616
24,012
def norm_vec(lst, idx):
    """Get the normal vector pointing to cube at idx (from previous cube)."""
    assert len(lst) > 1 and 0 < idx < len(lst)
    return [a - a1 for (a, a1) in zip(lst[idx], lst[idx-1])]
76fb86ac29b2cd8b5ceab0ba83b9bb8360914764
24,016
def end_chat(input_list):
    """Function to end the chat.

    Parameters
    ----------
    input_list : list
        List that contains the input that the user input to the chatbot.

    Returns
    -------
    output : bool
        Returns True to end chat or False to continue chat.
    """
    return 'quit' in input_list
cf9a592af3981706c04aa97f4519a4d58a1d87af
24,017
def isint(s):
    """Is the given string an integer?"""
    try:
        int(s)
    except ValueError:
        return False
    return True
fc734c88031b2d0aa1c60b59d0605276b0215a83
24,025
def val_proto_to_python(msg):
    """
    Converts a `protobuf` `Value` `Message` object into a Python variable.

    Parameters
    ----------
    msg : google.protobuf.struct_pb2.Value
        `protobuf` `Value` `Message` representing a variable.

    Returns
    -------
    one of {None, bool, float, int, str}
        Python variable represented by `msg`.
    """
    value_kind = msg.WhichOneof("kind")
    if value_kind == "null_value":
        return None
    elif value_kind == "bool_value":
        return msg.bool_value
    elif value_kind == "number_value":
        return int(msg.number_value) if msg.number_value.is_integer() else msg.number_value
    elif value_kind == "string_value":
        return msg.string_value
    elif value_kind == "list_value":
        return [val_proto_to_python(val_msg)
                for val_msg in msg.list_value.values]
    elif value_kind == "struct_value":
        return {key: val_proto_to_python(val_msg)
                for key, val_msg in msg.struct_value.fields.items()}
    else:
        raise NotImplementedError("retrieved value type is not supported")
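A small sketch of feeding it a `google.protobuf.struct_pb2.Value` (assuming the `protobuf` package is installed):

from google.protobuf import struct_pb2

msg = struct_pb2.Value(number_value=3.0)
print(val_proto_to_python(msg))  # 3 -- whole floats are collapsed to int

msg = struct_pb2.Value(string_value="hello")
print(val_proto_to_python(msg))  # hello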
4dc710483a599ba16b6e27e0f1af6431c454a632
24,031
def shed(data):
    """
    Drops columns that have been encoded
    """
    data = data.drop(['Gender', 'Education', 'Married', 'Self_Employed'], axis=1)
    return data
a50b8974d09d04d38497d87e3c4c96c05bf973ea
24,032
import hashlib


def file_hash(filename):
    """Calculates the hash of an entire file on disk

    Args:
        filename (str)
            Location of file on disk

    Returns:
        hash (str)
            SHA256 checksum
    """
    hash_sha256 = hashlib.sha256()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_sha256.update(chunk)
    return hash_sha256.hexdigest()
cebd76ad6dec53056ff829dacb5df6fee639ee10
24,034
def argsort_list(seq):
    """Returns indices such that the list is sorted.

    Example: argsort_list([2, 1, 3]) = [1, 0, 2].
    """
    return sorted(range(len(seq)), key=seq.__getitem__)
25e598b3771f1aae0b28dff83909705f9bd76b3d
24,036
def make_mgt_byte(service, sync=False):
    """
    Creates the management byte according to the protocol

    :param service: Service code as defined in :obj:`~.Service`
    :param sync: boolean if synchronized mode should be used
    :return: integer
    """
    msg = service + 3 * 16  # 3 * 16 == 0x30, the offset defined by the protocol
    if sync:
        msg += 64  # 64 == 0x40, the sync-mode bit
    return msg
2e69845f9fd19d03013f39eb0a74e0b5f0c0ea7e
24,037
def fuzzy_match(pattern, instring, adj_bonus=5, sep_bonus=10, camel_bonus=10,
                lead_penalty=-3, max_lead_penalty=-9, unmatched_penalty=-1):
    """Return match boolean and match score.

    :param pattern: the pattern to be matched
    :type pattern: ``str``
    :param instring: the containing string to search against
    :type instring: ``str``
    :param int adj_bonus: bonus for adjacent matches
    :param int sep_bonus: bonus if match occurs after a separator
    :param int camel_bonus: bonus if match is uppercase
    :param int lead_penalty: penalty applied for each letter before 1st match
    :param int max_lead_penalty: maximum total ``lead_penalty``
    :param int unmatched_penalty: penalty for each unmatched letter

    :return: 2-tuple with match truthiness at idx 0 and score at idx 1
    :rtype: ``tuple``
    """
    score, p_idx, s_idx, p_len, s_len = 0, 0, 0, len(pattern), len(instring)
    prev_match, prev_lower = False, False
    prev_sep = True  # so that matching first letter gets sep_bonus
    best_letter, best_lower, best_letter_idx = None, None, None
    best_letter_score = 0
    matched_indices = []

    while s_idx != s_len:
        p_char = pattern[p_idx] if (p_idx != p_len) else None
        s_char = instring[s_idx]
        p_lower = p_char.lower() if p_char else None
        s_lower, s_upper = s_char.lower(), s_char.upper()

        next_match = p_char and p_lower == s_lower
        rematch = best_letter and best_lower == s_lower

        advanced = next_match and best_letter
        p_repeat = best_letter and p_char and best_lower == p_lower

        if advanced or p_repeat:
            score += best_letter_score
            matched_indices.append(best_letter_idx)
            best_letter, best_lower, best_letter_idx = None, None, None
            best_letter_score = 0

        if next_match or rematch:
            new_score = 0

            # apply penalty for each letter before the first match
            # using max because penalties are negative (so max = smallest)
            if p_idx == 0:
                score += max(s_idx * lead_penalty, max_lead_penalty)

            # apply bonus for consecutive matches
            if prev_match:
                new_score += adj_bonus

            # apply bonus for matches after a separator
            if prev_sep:
                new_score += sep_bonus

            # apply bonus across camelCase boundaries
            if prev_lower and s_char == s_upper and s_lower != s_upper:
                new_score += camel_bonus

            # update pattern index iff the next pattern letter was matched
            if next_match:
                p_idx += 1

            # update best letter match (may be next or rematch)
            if new_score >= best_letter_score:
                # apply penalty for now-skipped letter
                if best_letter is not None:
                    score += unmatched_penalty
                best_letter = s_char
                best_lower = best_letter.lower()
                best_letter_idx = s_idx
                best_letter_score = new_score
            prev_match = True

        else:
            score += unmatched_penalty
            prev_match = False

        prev_lower = s_char == s_lower and s_lower != s_upper
        prev_sep = s_char in '_ '

        s_idx += 1

    if best_letter:
        score += best_letter_score
        matched_indices.append(best_letter_idx)

    return p_idx == p_len, score
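A couple of illustrative calls (the scores depend on the bonus/penalty defaults above):

matched, score = fuzzy_match("ftw", "fuzzy_match_two")
print(matched)  # True: all pattern letters appear in order

matched, score = fuzzy_match("xyz", "fuzzy_match_two")
print(matched)  # False: 'x' never appears, so the pattern is not fully consumed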
5d5f4164f6230161842f37b29f03758753f36ef6
24,038
def add_lists(list1, list2):
    """
    Add list1 and list2 and remove any duplicates.

    Example:
        list1 = [1, 2, 3, 4]
        list2 = [3, 4, 5, 6]
        add_lists(list1, list2) = [1, 2, 3, 4, 5, 6]

    :param list1: input list 1
    :param list2: input list 2
    :return: added lists with removed duplicates
    """
    return list1 + list(set(list2) - set(list1))
fc3f024ddbe0de0b6c5cfb92c173fedade0b1f54
24,039
def group(iterable, n_groups):
    """Group a list into a list of lists

    This function can be used to split up a big list of items into a set
    to be processed by each worker in an MPI scheme.

    Parameters
    ----------
    iterable : array_like
        A list of elements
    n_groups : int
        The number of groups you'd like to make

    Returns
    -------
    groups : list
        A list of lists, where each element in `groups` is a list of
        approximately `len(iterable) / n_groups` elements from `iterable`

    See Also
    --------
    interweave : inverse of this operation
    """
    return [iterable[i::n_groups] for i in range(n_groups)]
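Note the strided (round-robin) assignment, for example:

print(group([0, 1, 2, 3, 4, 5, 6], 3))
# [[0, 3, 6], [1, 4], [2, 5]]: element i goes to group i % n_groups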
bb981dbc8f1fc8034e2b672652c63d6af7de8f5b
24,040
def emails_parse(emails_dict):
    """
    Parse the output of ``SESConnection.list_verified_emails()`` and get
    a list of emails.
    """
    return sorted(emails_dict['VerifiedEmailAddresses'])
db5b1935057367bdacd923e1f27557727f1b9a00
24,050
def create_datafile_url(base_url, identifier, is_filepid):
    """Creates URL of Datafile.

    Example - File ID:
    https://data.aussda.at/file.xhtml?persistentId=doi:10.11587/CCESLK/5RH5GK

    Parameters
    ----------
    base_url : str
        Base URL of Dataverse instance
    identifier : str
        Identifier of the datafile. Can be datafile id or persistent
        identifier of the datafile (e. g. doi).
    is_filepid : bool
        ``True`` to use persistent identifier. ``False``, if not.

    Returns
    -------
    str
        URL of the datafile
    """
    assert isinstance(base_url, str)
    assert isinstance(identifier, str)

    base_url = base_url.rstrip("/")
    if is_filepid:
        url = "{0}/file.xhtml?persistentId={1}".format(base_url, identifier)
    else:
        url = "{0}/file.xhtml?fileId={1}".format(base_url, identifier)

    assert isinstance(url, str)
    return url
d6deece390b3028f45b27a0f6aa1864688ed97fa
24,053
def suffix_all_lines(s, suffix):
    """
    Returns 's', with 'suffix' appended to all lines.

    If the last line is empty, suffix is not appended to it. (If s is
    blank, returns s unchanged.)
    """
    split = s.split('\n')
    last = split.pop()
    final = []
    for line in split:
        final.append(line)
        final.append(suffix)
        final.append('\n')
    if last:
        final.append(last)
        final.append(suffix)
    return ''.join(final)
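For instance, a trailing newline leaves the (empty) final line untouched:

print(repr(suffix_all_lines("a\nb\n", ";")))  # 'a;\nb;\n'
print(repr(suffix_all_lines("a\nb", ";")))    # 'a;\nb;'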
fe43cb9039254a7acf7cb11375356ef2fc1c01b4
24,059
def safe_float(x):
    """Returns the value of x converted to float; if that fails, returns None."""
    try:
        return float(x)
    except (ValueError, TypeError):
        return None
a492ce03eeb15c4fe192b56dc447a97fff94b03e
24,061
def string_to_scopes(scopes):
    """Converts stringified scopes value to a list.

    Args:
        scopes (Union[Sequence, str]): The string of space-separated scopes
            to convert.

    Returns:
        Sequence(str): The separated scopes.
    """
    if not scopes:
        return []

    return scopes.split(' ')
ba304ebc17cd52739809d84c0617cf853de6d21b
24,064
def calculate_total_bill(subtotal):
    """ (float) -> str

    subtotal is passed through as an input.

    The HST_RATE variable in this function is multiplied by the inputted
    variable.

    Function returns the resulting variable "total_bill", rounded and
    formatted to 2 decimal points. The total is rounded to the nearest
    5 cents using the standard nickel rounding scheme rules in Canada:
    0.01 to 0.02 will round down to 0.00.
    0.03 to 0.04 will round up to 0.05.
    0.06 to 0.07 will round down to 0.05.
    0.08 to 0.09 will round up to 0.10.

    >>> calculate_total_bill(3.0)
    '3.40'
    >>> calculate_total_bill(6.67)
    '7.55'
    >>> calculate_total_bill(2.05)
    '2.30'
    """
    HST_RATE = 1.13
    total_bill = subtotal * HST_RATE
    return format(round(0.05 * round(float(total_bill) / 0.05), 2), '.2f')
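Tracing the first doctest by hand shows how the nickel rounding lands on 3.40:

subtotal = 3.0
total = subtotal * 1.13          # 3.39 after tax
nickels = round(total / 0.05)    # 67.8 rounds to 68 nickels
print(format(0.05 * nickels, '.2f'))  # '3.40'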
6335c9e85e37e6d897eaa48ad09557b5d77d2e1b
24,066
import multiprocessing


def consumer_process_factory(kafka_bootstrap_servers):
    """
    Use this "factory as a fixture" to create a multiprocessing.Process
    running a Kafka consumer polling loop in a test function.

    Parameters
    ----------
    kafka_bootstrap_servers : pytest fixture (str)
        comma-separated list of host:port

    Returns
    -------
    _consumer_process_factory : function(consumer_factory, topics, group_id,
                                         consumer_config, document_queue, **kwargs)
        factory function returning a multiprocessing.Process that will run
        a Kafka consumer polling loop
    """

    def _consumer_process_factory(
        consumer_factory, topics, group_id, consumer_config, document_queue, **kwargs
    ):
        """
        Parameters
        ----------
        consumer_factory : function(topics, group_id, consumer_config, **kwargs)
            a factory function (or callable) returning a BlueskyConsumer-like object
        topics : list of str, required
            the underlying Kafka consumer will subscribe to the specified topics
        group_id : str, optional
            the underlying Kafka consumer will have the specified group_id,
            "pytest" by default
        consumer_config : dict, optional
            the underlying Kafka consumer will be created with the specified
            configuration parameters
        document_queue : multiprocessing.Queue
            the underlying Kafka consumer will place documents it receives in
            this queue
        kwargs
            kwargs will be passed to the consumer_factory to be used in
            constructing the underlying Kafka consumer

        Returns
        -------
        document_queue : multiprocessing.Queue
            this queue will contain bluesky (name, document) pairs that were
            delivered to the underlying Kafka consumer
        """
        if consumer_config is None:
            consumer_config = {
                # it is important to set a short time interval
                # for automatic commits or the Kafka broker may
                # not be notified by the consumer that messages
                # were received before the test ends; the result
                # is that the Kafka broker will try to re-deliver
                # those messages to the next consumer that subscribes
                # to the same topic(s)
                "auto.commit.interval.ms": 100,
            }

        # this function will run in the external process created below
        def start_consumer_with_queue(document_queue_):
            logger = multiprocessing.get_logger()
            logger.warning("constructing consumer process with inter-process queue")

            def put_document_in_queue(consumer, topic, name, doc):
                logger.warning("BlueskyConsumer putting %s in queue", name)
                document_queue_.put((name, doc))

            # it is important the BlueskyConsumer be
            # constructed in the external process
            bluesky_consumer_ = consumer_factory(
                topics=topics,
                bootstrap_servers=kafka_bootstrap_servers,
                group_id=group_id,
                consumer_config=consumer_config,
                process_document=put_document_in_queue,
                **kwargs,
            )
            # consume messages published by a Kafka broker
            bluesky_consumer_.start()

        # create an external process for the bluesky_kafka.BlueskyConsumer
        # polling loop but do not start it; the client of this function will
        # start the process
        consumer_process = multiprocessing.Process(
            target=start_consumer_with_queue,
            args=(document_queue,),
            daemon=True,
        )
        return consumer_process

    return _consumer_process_factory
90222728439dc071a39afa72b83c66c757b0a2a7
24,069
def pil_to_rgb(pil):
    """Convert the color from a PIL-compatible integer to RGB.

    Parameters:
        pil: a PIL compatible color representation (0xBBGGRR)

    Returns:
        The color as an (r, g, b) tuple in the range:
            r: [0...1]
            g: [0...1]
            b: [0...1]

    >>> '(%g, %g, %g)' % pil_to_rgb(0x0080ff)
    '(1, 0.501961, 0)'
    """
    r = 0xff & pil
    g = 0xff & (pil >> 8)
    b = 0xff & (pil >> 16)
    return tuple((v / 255.0 for v in (r, g, b)))
b6c2c3f150ddf2e82120febc676e400582d7db48
24,070
def get_instance(instances, field, value):
    """Given a list of instances (dicts), return the dictionary where field == value."""
    for inst in instances:
        if inst[field] == value:
            return inst
    assert False, "Value '{}' for field '{}' was not found in instances!".format(value, field)
56b79d142885c54a356709e99d9333476bbc1c43
24,072
def version_number_to_array(version_num):
    """Given a version number, return an array of the elements, as integers."""
    return [int(x) for x in version_num.split(".")]
b55605238bd0b534c92c7867f5f2257226e8382f
24,076
def get_lr(optimizer):
    """
    Return current learning rate from optimiser function

    :param optimizer (torch.optim): Optimiser function
    :return (float): Learning rate at current epoch
    """
    for param_group in optimizer.param_groups:
        return param_group['lr']
d35703b89e07bce65113135c78733b282eebffd4
24,077
def dedup(lst):
    """Remove duplicates from lst, preserving order"""
    result = []
    seen = set()
    for element in lst:
        if element not in seen:
            result.append(element)
            seen.add(element)
    return result
367640c9f4795ddc9ee2d7db1d88755a9b901290
24,078
def comma(text):
    """Split a string into components by exploding on a comma"""
    components = (el.strip() for el in text.split(','))
    return tuple(filter(bool, components))
ebfd1d0fe7be436a93973e4ee14f7565414cd5f3
24,084
def get_column(puzzle: str, col_num: int) -> str:
    """Return column col_num of puzzle.

    Precondition: 0 <= col_num < number of columns in puzzle

    >>> get_column('abcd\\nefgh\\nijkl\\n', 1)
    'bfj'
    """
    puzzle_list = puzzle.strip().split('\n')
    column = ''
    for row in puzzle_list:
        column += row[col_num]
    return column
4d4dd5c6d6345786e4ef0f41c4748e47d1231603
24,088
def _convert_boolean(value):
    """Convert a string to a boolean value the same way the server does.

    This is called by the transform_parameter_value function and shouldn't
    be called directly.

    Args:
        value: A string value to be converted to a boolean.

    Returns:
        True or False, based on whether the value in the string would be
        interpreted as true or false by the server. In the case of an
        invalid entry, this returns False.
    """
    if value.lower() in ('1', 'true'):
        return True
    return False
0c30185b8745be6b431baab6d6e4331c608c93cb
24,089
import click


def prompt_tag_selection(tags):
    """Prompt user to choose a tag or <HEAD>."""
    # Prompt user to select a tag to export
    tags = sorted(tags, key=lambda t: t.created)
    text_prompt = 'Tag to export: \n\n<HEAD>\t[1]\n'
    text_prompt += '\n'.join(
        '{}\t[{}]'.format(t.name, i) for i, t in enumerate(tags, start=2)
    )
    text_prompt += '\n\nTag'

    selection = click.prompt(
        text_prompt, type=click.IntRange(1, len(tags) + 1), default=1
    )
    if selection > 1:
        return tags[selection - 2]
    return None
913ebd41f23905acf967f466910966e435d957e1
24,092
def _status_code_for_git_status_line(status_line):
    """Returns the XY status code from a git status line.

    The git status char ' ' (empty space) is replaced with an underscore
    per the MergeFile status spec above. See `git status --help` for
    details.
    """
    assert len(status_line) >= 2, status_line
    return status_line[:2].replace(" ", "_")
51e802e4f6aded85d5344eba89bb0f20af192d7f
24,093
def open_file(fname):
    """Open <fname> and return its contents."""
    with open(fname, "r") as reader:
        data = reader.read()
    return data
c08c02414fcacdffc7fc1391ee9201a6a0151c5a
24,096
def abi_crs(G, reference_variable="CMI_C01"):
    """
    Get coordinate reference system for the Advanced Baseline Imager (ABI).

    Parameters
    ----------
    G : xarray.Dataset
        An xarray.Dataset to derive the coordinate reference system.
    reference_variable : str
        A variable in the xarray.Dataset to use to parse projection from.

    Returns
    -------
    Three objects are returned
        1. cartopy coordinate reference system
        2. data projection coordinates in x direction
        3. data projection coordinates in y direction
    """
    # We'll use the `CMI_C01` variable as a 'hook' to get the CF metadata.
    dat = G.metpy.parse_cf(reference_variable)

    crs = dat.metpy.cartopy_crs

    # We also need the x (east/west) and y (north/south) axis sweep of the ABI data
    x, y = (dat.x, dat.y)

    return crs, x, y
b420ea4cda12f1acd4b9af6ecd9c2f70c756f761
24,097
def EncodePublic(pk):
    """
    Encode a public key into bytes.
    """
    return bytes(pk)
a4dcc174f12fa78b3ec425b4129c038d51312fea
24,098
def _without_otter_metadata(metadata):
    """
    Return a copy of the metadata with all the otter-specific keys
    removed.
    """
    meta = {k: v for (k, v) in metadata.get('metadata', {}).items()
            if not (k.startswith("rax:auto_scaling")
                    or k.startswith("rax:autoscale:"))}
    return {'metadata': meta}
b35a10d3961c1885df4ae9a25b39c639d4f77b9c
24,101
import math


def griewank(phenome):
    """The bare-bones Griewank function."""
    ssum = 0.0
    product = 1.0
    for i in range(len(phenome)):
        ssum += phenome[i] ** 2 / 4000.0
        product *= math.cos(phenome[i] / math.sqrt(i + 1.0))
    return ssum - product + 1.0
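The global minimum sits at the origin, which makes a handy sanity check:

print(griewank([0.0, 0.0]))  # 0.0: the sum term is 0 and the cosine product is 1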
b2f5ad697051b3d1e491da9c159aafa2faa1cc6d
24,111
def tx_vport(b2b_raw_config_vports):
    """Returns a transmit vport"""
    return b2b_raw_config_vports.ports[0]
b2b6629a868d1ee75566a2c86acf5e38c75e992c
24,114
import json


def get_hosts_deployed(cls):
    """
    Method to get all the hosts deployed in the cluster

    Args:
        cls: cephadm instance object

    Returns:
        List of the names of hosts deployed in the cluster
    """
    out, _ = cls.shell(args=["ceph", "orch", "host", "ls", "-f", "json"])
    hosts = list()
    host_obj = json.loads(out)
    for host in host_obj:
        hosts.append(host["hostname"])
    return hosts
221bd1a437bb4a271c1b85af9f80de5cbf99884c
24,116
def initial_status_ok(db, status_id, cat_id, is_simple):
    """Allow "escalated" when submitting a new issue. This is allowed in
    case the analysis was done in advance. The first (submission) message
    should contain the analysis results.
    Allow "open" when submitting a new non-cert issue. This is allowed in
    case the analysis and decision about implementation were made in
    advance; the first message should contain the analysis results and
    decision.
    Allow "open" for the simple kind (the ones using simple_transitions).
    """
    status = db.status.get(status_id, 'name')
    if status == 'escalated' and not is_simple:
        return True
    if status == 'open' and is_simple:
        return True
    if (status == 'open'
            and cat_id
            and db.category.get(cat_id, 'valid')
            and not db.category.get(cat_id, 'cert_sw')):
        return True
    return False
f2e28779258a5f1f590e6cf2a2e9a26a415c16b7
24,117
import json


def process_synonyms(genes, synonyms_file):
    """Process a list of genes into a dictionary with synonyms

    The user may select only a subset of genes to look for, rather than
    the entire dictionary. This function creates a new dictionary for the
    genes used by the user.

    Returns:
        dict: Dictionary containing the gene names provided as keys, and
        the synonyms as values.
    """
    with open(synonyms_file) as f:
        gene_to_syns = json.load(f)

    target_to_syns = {}
    targets_lower = [target.lower() for target in genes]

    for symbol, synonyms in gene_to_syns.items():
        if symbol in genes:
            target_to_syns[symbol] = synonyms
        else:
            for synonym in synonyms:
                if synonym.lower() in targets_lower:
                    # Copy before appending so the canonical symbol is added
                    # without mutating the list loaded from the JSON file.
                    target_to_syns[synonym] = synonyms + [symbol]
    return target_to_syns
f2452464eb58e70487c8f0fbf3159dcb8a7a0d75
24,120
from typing import Any
import inspect
from pydantic import BaseModel  # noqa: E0611


def _is_basemodel(obj: Any) -> bool:
    """Checks if object is a subclass of BaseModel.

    Args:
        obj (Any): Any object

    Returns:
        bool: Is a subclass of BaseModel.
    """
    return inspect.isclass(obj) and issubclass(obj, BaseModel)
30027d6e5ce9f685c4b227ef3c2f83d76a963da7
24,121
import statistics


def median(wm):
    """
    Median Operator

    wm = list of importance weighted membership values

    returns the middle value in the set
    """
    return statistics.median(wm)
bee3aa1f17d943ed1951277c17eff958a6b16e12
24,123
def title(info):
    """concatenate page title from info dict"""
    version = info['node']['version']
    try:
        # perhaps cut away "-stable-316fc7ec"
        version = version.split("-")[0]
    except Exception:
        pass
    T = "(%s) %s %s with %d txs: %.1f TPS"
    T = T % (info['diagrams']['prefix'],
             info['node']['name'],
             version,
             info['send']['num_txs'],
             info['diagrams']['blocktimestampsTpsAv'])
    return T
94d1116bfedc2b9f2585ba6479e2cb04a1a05b70
24,130
def has_duplicates(t):
    """Takes a list; returns True if any element appears more than once,
    otherwise False."""
    seen_letters = []
    for i in t:
        if i in seen_letters:
            return True
        else:
            seen_letters.append(i)
    return False
dc1316d94ed510eaa468feb26a62ae9afd6cfbc2
24,131
def rational_polynomial2(data):
    r"""Rational polynomial benchmark function.

    .. list-table::
       :widths: 10 50
       :stub-columns: 1

       * - Range
         - :math:`\mathbf{x} \in [0, 6]^2`
       * - Function
         - :math:`f(\mathbf{x}) = \frac{(x_1 - 3)^4 + (x_2 - 3)^3 - (x_2 - 3)}{(x_2 - 2)^4 + 10}`
    """
    return ((data[0] - 3)**4 + (data[1] - 3)**3 - (data[1] - 3)) / ((data[1] - 2)**4 + 10)
99d7802acf72ee57421d543c3a324958e7bb5281
24,132
def split_join_text(text, n, prefix, joiner):
    """Splits the text into chunks that are n characters long, then joins
    them up again."""
    return joiner.join(f"{prefix}{text[i:i+n]}" for i in range(0, len(text), n))
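For example, re-wrapping a hex string into prefixed 4-character groups:

print(split_join_text("deadbeef", 4, "0x", " "))  # 0xdead 0xbeef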
a44c5944c6718360af287db2b2520ebd643aaf76
24,141
def moving_average(input_list):
    """Compute the moving average with window size of 3

    Args:
        input_list (list of floats or ints): list of numbers

    Returns:
        list of floats or ints: moving average with window size of 3

    Example:
        moving_average([2,3,4,5,6]) = [3,4,5]
    """
    N = 3
    output = []
    for i in range(len(input_list)):
        if i < N - 1:
            continue
        else:
            tmp_sum = 0
            for k in range(N):
                tmp_sum += input_list[i-k]
            output.append(tmp_sum / N)
    return output
e0412ffb9c348c60c0e471405708757cd7917cfe
24,145
import socket


def get_container_hostname(device):
    """Maps a device to its container hostname."""
    this_host = socket.gethostname().split('.')[0]
    if device.physical_port is not None:
        return '%s--device%d' % (this_host, device.physical_port)
    else:
        return '%s--%s' % (this_host, device.serial)
c1ee92ddc7dc2fd310b56a657098f42e9645abd5
24,158
def object_list_check_any_has_attribute(object_list, attr_name):
    """
    Check if any object in the list has the attribute.
    """
    for obj in object_list:
        if hasattr(obj, attr_name):
            return True
    return False
cac90dab958b4bfc25493a2dba86b72454230222
24,162
import re


def file_to_array(file_name):
    """Read file and extract lines to list.

    Parameters
    ----------
    file_name : str
        Path of file to read.

    Returns
    -------
    array : list
        List of lines contained in file.
    """
    with open(file_name, 'r') as f:
        array = [line.strip() for line in f]
    array = [line for line in array if line]
    array = [line for line in array if not re.match(r'\#', line)]
    return array
762e96b2098d6a5435af549d2a72c3033d12ae33
24,166
def dotProduct(D1, D2):
    """
    Returns the dot product of two documents
    """
    Sum = 0.0
    for key in D1:
        if key in D2:
            Sum += (D1[key] * D2[key])
    return Sum
d3d81029a85d34269de3454511a193aa8e433370
24,167
def normal_from_lineseg(seg):
    """
    Returns a normal vector with respect to the given line segment.
    """
    start, end = seg
    x1, y1 = start
    x2, y2 = end

    dx = x2 - x1
    dy = y2 - y1

    return (dy, -dx)
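A quick check on a horizontal segment; the result is the direction vector rotated 90 degrees clockwise in standard axes:

seg = ((0, 0), (1, 0))  # unit segment along +x
print(normal_from_lineseg(seg))  # (0, -1): perpendicular to the segment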
a64d18a9bd82ee8a7f28ac96f84403bac4ea2981
24,168
def escape_backslash(s: str) -> str:
    """Replaces any \\ character with \\\\"""
    return s.replace('\\', '\\\\')
e185c40d4cfe52eeed77e5fab2ee40a15c6a760c
24,169
def is_collection(v):
    """
    Decide if a variable contains multiple values and therefore can be
    iterated, discarding strings (single strings can also be iterated,
    but shouldn't qualify)
    """
    # The 2nd clause is superfluous in Python 2, but (maybe) not in Python 3
    # Therefore we use 'str' instead of 'basestring'
    return hasattr(v, '__iter__') and not isinstance(v, str)
e51ee293566e0be9f7143524abb055da0e35671e
24,170
def format_node(cluster_name, node):
    """Formats a string representation of a node."""
    return '<{0}[{1}]>'.format(cluster_name, node)
7a8b35dd2d8845d3fdf9484769d8ee2db819aaa6
24,171
import hashlib


def sha256(s):
    """
    Return the SHA256 HEX digest related to the specified string.
    """
    m = hashlib.sha256()
    m.update(bytes(s, "utf-8"))
    return m.hexdigest()
e868e345cf942127c1bbd0b56402d3a007bb8ff6
24,173