content: string (39 to 14.9k characters)
sha1: string (40 characters)
id: int64 (0 to 710k)
def parse_flattened_result_only_intent(to_parse):
    """
    Parse out the belief state from the raw text.
    Return an empty list if the belief state can't be parsed

    Input:
    - A single <str> of flattened result
      e.g. 'User: Show me something else => Belief State : DA:REQUEST ...'

    Output:
    - Parsed result in a JSON format, where the format is:
        [
            {
                'act': <str>  # e.g. 'DA:REQUEST',
                'slots': [
                    <str> slot_name,
                    <str> slot_value
                ]
            }, ...  # End of a frame
        ]  # End of a dialog
    """
    def parse_intent(intent_str):
        pos_list = []
        for i in range(len(intent_str)):
            for j in range(i + 1, min(len(intent_str), i + 4)):
                sub_str = intent_str[i:j + 1]
                if sub_str == 'da:' or sub_str == 'err:':
                    pos_list.append(i)
                    break
        if not pos_list or len(pos_list) == 1:
            return [intent_str]
        return [intent_str[:pos] for pos in pos_list[1:]] + [intent_str[pos_list[-1]:]]

    belief = []

    # Parse
    # to_parse: 'DIALOG_ACT_1 : [ SLOT_NAME = SLOT_VALUE, ... ] ...'
    to_parse = to_parse.strip()

    intents = parse_intent(to_parse)
    for idx, dialog_act in enumerate(intents):
        d = dict()
        d['act'] = dialog_act.replace('.', ':')
        d['slots'] = []

        if d != {}:
            belief.append(d)

    return belief
05c7346197fa3f98f1c4194ea7d92622483d0f51
7,541
import re


def strip_outer_dollars(value):
    """Strip surrounding dollar signs from a TeX string, ignoring leading
    and trailing whitespace"""
    if value is None:
        return '{}'
    value = value.strip()
    m = re.match(r'^\$(.*)\$$', value)
    if m is not None:
        value = m.groups()[0]
    return value
3ad283af2835ba2bcf2c57705f2faa9495ea4e7a
7,543
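A quick usage sketch for the strip_outer_dollars function above, with hypothetical inputs:

assert strip_outer_dollars('  $x^2 + 1$  ') == 'x^2 + 1'  # outer $...$ and whitespace stripped
assert strip_outer_dollars('x^2 + 1') == 'x^2 + 1'        # unwrapped strings pass through unchanged
assert strip_outer_dollars(None) == '{}'                  # None maps to an empty TeX group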
import zipfile


def unzip(zip_address, file_name, encoding='UTF-8'):
    """Extract a file from a zip archive.

    :param zip_address: path to the zip archive
    :param file_name: name of the file inside the archive
    :param encoding: encoding of the file
    :return: the file's contents, decoded with the given encoding (UTF-8 by default)
    """
    f = zipfile.ZipFile(zip_address)
    fp = f.read(file_name)
    lines = fp.decode(encoding)
    return lines
ba71fd6f923c2c410b0461796da513583c15b9aa
7,551
import logging
import json


def read_json_file_into_memory(json_file):
    """
    Purpose:
        Read properly formatted JSON file into memory.
    Args:
        json_file (String): Filename for JSON file to load (including path)
    Returns:
        json_object (Dictionary): Dictionary representation of the JSON object
    Examples:
        >>> json_file = 'some/path/to/file.json'
        >>> json_object = read_json_file_into_memory(json_file)
        >>> print(json_object)
        >>> {
        >>>     'key': 'value'
        >>> }
    """
    logging.info(f"Reading JSON File Into Memory: {json_file}")

    try:
        with open(json_file, "r") as json_file_obj:
            return json.load(json_file_obj)
    except Exception as err:
        logging.exception(f"Cannot Read json into memory ({json_file}): {err}")
        raise err
70c2e6ab6180700ce77469b8afa0d0df8e0eee95
7,555
import torch


def dist(batch_reprs, eps=1e-16, squared=False):
    """
    Efficient function to compute the distance matrix for a matrix A.

    Args:
        batch_reprs: vector representations
        eps: float, minimal distance/clamping value to ensure no zero values.
        squared: bool, if True return squared distances.
    Returns:
        distance_matrix, clamped to ensure no zero values are passed.
    """
    prod = torch.mm(batch_reprs, batch_reprs.t())
    norm = prod.diag().unsqueeze(1).expand_as(prod)
    res = (norm + norm.t() - 2 * prod).clamp(min=0)

    if squared:
        return res.clamp(min=eps)
    else:
        return res.clamp(min=eps).sqrt()
be4d50e35ed11255eef2dc1acb4645de1453964c
7,559
def unquote(s):
    """Strip single quotes from the string.

    :param s: string to remove quotes from
    :return: string with quotes removed
    """
    return s.strip("'")
15d29698e6a3db53243fc4d1f277184958092bc6
7,561
def binary_to_decimal(binary):
    """
    Converts a binary number into a decimal number.
    """
    decimal = 0
    index = 0
    while binary > 0:
        last = binary % 10
        binary //= 10  # integer division; plain "/" would produce floats in Python 3 and break the loop
        decimal += (last * (2 ** index))
        index += 1
    return decimal
f1efdf19c802345e6badfed430dd82e3f067a419
7,564
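A quick check of the binary_to_decimal function above, with hypothetical inputs:

assert binary_to_decimal(1011) == 11  # 1*8 + 0*4 + 1*2 + 1*1
assert binary_to_decimal(0) == 0      # the loop never runs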
def namelist(names: list) -> str:
    """
    Format a string of names like 'Bart, Lisa & Maggie'

    :param names: a list of dicts, each containing a 'name' key
    :return: a string formatted as a list of names separated by commas
             except for the last two names, which should be separated by an ampersand.
    """
    if not names:
        return ""
    names_list = [name['name'] for name in names]
    if len(names_list) == 1:
        return names_list[0]
    elif len(names_list) == 2:
        return '{} & {}'.format(names_list[0], names_list[1])
    else:
        return ', '.join(names_list[:-1]) + ' & ' + names_list[-1]
b0b931f3f9365824931173c2aec3ac213342e0c3
7,565
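A usage sketch for the namelist function above, with a hypothetical input list:

people = [{'name': 'Bart'}, {'name': 'Lisa'}, {'name': 'Maggie'}]
assert namelist(people) == 'Bart, Lisa & Maggie'
assert namelist([{'name': 'Bart'}]) == 'Bart'
assert namelist([]) == ''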
import re


def normalize_newlines(text):
    """Normalizes CRLF and CR newlines to just LF."""
    # text = force_text(text)
    re_newlines = re.compile(r'\r\n|\r')  # Used in normalize_newlines
    return re_newlines.sub('\n', text)
835f038f0873546db96d8e9d2a0d622b4e974f7d
7,570
import math


def calc_distance(x1, y1, x2, y2):
    """
    Calculate the distance between 2 points given the coordinates

    :param x1: Point 1 X coordinate
    :param y1: Point 1 Y coordinate
    :param x2: Point 2 X coordinate
    :param y2: Point 2 Y coordinate
    :return: Double. Distance between point 1 and point 2
    """
    dx = x2 - x1
    dy = y2 - y1
    distance = math.hypot(dx, dy)
    return distance
dd143ed5dd088c8a4d205c47363cd463798796d9
7,581
def unit(value, unit, parenthesis=True):
    """Formats the numeric value of a unit into a string in a consistent way."""
    formatted = f"{value:,g} {unit}"

    if parenthesis:
        formatted = f"({formatted})"

    return formatted
fe039ec681a16e4a317f1a500c29ab44615addd1
7,584
def normalize_val_list(val_list):
    """Return the values in val_list scaled by the list's maximum value (zeros if the maximum is 0)."""
    max_val = float(max(val_list))
    return [val / max_val if max_val != 0 else 0 for val in val_list]
f594a6c281253bc181e436d1e1e3e1a83d07b56c
7,587
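A usage sketch for the normalize_val_list function above, with hypothetical values:

assert normalize_val_list([2, 4, 8]) == [0.25, 0.5, 1.0]  # scaled by the maximum, 8
assert normalize_val_list([0, 0]) == [0, 0]               # a maximum of 0 yields zeros instead of dividing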
def sieve(iterable, indicator):
    """Split an iterable into two lists by a boolean indicator function.

    Unlike `partition()` in iters.py, this does not clone the iterable twice.
    Instead, it runs through the iterable once and returns two lists.

    Args:
        iterable: iterable of finite items. This function will scan it until the end.
        indicator: an executable function that takes an argument and returns a boolean

    Returns:
        A tuple (positive, negative), where each item comes from the iterable
        and is tested with the indicator function
    """
    positive = []
    negative = []
    for item in iterable:
        (positive if indicator(item) else negative).append(item)
    return positive, negative
55f64a2aea55af05bd2139328157e3a877b8f339
7,589
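A usage sketch for the sieve function above, with a hypothetical predicate:

evens, odds = sieve(range(6), lambda x: x % 2 == 0)
assert evens == [0, 2, 4]
assert odds == [1, 3, 5]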
import re


def property_to_snake_case(property_type):
    """Converts a property type to a snake case name.

    Parameters
    ----------
    property_type: type of PhysicalProperty or str
        The property type to convert.

    Returns
    -------
    str
        The property type as a snake case string.
    """
    if not isinstance(property_type, str):
        property_type = property_type.__name__

    return re.sub(r"(?<!^)(?=[A-Z])", "_", property_type).lower()
fc0aa1c811a2de0bbd77f146a1816e8d7a31e08a
7,590
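A usage sketch for the property_to_snake_case function above; 'EnthalpyOfMixing' is a hypothetical property class name used only for illustration:

assert property_to_snake_case('EnthalpyOfMixing') == 'enthalpy_of_mixing'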
def read_binary(filepath):
    """return bytes read from filepath"""
    with open(filepath, "rb") as file:
        return file.read()
98587f79b5a2d8b8ff82909cb03957c0b3e2db7d
7,592
def sra_valid_accession(accession):
    """ Test whether a string is an SRA accession """
    if accession.startswith('SRR') and len(accession) == 10:
        return True
    return False
3a4c5f40490f68490620ddacb430a0fcc8dfdd89
7,593
def alaska_transform(xy):
    """Transform Alaska's geographical placement so it fits on a US map"""
    x, y = xy
    return (0.3 * x + 1000000, 0.3 * y - 1100000)
3f86caeee7b34295ce0680017dd70d6babea2a62
7,594
import re


def remove_punctuation(line):
    """
    Remove all half-width and full-width punctuation, keeping only letters, digits and Chinese characters.

    :param line: input string
    :return: cleaned string
    """
    rule = re.compile(u"[^a-zA-Z0-9\u4e00-\u9fa5]")
    line = rule.sub('', line)
    return line
d63f355c5d48ec31db0412dd7b12474bf9d6dd6b
7,615
def parse_int(text: str) -> int:
    """
    Takes in a number in string form and returns that string in integer form
    and handles zeroes represented as dashes
    """
    text = text.strip()
    if text == '-':
        return 0
    else:
        return int(text.replace(',', ''))
1f49a2d75c2fadc4796456640e9999796fddfa93
7,620
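A usage sketch for the parse_int function above, with hypothetical strings:

assert parse_int(' 1,234 ') == 1234  # thousands separators are dropped
assert parse_int('-') == 0           # a lone dash is treated as zero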
def _get_object_url_for_region(region, uri):
    """Internal function used to get the full URL to the passed PAR URI
    for the specified region. This has the format:

    https://objectstorage.{region}.oraclecloud.com/{uri}

    Args:
        region (str): Region for cloud service
        uri (str): URI for cloud service
    Returns:
        str: Full URL for use with cloud service
    """
    server = "https://objectstorage.%s.oraclecloud.com" % region

    while uri.startswith("/"):
        uri = uri[1:]

    return "%s/%s" % (server, uri)
72b069e1657b94c7800ba7e5fd2269909e83c856
7,621
def sequence_delta(previous_sequence, next_sequence):
    """ Check the number of items between two sequence numbers. """
    if previous_sequence is None:
        return 0

    delta = next_sequence - (previous_sequence + 1)
    return delta & 0xFFFFFFFF
8580c26d583c0a816de2d3dfc470274f010c347f
7,623
import re


def find_matched_pos(str, pattern):
    """
    Find all positions (start, end) of matched characters

    >>> find_matched_pos('ss12as34cdf', '\d')
    [(2, 3), (3, 4), (6, 7), (7, 8)]
    """
    match_objs = re.finditer(pattern, str)
    match_pos = [match_obj.span() for match_obj in match_objs]
    return match_pos
17e352299d7874bdfb66a4a181d04e90fba0af7e
7,628
def box_area(box):
    """
    Calculates the area of a bounding box.

    Source code mainly taken from:
    https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/

    `box`: the bounding box to calculate the area for
        with the format ((x_min, x_max), (y_min, y_max))

    return: the bounding box area
    """
    return max(0, box[0][1] - box[0][0] + 1) * max(0, box[1][1] - box[1][0] + 1)
3a52a41e8dc92d3a3a2e85a33a4ebcbbb7131091
7,629
def dsigmoid(sigmoid_x):
    """
    dSigmoid(x) = Sigmoid(x) * (1 - Sigmoid(x)) = Sigmoid(x) - Sigmoid(x)^2
    """
    return sigmoid_x - sigmoid_x**2
37e163e6baab1a2b584e9eef726895c919c80406
7,630
def progress_bar(iteration, total):
    """
    entertain the user with a simple yet exquisitely designed progress bar and %

    :param iteration:
    :param total:
    :return:
    """
    try:
        perc = round((iteration / total) * 100, 2)
        bar = ('░' * 19).replace('░', '█', int(round(perc / 5, 0))) + '░'
        return str("{:.2f}".format(perc)).zfill(6) + '% [' + bar + ']'
    except ZeroDivisionError:
        # this happens in newer versions of python; return nothing
        return ''
a32bd5edd108142583f0ea1573848c91c6d61c33
7,631
def get_min_freeboard(fjord):
    """ Get a required minimum median freeboard for filtering out icebergs """
    minfree = {"JI": 15, "KB": 5}

    try:
        return minfree.pop(fjord)
    except KeyError:
        print("The current fjord does not have a minimum freeboard median entry - using a default value!")
        return 10
b80fec4224dcf8ce0d24a19ca28e88e66db52150
7,636
def build_tag_regex(plugin_dict):
    """Given a plugin dict (probably from tagplugins) build an 'or' regex
    group. Something like: (?:latex|ref)
    """
    func_name_list = []
    for func_tuple in plugin_dict:
        for func_name in func_tuple:
            func_name_list.append(func_name)
    regex = '|'.join(func_name_list)
    return '(?:' + regex + ')'
367b95067cabd4dcdfd8981f6a14b650da3601c5
7,638
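A usage sketch for the build_tag_regex function above; the tuple key and the None value stand in for a real plugin mapping:

assert build_tag_regex({('latex', 'ref'): None}) == '(?:latex|ref)'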
def is_html_like(text):
    """
    Checks whether text is html or not
    :param text: string
    :return: bool
    """
    if isinstance(text, str):
        text = text.strip()
        if text.startswith("<"):
            return True
        return False
    return False
a499e14c243fcd9485f3925b68a0cef75fa069cb
7,639
import importlib


def import_optional_dependency(name, message):
    """
    Import an optional dependency.

    Parameters
    ----------
    name : str
        The module name.
    message : str
        Additional text to include in the ImportError message.

    Returns
    -------
    module : ModuleType
        The imported module.
    """
    try:
        return importlib.import_module(name)
    except ImportError:
        raise ImportError(
            f"Missing optional dependency '{name}'. {message} "
            + f"Use pip or conda to install {name}."
        ) from None
22d638e86b8d979b746507790532273d161323c8
7,642
def curate_url(url):
    """
    Put the url into a somewhat standard manner.

    Removes the ".txt" extension that it sometimes has, special characters and "http://"

    Args:
        url: String with the url to curate

    Returns:
        curated_url
    """
    curated_url = url
    curated_url = curated_url.replace(".txt", "")
    curated_url = curated_url.replace("\r", "")  # remove \r and \n
    curated_url = curated_url.replace("\n", "")  # remove \r and \n
    curated_url = curated_url.replace("_", ".")
    # remove "http://" and "/" (probably at the end of the url)
    curated_url = curated_url.replace("http://", "")
    curated_url = curated_url.replace("www.", "")
    curated_url = curated_url.replace("/", "")
    return curated_url
42ea836e84dfb2329dd35f6874b73ac18bde48d1
7,643
def patch_utils_path(endpoint: str) -> str:
    """Returns the utils module to be patched for the given endpoint"""
    return f"bluebird.api.resources.{endpoint}.utils"
31ae02bd0bee7c51ea5780d297427fdac63295b3
7,647
def rgbToGray(r, g, b):
    """
    Converts RGB to GrayScale using luminosity method

    :param r: red value (from 0.0 to 1.0)
    :param g: green value (from 0.0 to 1.0)
    :param b: blue value (from 0.0 to 1.0)
    :return GreyScale value (from 0.0 to 1.0)
    """
    g = 0.21 * r + 0.72 * g + 0.07 * b
    return g
59a874d1458ae35e196e1ca0874b16eadfd1a434
7,650
def maximum(x, y):
    """Returns the larger one between real number x and y."""
    return x if x > y else y
e98a4e720936a0bb271ee47c389811111fd65b00
7,652
def is_acceptable_multiplier(m):
    """A 61-bit integer is acceptable if it isn't 0 mod 2**61 - 1.
    """
    return 1 < m < (2 ** 61 - 1)
d099dd53296138b94ca5c1d54df39b9cf7ad2b5d
7,653
def extractRSS_VSIZE(line1, line2, record_number):
    """
    >>> extractRSS_VSIZE("%MSG-w MemoryCheck: PostModule 19-Jun-2009 13:06:08 CEST Run: 1 Event: 1", \
                         "MemoryCheck: event : VSIZE 923.07 0 RSS 760.25 0")
    (('1', '760.25'), ('1', '923.07'))
    """
    if ("Run" in line1) and ("Event" in line1):  # the first line
        event_number = line1.split('Event:')[1].strip()
    else:
        return False

    """ it's first or second MemoryCheck line """
    if ("VSIZE" in line2) and ("RSS" in line2):  # the second line
        RSS = line2.split("RSS")[1].strip().split(" ")[0].strip()
        # changed partition into split for backward compatibility with py2.3
        VSIZE = line2.split("RSS")[0].strip().split("VSIZE")[1].strip().split(" ")[0].strip()
        # Hack to return the record number instead of the event number for now...
        # can always switch back or add the event number on top
        # return ((event_number, RSS), (event_number, VSIZE))
        return ((record_number, RSS), (record_number, VSIZE))
    else:
        return False
89c14bbb3a2238ec9570daf54248b4bd913eecca
7,654
def delete_redshift_cluster(config, redshift):
    """
    Deletes the Redshift cluster specified in config

    Args:
        config: a ConfigParser object
        redshift: a boto3 client object for the AWS Redshift service
    """
    try:
        print("Deleting Redshift Cluster: ", config['CLUSTER']['IDENTIFIER'])
        return redshift.delete_cluster(
            ClusterIdentifier=config['CLUSTER']['IDENTIFIER'],
            SkipFinalClusterSnapshot=True
        )
    except Exception as e:
        print(e)
2267eb4f017354563c9a7cf047a3a84983cd0044
7,659
def add_trailing_load(axle_spacing, axle_wt, space_to_trailing_load,
                      distributed_load, span1_begin, span2_end,
                      pt_load_spacing=0.5):
    """Approximates the distributed trailing load as closely spaced point loads.

    The distributed trailing load is approximated as discretely spaced point
    loads. The point load spacing is assumed to be 0.5 unless the user
    specifically enters a different spacing. The number of loads to add is
    determined by dividing the total span length, span 1 plus span 2, by the
    point load spacing.

    Args:
        axle_spacing (list of floats): spacing of axles used for analysis
        axle_wt (list of floats): weight of axles used for analysis
        space_to_trailing_load (float): distance from last discrete axle to
                                        beginning of distributed load
        distributed_load (float): uniformly distributed trailing load magnitude
        span1_begin (float): coordinate location of beginning of span 1
        span2_end (float): coordinate location of end of span 2
        point_load_spacing (float, optional): spacing of approximate discretely
                                              spaced point loads, defaults to 0.5

    Returns:
        axle_spacing (list of floats): user input axle spacing appended with
                                       axle spacing for discretely spaced loads
                                       to approximate the distributed load
        axle_wt (list of floats): user input axle weights appended with axle
                                  weights for discretely spaced loads to
                                  approximate the distributed load

    Notes:
        Based on testing it can be shown that a reasonable level of accuracy is
        found in the forces and reactions using a discrete point load spacing
        of 0.5. This spacing assumes the span lengths are entered in feet.

        If the user does not want to have a distributed load on the entire
        length of the bridge it is suggested that the actual axle spacing and
        axle weights of the trailing load are entered and no distributed load
        is specified.
    """
    # approximate a distributed trailing load as closely spaced point loads
    # each point load is the distributed load times the point load spacing
    # the point load spacing is a function of the span length and number of
    # divisions required
    mod_axle_spacing = axle_spacing[:]
    mod_axle_wt = axle_wt[:]

    if space_to_trailing_load < 0.0:
        raise ValueError("Must enter a positive float for space to trailing"
                         " load.")
    elif distributed_load < 0.0:
        raise ValueError("Must enter a positive float for distributed load.")
    elif pt_load_spacing <= 0.0:
        raise ValueError("Must enter a positive float (or nothing for default"
                         " value of 0.5) for the point load spacing.")
    elif distributed_load != 0.0 and space_to_trailing_load != 0.0:
        total_span_length = span2_end - span1_begin
        num_loads = int(total_span_length / pt_load_spacing)
        equivalent_pt_load = distributed_load * pt_load_spacing

        mod_axle_spacing.append(space_to_trailing_load)
        mod_axle_wt.append(equivalent_pt_load)

        for x in range(num_loads):
            mod_axle_spacing.append(pt_load_spacing)
            mod_axle_wt.append(equivalent_pt_load)

    return mod_axle_spacing, mod_axle_wt
3eac900cff7d5e66c399e7f846d66aeff3e7389c
7,662
def str_append(string, add):
    """Append add to the end of string, followed by a newline.

    Example: str_append('hou', 'se') returns 'house' followed by a newline."""
    return string + str(add) + "\n"
efbc9a085d1e63f290af3e6c447cde13bce5f5d0
7,663
def unformat(combination: str) -> str:
    """Unformats a formatted string to its original state"""
    return str(combination).replace("<", "").replace(">", "")
d017903ddaac78adf5085198d25eb508b62a78b4
7,665
import re


def _size_to_bytes(size):
    """
    Parse a string with a size into a number of bytes. I.e. parses "10m", "10MB", "10 M" and other variations
    into the number of bytes in ten megabytes. Floating-point numbers are rounded to the nearest byte.

    :type size: ``str``
    :param size: The size to parse, given as a string with byte unit. No byte unit is assumed to be in bytes.
        Scientific notation is not allowed; must be an integer or real number followed by a case-insensitive
        byte unit (e.g. as "k" or "KB" for kilobyte, "g" or "Gb" for gigabyte, or a similar convention).
        Positive/negative sign in front of number is allowed.
    :rtype: ``long``
    :return: The number of bytes represented by the given string.
    """
    units = 'KMGTPEZY'  # note that position of letter is same as power - 1

    match = re.search(r'^\s*([-+]?\s*[0-9]*\.?[0-9]*)\s*([' + units + r']?\s*B?\s*S?)\s*', size, re.IGNORECASE)
    if match is None or match.group(1) == '':
        raise ValueError("size string not in proper format 'number [kmgtpezy]': " + size)
    mem_size = float(re.sub(r'\s*', '', match.group(1)))
    unit = re.sub(r'\s*', '', match.group(2)).upper()
    unit = re.sub(r'B?S?$', '', unit)  # remove trailing units symbol
    if unit == '':
        unit_pow = 0
    else:
        unit_pow = units.find(unit) + 1
    byte_size = int(round(mem_size * (1024 ** unit_pow)))
    return byte_size
833657e51bb2c54b0e86684759e263d2f8b03ffe
7,666
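A usage sketch for the _size_to_bytes function above, with hypothetical size strings:

assert _size_to_bytes('10m') == 10 * 1024 ** 2  # 10485760
assert _size_to_bytes('1.5 KB') == 1536         # rounded to the nearest byte
assert _size_to_bytes('512') == 512             # no unit means plain bytes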
def insertDoubleQuote(string, index):
    """ Insert a double quote in the specified string at the specified index and return the string."""
    return string[:index] + '\"' + string[index:]
00d16f3bc619765895408f9fcdd3a7a6e428b153
7,672
def powerlaw(x, a, b, c):
    """Powerlaw function used by fitting software to characterise uncertainty."""
    return a * x**b + c
e67a0be2f5faaff7867b713b43caec48910bad87
7,673
def readPeakList(peak_file):
    """
    Read in list of peaks to delete from peaks_file. Comment lines (#) and
    blank lines are ignored.
    """
    f = open(peak_file, 'r')
    peak_list = f.readlines()
    f.close()

    peak_list = [l for l in peak_list if l[0] != "#" and l.strip() != ""]
    peak_list = [l.strip() for l in peak_list]

    return peak_list
7c99f9fb18b36b658fe142a43adf18db7c42c7bd
7,675
def readable_timedelta(days):
    """Return the number of weeks and days in a number of days."""
    # to get the number of weeks we use integer division
    weeks = days // 7
    # to get the number of days that remain we use %, the modulus operator
    remainder = days % 7
    return "{} week(s) and {} day(s).".format(weeks, remainder)
120f517939842b4e0686a57a3117221e3db63004
7,681
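A usage sketch for the readable_timedelta function above:

assert readable_timedelta(10) == '1 week(s) and 3 day(s).'
assert readable_timedelta(7) == '1 week(s) and 0 day(s).'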
def isMessageBody(line: str) -> bool:
    """
    Returns True if the line contains more than just whitespace and is not a
    comment (does not start with #)
    """
    return not (line.isspace() or line.lstrip().startswith('#'))
990ae3ff01f794a6c8d4d45ecb766a763c51dff8
7,683
def bit_list_to_int(bit_list):
    """
    Converts binary number represented as a list of 0's and 1's into its
    corresponding base 10 integer value.

    Args:
        bit_list: a binary number represented as a list of 0's and 1's

    Returns:
        The base 10 integer value of the input binary number
    """
    bit_string = ''.join([('0', '1')[b] for b in bit_list])
    base_ten_representation = int(bit_string, 2)
    return base_ten_representation
ade66899fe1d23a22c76cccf4ba57e9ad9bf0ba1
7,686
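A usage sketch for the bit_list_to_int function above:

assert bit_list_to_int([1, 0, 1, 1]) == 11
assert bit_list_to_int([0]) == 0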
import re


def email(value: str):
    """
    Extract email from document

    Example Result: ['[email protected]', '[email protected]']
    """
    _email_pat = r'[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+'
    return re.findall(_email_pat, value)
c8f3dcb4163e99f0aefe7eb42e61b127ffbaa393
7,688
def TransposeTable(table):
    """Transpose a list of lists, using None to extend all input lists to the
    same length.

    For example:
    >>> TransposeTable(
        [
            [11, 12, 13],
            [21, 22],
            [31, 32, 33, 34]])
    [
        [11, 21, 31],
        [12, 22, 32],
        [13, None, 33],
        [None, None, 34]]
    """
    transposed = []
    rows = len(table)
    cols = max(len(row) for row in table)
    for x in range(cols):
        transposed.append([])
        for y in range(rows):
            if x < len(table[y]):
                transposed[x].append(table[y][x])
            else:
                transposed[x].append(None)
    return transposed
d53dc20a9eff391560269e818e99d41f8dc2ce94
7,689
def percentage(value):
    """Return a float with 1 point of precision and a percent sign."""
    return format(value, ".1%")
43567c120e4994b54a92570405c02934eb989a6f
7,692
def identity_matrix(dim):
    """Construct an identity matrix.

    Parameters
    ----------
    dim : int
        The number of rows and/or columns of the matrix.

    Returns
    -------
    list of list
        A list of `dim` lists, with each list containing `dim` elements.
        The items on the "diagonal" are one. All other items are zero.

    Examples
    --------
    >>> identity_matrix(4)
    [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
    """
    return [[1. if i == j else 0. for i in range(dim)] for j in range(dim)]
dd37f0c7df41478e23dd26df727341a37a201ec1
7,697
import yaml


def load_yaml(file_path):
    """Load a yaml file into a dictionary"""
    try:
        with open(file_path, 'r') as file:
            return yaml.safe_load(file)
    except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
        return None
3d4fa37794bc99c352959e49057d2e9cfb0d4c92
7,700
import time


def timed_call(f, args):
    """Call function f with arguments args and time its run time.

    Args:
        f: The function to call
        args: The arguments to pass to f

    Returns:
        Return the result of the function call and how much time it takes as a
        tuple, e.g. (result, time).
    """
    start_time = time.time()
    result = f(*args)
    elapsed_time = time.time() - start_time
    return result, round(elapsed_time, 3)
e592ecdf5ebb4aa3391b2500b2a3a20d2faa9b40
7,703
def count_words(texts):
    """
    Counts the words in the given texts, ignoring punctuation and the like.

    @param texts - Texts (as a single string or list of strings)
    @return Word count of texts
    """
    if type(texts) is list:
        return sum(len(t.split()) for t in texts)
    return len(texts.split())
f08cbb1dcac3cbd6b62829cf4467167ae9b7694e
7,704
def quadraticEval(a, b, c, x):
    """given all params return the result of quadratic equation a*x^2 + b*x + c"""
    return a * (x**2) + b * x + c
cfb808435b50ec262ec14cd54cf9caf30f2bc4b8
7,706
def stringdb_escape_text(text):
    """Escape text for database_documents.tsv format."""
    return text.replace('\\', '\\\\').replace('\t', '\\t')
5d41b0b224cb314141b669ff721896d04a2fe2e8
7,707
def friends(graph, user):
    """Returns a set of the friends of the given user, in the given graph"""
    return set(graph.neighbors(user))
125c3cc21be4cc29f9ff6f0ff0bb60b35a1074ba
7,710
def diagonal(a, offset=0, axis1=None, axis2=None, extract=True, axes=None):
    """
    diagonal(a, offset=0, axis1=None, axis2=None)

    Return specified diagonals.

    If `a` is 2-D, returns the diagonal of `a` with the given offset, i.e., the
    collection of elements of the form ``a[i, i+offset]``. If `a` has more than
    two dimensions, then the axes specified by `axis1` and `axis2` are used to
    determine the 2-D sub-array whose diagonal is returned. The shape of the
    resulting array can be determined by removing `axis1` and `axis2` and
    appending an index to the right equal to the size of the resulting
    diagonals.

    Parameters
    ----------
    a : array_like
        Array from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal. Can be positive or
        negative. Defaults to main diagonal (0).
    axis1 : int, optional
        Axis to be used as the first axis of the 2-D sub-arrays from which
        the diagonals should be taken. Defaults to first axis (0).
    axis2 : int, optional
        Axis to be used as the second axis of the 2-D sub-arrays from
        which the diagonals should be taken. Defaults to second axis (1).

    Returns
    -------
    array_of_diagonals : ndarray
        If `a` is 2-D, then a 1-D array containing the diagonal and of the
        same type as `a` is returned unless `a` is a `matrix`, in which case
        a 1-D array rather than a (2-D) `matrix` is returned in order to
        maintain backward compatibility.

        If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
        are removed, and a new axis inserted at the end corresponding to the
        diagonal.

    Raises
    ------
    ValueError
        If the dimension of `a` is less than 2.

    Notes
    -----
    Unlike NumPy's, the cuNumeric implementation always returns a copy

    See Also
    --------
    numpy.diagonal

    Availability
    --------
    Multiple GPUs, Multiple CPUs
    """
    return a.diagonal(
        offset=offset, axis1=axis1, axis2=axis2, extract=extract, axes=axes
    )
e18a9ca2dcab7beb5891f701cdc0f26c3943f749
7,711
def ctd_sbe16digi_preswat(p0, t0, C1, C2, C3, D1, D2, T1, T2, T3, T4, T5):
    """
    Description:

        OOI Level 1 Pressure (Depth) data product, which is calculated using
        data from the Sea-Bird Electronics conductivity, temperature and depth
        (CTD) family of instruments.

        This data product is derived from SBE 16Plus instruments outfitted
        with a digiquartz pressure sensor. This applies to the CTDBP-N,O
        instruments only.

    Implemented by:

        2013-05-10: Christopher Wingard. Initial Code.
        2013-05-10: Christopher Wingard. Minor edits to comments.
        2014-01-31: Russell Desiderio. Standardized comment format.
        2014-01-31: Russell Desiderio. Modified algorithm to use pressure [Hz]
                    (pf) to calculate pressure period instead of pressure
                    [counts] (p0). See SeaBird 16Plus V2 User Manual
                    (reference (2)), page 57, item 5.

    Usage:

        p = ctd_sbe16digi_preswat(p0,t0,C1,C2,C3,D1,D2,T1,T2,T3,T4,T5)

            where

        p = sea water pressure (PRESWAT_L1) [dbar]
        p0 = raw pressure (PRESWAT_L0) [counts]
        t0 = raw temperature from pressure sensor thermistor [counts]
        C1 = digiquartz pressure calibration coefficients
        C2 = digiquartz pressure calibration coefficients
        C3 = digiquartz pressure calibration coefficients
        D1 = digiquartz pressure calibration coefficients
        D2 = digiquartz pressure calibration coefficients
        T1 = digiquartz pressure calibration coefficients
        T2 = digiquartz pressure calibration coefficients
        T3 = digiquartz pressure calibration coefficients
        T4 = digiquartz pressure calibration coefficients
        T5 = digiquartz pressure calibration coefficients

    References:

        OOI (2012). Data Product Specification for Pressure (Depth). Document
            Control Number 1341-00020. https://alfresco.oceanobservatories.org/
            (See: Company Home >> OOI >> Controlled >> 1000 System Level >>
            1341-00020_Data_Product_SPEC_PRESWAT_OOI.pdf)

        OOI (2011). SeaBird 16Plus V2 User Manual. 1341-00020_PRESWAT Artifact.
            https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
            >> REFERENCE >> Data Product Specification Artifacts >>
            1341-00020_PRESWAT >> PRESWAT_SeaBird_16PlusV2_2009.pdf)
    """
    # Convert raw pressure input to frequency [Hz]
    pf = p0 / 256.0

    # Convert raw temperature input to voltage
    tv = t0 / 13107.0

    # Calculate U (thermistor temp):
    U = (23.7 * (tv + 9.7917)) - 273.15

    # Calculate calibration parameters
    C = C1 + C2 * U + C3 * U**2
    D = D1 + D2 * U
    T0 = T1 + T2 * U + T3 * U**2 + T4 * U**3 + T5 * U**4

    # Calculate T (pressure period, in microseconds):
    T = (1.0 / pf) * 1.0e6

    # compute pressure in psi, rescale and compute in dbar and return
    p_psi = C * (1.0 - T0**2 / T**2) * (1.0 - D * (1.0 - T0**2 / T**2))
    p_dbar = (p_psi * 0.689475729) - 10.1325
    return p_dbar
3756752c661773bd74436311a278efdaa3d3913f
7,714
import hashlib


def sha256(file: str):
    """
    Reads a file content and returns its sha256 hash.
    """
    sha = hashlib.sha256()
    with open(file, "rb") as content:
        for line in content:
            sha.update(line)
    return sha.hexdigest()
c6babc2939e25228df25827a5a0b383d6c68dd07
7,715
import json


def to_json(response):
    """
    Return a response as JSON.
    """
    assert response.status_code == 200
    return json.loads(response.get_data(as_text=True))
4fb4d62eb8b793363394b6d0759a923f90315072
7,719
def get_interface_by_name(interfaces, name):
    """
    Return an interface by its devname

    :param name: interface devname
    :param interfaces: interfaces dictionary provided by interface_inspector
    :return: interface dictionary
    """
    for interface in interfaces:
        if interface['devname'] == name:
            return interface
9d63bf667a0677ba7d0c3fdde2b4b35affc3b72b
7,726
def normalize_trans_probs(p):
    """
    Normalize a set of transition probabilities.

    Parameters
    ----------
    p : pandas.DataFrame, dtype float
        Unnormalized transition probabilities. Indexed by
        source_level_idx, destination_level_idx.

    Returns
    -------
    pandas.DataFrame, dtype float
        Normalized transition probabilities: the sum of all probabilities
        with the same source_level_idx sum to one. Indexed by
        source_level_idx, destination_level_idx.
    """
    p_summed = p.groupby(level=0).sum()
    index = p.index.get_level_values("source_level_idx")
    p_norm = p / p_summed.loc[index].values
    p_norm = p_norm.fillna(0.0)
    return p_norm
d484c4ac08ee785e5451b1aa92ff2b85fc945384
7,728
def get_number_of_ones(n):
    """
    Determine the number of 1s in the binary representation of an integer n.
    """
    return bin(n).count("1")
83fb14c29064008dd9f8e7ecea4c1d9dfae1dafa
7,729
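A usage sketch for the get_number_of_ones function above:

assert get_number_of_ones(13) == 3  # 0b1101
assert get_number_of_ones(0) == 0   # bin(0) == '0b0'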
from typing import Iterable
from typing import Dict
from typing import Sequence
import hashlib
import mmap


def verify_checksums(sources: Iterable[str], hashes: Dict[str, Sequence[str]]) -> bool:
    """Verify checksums for local files.

    Prints a message whenever there is a mismatch.

    Args:
        sources: An iterable of source strings.
        hashes: A dictionary of hash name -> hashes.
            For each entry, the list of hashes corresponds to `sources` in
            order but may be shorter.

    Returns:
        True if no checksums are mismatched, otherwise False.
    """
    valid = True
    hashlib_warnings = set()
    for i, source in enumerate(sources):
        try:
            source_file, _ = source.split("::")
        except ValueError:
            source_file = source
        for hashname, source_hashes in hashes.items():
            try:
                expected_digest = source_hashes[i]
            except IndexError:
                continue
            if expected_digest.lower() == "skip":
                continue
            try:
                h = hashlib.new(hashname)
            except ValueError:
                # Hopefully unlikely. As of the time of writing, all of HASHES
                # are in hashes.algorithms_guaranteed.
                if hashname not in hashlib_warnings:
                    print(
                        f"Warning: Your version of hashlib doesn't support {hashname}"
                    )
                    hashlib_warnings.add(hashname)
                continue
            try:
                with open(source_file, "rb") as f:
                    # Memory map in case the file is large
                    contents = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
                    h.update(contents)  # type: ignore
            except FileNotFoundError:
                break  # No point trying other hashes
            actual_digest = h.hexdigest()
            if expected_digest != actual_digest:
                print(source_file)
                print(f"\tExpected ({hashname}): {expected_digest}")
                print(f"\tActual ({hashname}): {actual_digest}")
                valid = False
    return valid
95abd66c3e6a007b8df8b2caaecde8a85a1f0886
7,730
def get_least_sig_bit(band):
    """Return the least significant bit in color value"""
    mask = 0x1
    last_bit = band & mask
    return str(last_bit)
9d56bc5fdbf613f31bf7b21ee08f4f753b3a92db
7,738
def denormalize_image(image):
    """ Undo normalization of image. """
    image = (image / 2) + 0.5
    return image
c49a1465d89e317a1c8013969fbee913bf705f4a
7,744
import imghdr


def check_bmp(input_file_name):
    """ Check if filename is a BMP file

    :param input_file_name: input file name
    :type input_file_name: string
    :return whether the file is .bmp
    :rtype boolean
    """
    return 'bmp' == imghdr.what(input_file_name)
3a8749832418d3976825a79a0bd89c7a77649fe8
7,746
def n2es(x):
    """None/Null to Empty String
    """
    if not x:
        return ""
    return x
cf73dd72230040cfc1c71b248b4cdd490004a213
7,751
import re


def __remove_punctuation(string):
    """
    Remove all the punctuation symbols and characters in a string.

    :param string: the string where the punctuation characters must be removed.
    :return: a string without punctuation.
    """
    return re.sub("[!@#£$.()/-]", "", string)
bb2015dc040fedb3656099b57b103f7fb9c416b9
7,755
def pad_chunk_columns(chunk):
    """Given a set of items to be inserted, make sure they all have the
    same columns by padding columns with None if they are missing."""
    columns = set()
    for record in chunk:
        columns.update(record.keys())
    for record in chunk:
        for column in columns:
            record.setdefault(column, None)
    return chunk
2e5d91ad03ad613b55bcaea97fd8c0785eec977f
7,756
def make_a_tweet(Hash, Message, Reduced):
    """
    Generate a valid tweet using the info passed in.
    """
    tweet = Hash + ': ' + Message
    if Reduced:
        tweet += '…'
    return tweet
1d0c3246874f8a6c9b3cb1b1f7cf27040ff1bd1b
7,761
def ret_int(potential):
    """Utility function to check the input is an int, including negative."""
    try:
        return int(potential)
    except (TypeError, ValueError):  # narrowed from a bare except; these are what int() raises
        return None
682ab4987e94d7d758be5957b610dc1ee72156a1
7,769
def read_file(file_name):
    """Read contents of file."""
    with open(file_name, encoding='utf8') as file:
        return file.read().rstrip().lstrip()
cb8e85c076baa97d8f1a5361abe6ab4ee5b9f00c
7,770
def deep_dictionary_check(dict1: dict, dict2: dict) -> bool:
    """Used to check if all keys and values between two dicts are equal,
    and recurses if it encounters a nested dict."""
    if dict1.keys() != dict2.keys():
        return False
    for key in dict1:
        if isinstance(dict1[key], dict) and not deep_dictionary_check(dict1[key], dict2[key]):
            return False
        elif dict1[key] != dict2[key]:
            return False
    return True
b5011c2c79c79ecc74953e5f44db5c4a62464c07
7,776
def SEARCH(find_text, within_text, start_num=1):
    """
    Returns the position at which a string is first found within text,
    ignoring case (unlike FIND, which is case-sensitive).

    The returned position is 1 if within_text starts with find_text.
    Start_num specifies the character at which to start the search,
    defaulting to 1 (the first character of within_text).

    If find_text is not found, or start_num is invalid, raises ValueError.

    >>> SEARCH("e", "Statements", 6)
    7
    >>> SEARCH("margin", "Profit Margin")
    8
    >>> SEARCH(" ", "Profit Margin")
    7
    >>> SEARCH('"', 'The "boss" is here.')
    5
    >>> SEARCH("gle", "Google")
    4
    >>> SEARCH("GLE", "Google")
    4
    """
    # .lower() isn't always correct for unicode. See http://stackoverflow.com/a/29247821/328565
    return within_text.lower().index(find_text.lower(), start_num - 1) + 1
1afc843583695a801aca28b5013a6afa21221094
7,778
def getvaluelist(doclist, fieldname):
    """
    Returns a list of values of a particular fieldname from all Document objects in a doclist
    """
    l = []
    for d in doclist:
        l.append(d.fields[fieldname])
    return l
b85d171b537636477b00021ce717788b5e4735da
7,780
def changed_keys(a: dict, b: dict) -> list:
    """Compares two dictionaries and returns list of keys where values are different"""
    # Note! This function disregards keys that don't appear in both dictionaries
    return [k for k in (a.keys() & b.keys()) if a[k] != b[k]]
77ae93614a2c736091886024338c1b4ecb1f6ec1
7,783
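A usage sketch for the changed_keys function above, with hypothetical dicts:

a = {'host': 'web1', 'port': 80, 'tls': False}
b = {'host': 'web1', 'port': 8080}
assert changed_keys(a, b) == ['port']  # 'tls' is ignored because it is missing from b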
def _negation(value):
    """Parse an optional negation after a verb (in a Gherkin feature spec)."""
    if value == "":
        return False
    elif value in [" not", "not"]:
        return True
    else:
        raise ValueError("Cannot parse '{}' as an optional negation".format(value))
c13f06b8a11ecbe948a4c2d710e165e1731f08fd
7,784
import warnings


def ensure_all_columns_are_used(num_vars_accounted_for,
                                dataframe,
                                data_title='long_data'):
    """
    Ensure that all of the columns from dataframe are in the list of used_cols.
    Will raise a helpful UserWarning if otherwise.

    Parameters
    ----------
    num_vars_accounted_for : int.
        Denotes the number of variables used in one's function.
    dataframe : pandas dataframe.
        Contains all of the data to be converted from one format to another.
    data_title : str, optional.
        Denotes the title by which `dataframe` should be referred in the
        UserWarning.

    Returns
    -------
    None.
    """
    dataframe_vars = set(dataframe.columns.tolist())
    num_dataframe_vars = len(dataframe_vars)

    if num_vars_accounted_for == num_dataframe_vars:
        pass
    elif num_vars_accounted_for < num_dataframe_vars:
        msg = "Note, there are {:,} variables in {} but the inputs"
        msg_2 = " ind_vars, alt_specific_vars, and subset_specific_vars only"
        msg_3 = " account for {:,} variables."

        warnings.warn(msg.format(num_dataframe_vars, data_title) +
                      msg_2 + msg_3.format(num_vars_accounted_for))
    else:  # This means num_vars_accounted_for > num_dataframe_vars
        msg = "There are more variables specified in ind_vars, "
        msg_2 = "alt_specific_vars, and subset_specific_vars ({:,}) than there"
        msg_3 = " are variables in {} ({:,})"

        warnings.warn(msg + msg_2.format(num_vars_accounted_for) +
                      msg_3.format(data_title, num_dataframe_vars))

    return None
0470503c8adac107f85dd628409fc3ca8de641d3
7,788
def arrayizeDict(g):
    """Transforms a dict with unique sequential integer indices into an array"""
    mk = max(g.keys())
    ga = [None] * mk
    for k, v in g.items():
        ga[k - 1] = v
    return ga
d2da3848436be8d47b3f338797eefd87cfa4344c
7,790
def get_values(units, *args):
    """
    Return the values of Quantity objects after optionally converting to units.

    Parameters
    ----------
    units : str or `~astropy.units.Unit` or None
        Units to convert to. The input values are converted to ``units``
        before the values are returned.
    args : `~astropy.units.Quantity`
        Quantity inputs.
    """
    if units is not None:
        result = [a.to_value(unit) for a, unit in zip(args, units)]
    else:
        result = [a.value for a in args]

    return result
462e336fa2f4bcdfd77ba43658c37cf4c6782c75
7,792
def parse_print_dur(print_dur):
    """
    Parse formatted string containing print duration to total seconds.

    >>> parse_print_dur(" 56m 47s")
    3407
    """
    h_index = print_dur.find("h")
    hours = int(print_dur[h_index - 2 : h_index]) if h_index != -1 else 0
    m_index = print_dur.find("m")
    minutes = int(print_dur[m_index - 2 : m_index]) if m_index != -1 else 0
    s_index = print_dur.find("s")
    seconds = int(print_dur[s_index - 2 : s_index]) if s_index != -1 else 0

    return hours * 60 * 60 + minutes * 60 + seconds
7b1a29f31ba38e7d25b4dca9600d4be96a1da3ac
7,797
def _find_vlan(mac, domain_interfaces):
    """
    Given a mac address and a collection of domains and their network
    interfaces, find the domain that is assigned the interface with the
    desired mac address.

    Parameters
    ----------
    mac : str
        The MAC address.
    domain_interfaces : dict
        The list of domain interfaces.

    Returns
    -------
    The domain
    """
    for d, i in domain_interfaces.items():
        if i.get(mac):
            return d
    return None
c4f667dd80146de83157e8966cb34e5867457397
7,799
def isPostCSP(t, switch=961986575.):
    """
    Given a GALEX time stamp, return TRUE if it corresponds to a "post-CSP"
    eclipse. The actual CSP was on eclipse 37423, but the clock change (which
    matters more for calibration purposes) occurred on 38268 (t~=961986575.)

    :param t: The time stamp to test.
    :type t: float

    :param switch: The GALEX time stamp that defines pre- and post-CSP.
    :type switch: float

    :returns: bool -- Does this time correspond to a post-CSP eclipse?
    """
    # Check that the tscale has been applied properly
    if not switch / 100. < t < switch * 100.:
        raise ValueError('Did you apply tscale wrong?')

    return t >= switch
2b731ec0bb06ce08d2a36137e3d48563e5cb0c11
7,803
import uuid


def get_a_uuid() -> str:
    """Gets a random UUID4 as a string

    Returns:
        The id that was generated
    """
    r_uuid = str(uuid.uuid4())
    return r_uuid
20e87638c3718b4b75ffc48cf980216120edaea8
7,806
from pathlib import Path


def cookiecutter_cache_path(template):
    """
    Determine the cookiecutter template cache directory given a template URL.

    This will return a valid path, regardless of whether `template`

    :param template: The template to use. This can be a filesystem path or
        a URL.
    :returns: The path that cookiecutter would use for the given template name.
    """
    template = template.rstrip('/')
    tail = template.split('/')[-1]
    cache_name = tail.rsplit('.git')[0]
    return Path.home() / '.cookiecutters' / cache_name
cbdc72195bf47fb1bb91368ac2bbca3890775a60
7,807
def get_cloudify_endpoint(_ctx):
    """
    ctx.endpoint collapses the functionality for local and manager rest clients.

    :param _ctx: the NodeInstanceContext
    :return: endpoint object
    """
    if hasattr(_ctx._endpoint, 'storage'):
        return _ctx._endpoint.storage
    return _ctx._endpoint
37ac79f564626399bd940d4c9d9fd9fb8417c424
7,809
def get_log_info(prefix='', rconn=None):
    """Return info log as a list of log strings, newest first. On failure, returns empty list"""
    if rconn is None:
        return []
    # get data from redis
    try:
        logset = rconn.lrange(prefix + "log_info", 0, -1)
    except:
        return []
    if logset:
        loglines = [item.decode('utf-8') for item in logset]
    else:
        return []
    loglines.reverse()
    return loglines
d333fbeaff754e352a0b84c10f4d28e148badfa0
7,816
def spacecraft_vel(deltaw, deltan, deltar, dij, vmap):
    """
    function to calculate pixel-wise spacecraft velocities for Sunpy map

    Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.

    Parameters
    ----------
    deltaw: float, array
        relative westward position of pixel
    deltan: float, array
        relative northward position of pixel
    deltar: float, array
        relative radial position of pixel
    dij: float
        distance between pixel ij and spacecraft
    vmap: map
        Sunpy map object (Dopplergram)

    Returns
    -------
    vsc: float, array
        array of spacecraft velocities
    """
    # velocity of spacecraft relative to sun
    vscw = vmap.meta['obs_vw']
    vscn = vmap.meta['obs_vn']
    vscr = vmap.meta['obs_vr']

    # pixel-wise magnitude of spacecraft velocity
    vsc = - (deltaw * vscw + deltan * vscn + deltar * vscr) / dij

    return vsc
78c2acffc4f14c3f707cc50e1594ad7012bc1b08
7,819
import time


def convert_time(t):
    """Takes epoch time and translates it to a human readable version"""
    return time.strftime('%Y-%m-%d', time.localtime(t))
e591e32e30a8ceb81c9934f4e67556896a56b79a
7,822
def valid_box(box, host_extent):
    """Returns True if the entire box is within a grid of size host_extent.

    input arguments (unmodified):
        box: numpy int array of shape (2, 3)
            lower & upper indices in 3 dimensions defining a logical cuboid subset
            of a 3D cartesian grid in python protocol of zero base, kji (normally)
            or ijk ordering same as for host_extent
        host_extent: triple int
            the extent (shape) of a 3D cartesian grid

    returns: boolean
        True if box is a valid box within a grid of shape host_extent, False otherwise
    """
    if box.ndim != 2 or box.shape != (2, 3) or box.dtype != 'int':
        return False
    if len(host_extent) != 3:
        return False
    for d in range(3):
        if box[0, d] < 0 or box[0, d] > box[1, d] or box[1, d] >= host_extent[d]:
            return False
    return True
c6b1bc144e23b35002a1fbf17d4e02d9ba904655
7,824
import yaml


def yaml_dump_result(obj, stream):
    """Redefinition of yaml.safe_dump with added float representer

    The float representer uses float precision of four decimal digits
    """
    def float_representer(dumper, value):
        text = '{0:.4f}'.format(value)
        return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)

    class ResultDumper(yaml.SafeDumper):
        def __init__(self, *args, **kwargs):
            super(ResultDumper, self).__init__(*args, **kwargs)
            self.add_representer(float, float_representer)

    yaml.dump(obj, stream, Dumper=ResultDumper, default_flow_style=False, sort_keys=True)
55a8a06e918276060505224a680e1cb136d4a541
7,825
def rho_NFW(r, rs, rhos):
    """
    The density profile [GeV/cm**3] of an NFW halo.

    Parameters
    ----------
    r : the distance from the center [kpc]
    rs : the NFW r_s parameter [kpc]
    rhos : the NFW rho_s parameter [GeV/cm**3]
    """
    res = rhos / (r / rs) / (1. + r / rs)**2
    return res
3b8f97713610c1622815e15f13a75d39ad7e64ba
7,827
def breakup_names(df):
    """Breakup full name into surname, title, first name"""
    df[['surname', 'given_name']] = df['Name'].str.split(",", expand=True)
    df[['title', 'first_name']] = df['given_name'].str.split(n=1, expand=True)
    df = df.drop(columns=['Name', 'given_name'])
    return df
ee76975b88702daf47fa9638c4d1217ca22e1e6e
7,830
from pathlib import Path


def image_content_type(outfile: Path) -> str:
    """ Derives a content type from an image file's suffix """
    return f"image/{outfile.suffix[1:]}"
4c1545dbdcaa31826fd8567cabe2935ed7237961
7,832
def play_again() -> str:
    """Ask the user if he wants to play another turn"""
    yes_or_no = input('Play again? [y/n]: ')
    while yes_or_no not in ['y', 'n']:
        yes_or_no = input('Please insert "y" or "n": ')
    return yes_or_no
8b3d74456b7ce13a0ffffab8dc9e946cbb5e594c
7,833
def scale_image(image, new_width=600):
    """ scales the image to new_width while maintaining aspect ratio """
    new_w = new_width
    (old_w, old_h) = image.size
    aspect_ratio = float(old_h) / float(old_w)
    new_h = int(aspect_ratio * new_w)
    new_dim = (new_w, new_h)
    image = image.resize(new_dim)
    return image
e9bfdf6309cf97b1a7f44dff96b655aa09d64fd5
7,835
def percentage(sub, all):
    """Calculate percent relation between "sub" and "all".

    Args:
        sub (int): Some value.
        all (int): Maximum value.

    Returns:
        int: (sub * 100) / all
    """
    return int((sub * 100) / all)
21b398c81f76de0ec81be9d26b2f79d8b0d08edc
7,837
def value(colors: list):
    """
    Each resistor has a resistance value. Manufacturers print color-coded
    bands onto the resistors to denote their resistance values. Each band
    acts as a digit of a number.

    The program will take two colors as input, and output the correct number.

    :param colors:
    :return:
    """
    encoded_colors = {
        'black': 0,
        'brown': 1,
        'red': 2,
        'orange': 3,
        'yellow': 4,
        'green': 5,
        'blue': 6,
        'violet': 7,
        'grey': 8,
        'white': 9
    }
    result = ''
    for color in colors:
        if color not in encoded_colors.keys():
            raise Exception("Invalid color: {}".format(color))
        result += str(encoded_colors[color])
    return int(result)
14a52f12bfcfccd921ade8fa7708d3563c9c2508
7,838
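A usage sketch for the value function above, with resistor band colors:

assert value(['brown', 'black']) == 10
assert value(['yellow', 'violet']) == 47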
def quote_columns_data(data: str) -> str:
    """When projecting Queries using dot notation (f.e. inventory [ facts.osfamily ])
    we need to quote the dot in such column name for the DataTables library or it
    will interpret the dot as a way to get into a nested results object.

    See https://datatables.net/reference/option/columns.data#Types."""
    return data.replace('.', '\\.')
db5e82e5d3641bebcac069ac4d5a7bb42baafcbb
7,843