content: string (39 – 14.9k characters)
sha1: string (40 characters)
id: int64 (0 – 710k)
import random


def get_random_number_with_zero(min_num: int, max_num: int) -> str:
    """
    Get a random number in range min_num - max_num, zero-padded to the width of max_num.

    @param min_num: the lowest number of the range in which the number should be generated
    @param max_num: the highest number of the range in which the number should be generated
    @return: A string like "00183"
    """
    return str(random.randint(min_num, max_num)).zfill(len(str(max_num)))
bdc9f192286262566d2b2a87e8124abdf745ecbd
703,439
def union(list1, list2):
    """Union of two lists, returns the elements that appear in one list OR the other.

    Args:
        list1 (list): A list of elements.
        list2 (list): A list of elements.

    Returns:
        result_list (list): A list with the union elements.

    Examples:
        >>> union([1,2,3], [2,3,4])
        [1, 2, 3, 4]
    """
    return list(set(list1) | set(list2))
983e96ceb4f4eeb2b4b2d96362a0977be0cb2222
703,445
import re


def list_all_links_in_page(source: str):
    """Return all the urls in 'src' and 'href' tags in the source.

    Args:
        source: a string containing the source code of a webpage.

    Returns:
        A list of all the 'src' and 'href' links in the source code of the webpage.
    """
    return re.findall(
        r'src\s*=\s*"([^"]+)"',
        source,
    ) + re.findall(
        r'href\s*=\s*"([^"]+)"',
        source,
    )
f17f2ac2724fcfdd041e2ad001557a0565b51e00
703,446
def get_direction(source, destination):
    """Find the direction the drone needs to move to get from source to destination."""
    lat_diff = abs(source[0] - destination[0])
    long_diff = abs(source[1] - destination[1])
    if lat_diff > long_diff:
        if source[0] > destination[0]:
            return "S"
        else:
            return "N"
    else:
        if source[1] > destination[1]:
            return "W"
        else:
            return "E"
224a8df79cbafbcf1eed8df522ab7f58cc93598d
703,447
def identity(*args, **kwargs):
    """
    An identity function used as a default task to test the timing of.
    """
    return args, kwargs
472808f6b5260569538f26513f24ffcb1bd88c4d
703,450
def create_logdir(method, weight, label, rd):
    """Directory to save training logs, weights, biases, etc."""
    return "bigan/train_logs/mnist/{}/{}/{}/{}".format(weight, method, label, rd)
4b4edcc9c0c36720e76013a6fb0faf1b49472bc0
703,454
import re


def camel_case_to_title_case(camel_case_string):
    """
    Turn a Camel Case string into a Title Case string in which the first
    characters of all the words are capitalized.

    :param camel_case_string: Camel Case string
    :return: Title Case string
    """
    if not isinstance(camel_case_string, str):
        return None
    title_case = re.sub('([^-])([A-Z][a-z-]+)', r'\1 \2', camel_case_string)\
        .title()
    return title_case
bcc40753a8672355519741f5aec64c262431d582
703,456
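A quick usage sketch for camel_case_to_title_case above (illustrative only; the inputs are made-up examples and assume the function is in scope):

# Simple camelCase inputs are split on the interior capital and title-cased.
print(camel_case_to_title_case("helloWorld"))  # -> "Hello World"
print(camel_case_to_title_case(42))            # -> None (non-string input)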
def pmr_corr(vlos, r, d):
    """
    Correction on radial proper motion due to apparent contraction/expansion of the cluster.

    Parameters
    ----------
    vlos : float
        Line of sight velocity, in km/s.
    r : array_like, float
        Projected radius, in degrees.
    d : float
        Cluster distance from the Sun, in kpc.

    Returns
    -------
    pmr : array_like, float
        Correction in the radial component of the proper motion, in mas/yr.
    """
    r = r * 60
    # Equation 4 from Bianchini et al. 2018.
    pmr = -6.1363 * 1e-5 * vlos * r / d
    return pmr
c9136c5ae33e89d6f57b936b92c23001fd30ccfa
703,457
def get_min_corner(sparse_voxel):
    """
    Voxel should either be a schematic, a list of ((x, y, z), (block_id, ?)) objects
    or a list of coordinates.
    Returns the minimum corner of the bounding box around the voxel.
    """
    if len(sparse_voxel) == 0:
        return [0, 0, 0]
    # A schematic
    if len(sparse_voxel[0]) == 2 and len(sparse_voxel[0][0]) == 3 and len(sparse_voxel[0][1]) == 2:
        x, y, z = list(zip(*list(zip(*sparse_voxel))[0]))
    # A list of coordinates
    elif len(sparse_voxel[0]) == 3:
        x, y, z = list(zip(*sparse_voxel))
    else:
        raise Exception("Unknown schematic format")
    return min(x), min(y), min(z)
371b25b795a1a2ffeb0fc3724f01f1a329f917ae
703,458
from typing import Any


def validate_delta(delta: Any) -> float:
    """Make sure that delta is in a reasonable range.

    Args:
        delta (Any): Delta hyperparameter

    Raises:
        ValueError: Delta must be in [0,1].

    Returns:
        float: delta
    """
    if (delta > 1) | (delta < 0):
        raise ValueError("The delta value must be in [0,1]")
    return delta
1fd3084c047724a14df753e792b8500a103a34c0
703,461
def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header (401 error)"""
    return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
74c2e4e4188b608120f241aadb20d8480ac84008
703,464
import json
import copy


def get_args(request, required_args):
    """
    Helper function to get arguments for an HTTP request.

    Currently takes args from the top level keys of a json object or
    www-form-urlencoded for backwards compatibility.

    Returns a tuple (error, args) where if error is non-null, the request is
    malformed. Otherwise, args contains the parameters passed.
    """
    args = None
    if (
        request.requestHeaders.hasHeader('Content-Type') and
        request.requestHeaders.getRawHeaders('Content-Type')[0].startswith('application/json')
    ):
        try:
            args = json.load(request.content)
        except ValueError:
            request.setResponseCode(400)
            return {'errcode': 'M_BAD_JSON', 'error': 'Malformed JSON'}, None

    # If we didn't get anything from that, try the request args
    # (riot-web's usage of the ed25519 sign servlet currently involves
    # sending the params in the query string with a json body of 'null')
    if args is None:
        args = copy.copy(request.args)

        # Twisted supplies everything as an array because it's valid to
        # supply the same params multiple times with www-form-urlencoded
        # params. This makes it incompatible with the json object though,
        # so we need to convert one of them. Since this is the
        # backwards-compat option, we convert this one.
        for k, v in args.items():
            if isinstance(v, list) and len(v) == 1:
                args[k] = v[0]

    missing = []
    for a in required_args:
        if a not in args:
            missing.append(a)

    if len(missing) > 0:
        request.setResponseCode(400)
        msg = "Missing parameters: " + (",".join(missing))
        return {'errcode': 'M_MISSING_PARAMS', 'error': msg}, None

    return None, args
b44e058590945211ca410005a5be2405b4756ca4
703,466
def determine_header_length(trf_contents: bytes) -> int:
    """Returns the header length of a TRF file.

    Determined through brute force reverse engineering only. Not based
    upon official documentation.

    Parameters
    ----------
    trf_contents : bytes

    Returns
    -------
    header_length : int
        The length of the TRF header, prior to the table portion of the TRF file.
    """
    column_end = b"\t\xdc\x00\xe8\t\xdc\x00\xe9\t\xdc\x00\xea\t\xdc\x00\xeb\t\xdc\x00"
    header_length = trf_contents.index(column_end) + len(column_end)

    # test = trf_contents.split(b"\t")
    # row_skips = 6
    # i = next(i for i, item in enumerate(test[row_skips::]) if len(item) > 3) + row_skips
    # header_length_old_method = len(b"\t".join(test[0:i])) + 3

    # if header_length_old_method != header_length:
    #     raise ValueError("Inconsistent header length determination")

    return header_length
e8aa5110691e877c34f208af5bd508f0f5ec4760
703,469
import re


def _join_lines(source):
    """Remove Fortran line continuations"""
    return re.sub(r'[\t ]*&[\t ]*[\r\n]+[\t ]*&[\t ]*', ' ', source)
9caa3b6f470a96a7b473f3cce12f57d3787e91dd
703,470
import requests


def get_kalliope_bijlage(path, session):
    """
    Perform the API-call to get a poststuk-uit bijlage.

    :param path: url of the api endpoint that we want to fetch
    :param session: a Kalliope session, as returned by open_kalliope_api_session()
    :returns: buffer with bijlage
    """
    r = session.get(path)
    if r.status_code == requests.codes.ok:
        return r.content
    else:
        raise requests.exceptions.HTTPError(
            'Failed to get Kalliope poststuk bijlage (statuscode {})'.format(r.status_code))
b2a126b8e33bab50b1e2aa122645d7a45d6dfea9
703,472
def filter_by_book_style(bibtexs, book_style):
    """Returns bibtex objects of the selected book type.

    Args:
        bibtexs (list of core.models.Bibtex): queryset of Bibtex.
        book_style (str): book style key (e.g. JOURNAL)

    Returns:
        list of Bibtex objects
    """
    return [bib for bib in bibtexs if bib.bib_type_key == book_style]
ca3b46772930a6f6e28b6fc0ee4d175ee8d69c3c
703,474
import random


def random_val(index, tune_params):
    """return a random value for a parameter"""
    key = list(tune_params.keys())[index]
    return random.choice(tune_params[key])
1cf7ba9d1a3ff651f946a8013e338d62f4fec3ab
703,478
def coding_problem_19(house_costs):
    """
    A builder is looking to build a row of N houses that can be of K different colors.
    He has a goal of minimizing cost while ensuring that no two neighboring houses are
    of the same color. Given an N by K matrix where the nth row and kth column represents
    the cost to build the nth house with kth color, return the minimum cost.

    Example:

        >>> house_size = [1, 2, 4, 2]  # size of the house
        >>> color_cost = [1, 2, 4]  # cost of paint for each color
        >>> house_costs = [[cc * hs for cc in color_cost] for hs in house_size]  # 4 houses, 3 colors
        >>> coding_problem_19(house_costs)  # [1,2,4,2] . [1,2,1,2] == 1*1 + 2*2 + 1*4 + 2*2 == 13
        13

    Notes: this is dynamic programming in disguise. We assign a color to each house in order,
    and keep track of the minimum cost associated with each choice of color. These costs are
    the minimum over all possible permutations in which the last added house is of a
    particular color.
    """
    best_costs = [0] * len(house_costs[0])
    for color_costs in house_costs:  # color_costs are those paid to paint the last added house with a certain color
        best_costs = [color_costs[i] + min(best_costs[:i] + best_costs[i + 1:]) for i in range(len(best_costs))]
    return min(best_costs)
f9d8b33f449d7a2f87118be3e4d894988b781476
703,486
import requests


def requests_get(url):
    """Make a get request using requests package.

    Args:
        url (str): url to do the request

    Raises:
        ConnectionError: If a RequestException occurred.

    Returns:
        response (str): the response from the get request
    """
    try:
        r = requests.get(url)
    except requests.exceptions.RequestException:
        raise ConnectionError
    return r
a438d211e6b5bf78af56783b5d22c85961a2d563
703,487
def cases_vs_deaths(df):
    """Checks that death count is no more than case count."""
    return (df['deaths'] <= df['cases']).all()
994ae93fb23090de50fc4069342487d0d8e621ed
703,489
import re


def add_pronom_link_for_puids(text):
    """If text is a PUID, add a link to the PRONOM website"""
    PUID_REGEX = r"fmt\/[0-9]+|x\-fmt\/[0-9]+"  # regex to match fmt/# or x-fmt/#
    if re.match(PUID_REGEX, text) is not None:
        return '<a href="https://nationalarchives.gov.uk/PRONOM/{}" target="_blank">{}</a>'.format(
            text, text
        )
    return text
5fdc9c15895dfd75be54ad0258e66625462204a2
703,491
import random


def shuffle_string(s):
    """Shuffle a string."""
    if s is None:
        return None
    else:
        return ''.join(random.sample(s, len(s)))
25d109f11737b60cecf391fd955f2df4366de7e6
703,494
def get_rule_full_description(tool_name, rule_id, test_name, issue_dict):
    """
    Constructs a full description for the rule.

    :param tool_name:
    :param rule_id:
    :param test_name:
    :param issue_dict:
    :return:
    """
    issue_text = issue_dict.get("issue_text", "")
    # Extract just the first line alone
    if issue_text:
        issue_text = issue_text.split("\n")[0]
    if not issue_text.endswith("."):
        issue_text = issue_text + "."
    return issue_text
546dcb5ce2cbc22db652b3df2bf95f07118611cf
703,495
import itertools


def check_length(seed, random, query_words, key_max):
    """
    Google limits searches to 32 words, so we need to make sure we won't be
    generating anything longer.

    Need to consider:
    - number of words in seed
    - number of words in random phrase
    - number of words in the lists from the query

    Will raise an exception if there are too many words.

    Args:
        seed: the seed for segment 1
        random: the random query string for segment 0
        query_words: object with key=name of dimension, value=list of keywords to use in query
        key_max: the maximum number of words (32 in Google's case)

    Returns:
        bool: True for correct number of words, False for too many
    """
    all_query_words = " ".join(list(itertools.chain.from_iterable(query_words))).split(' ')
    total_words = len(seed.split(" ")) + len(random.split(" ")) + len(all_query_words)
    if total_words <= key_max:
        return True
    else:
        message = "The maximum number of keywords is: {}\nYou have: {}".format(key_max, total_words)
        raise Exception(message)
14871b468454f324223673a0c57941ea9e63341a
703,496
def is_string(val):
    """
    Is the supplied value a string or unicode string?

    See: https://stackoverflow.com/a/33699705/324122
    """
    return isinstance(val, (str, u"".__class__))
99b082ec080f261a7485a4e8e608b7350997cf18
703,497
def context_get(stack, name):
    """
    Find and return a name from a ContextStack instance.
    """
    return stack.get(name)
a5a9a50c54e8f0f685e0cf21991e5c71aee0c3d6
703,501
def tree_names(tree):
    """Get the top-level names in a tree (including files and directories)."""
    return [x[0] for x in list(tree.keys()) + tree[None] if x is not None]
12a5522974671f3ab81f3a1ee8e8c4db77785bd3
703,504
def fixed_width_repr_of_int(value, width, pad_left=True):
    """
    Format the given integer and ensure the result string is of the given width.

    The string will be padded with spaces on the left if the number is small, or
    replaced with a string of asterisks if the number is too big.

    :param int value: An integer number to format
    :param int width: The result string must have the exact width
    :return: A string representation of the given integer.
    """
    ret = '{:{pad_dir}{width}d}'.format(value, pad_dir='>' if pad_left else '<', width=width)
    return '*' * width if len(ret) > width else ret
adb212746dd081112ec1de2c4ea8745d2601c055
703,505
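A brief usage sketch for fixed_width_repr_of_int above (illustrative values only; assumes the function as fixed, with '<' used when pad_left is False):

print(fixed_width_repr_of_int(42, 5))                  # '   42' (right-aligned)
print(fixed_width_repr_of_int(42, 5, pad_left=False))  # '42   ' (left-aligned)
print(fixed_width_repr_of_int(123456, 5))              # '*****' (too wide for the field)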
def ConvertListToCSVString(csv_list):
    """Helper to convert a list to a csv string."""
    return ','.join(str(s) for s in csv_list)
547311ceac094211d47d0cc667d54b2a3e697f4e
703,508
def min_distance_bottom_up(word1: str, word2: str) -> int:
    """
    >>> min_distance_bottom_up("intention", "execution")
    5
    >>> min_distance_bottom_up("intention", "")
    9
    >>> min_distance_bottom_up("", "")
    0
    """
    m = len(word1)
    n = len(word2)
    dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:  # first string is empty
                dp[i][j] = j
            elif j == 0:  # second string is empty
                dp[i][j] = i
            elif word1[i - 1] == word2[j - 1]:  # last character of both substrings is equal
                dp[i][j] = dp[i - 1][j - 1]
            else:
                insert = dp[i][j - 1]
                delete = dp[i - 1][j]
                replace = dp[i - 1][j - 1]
                dp[i][j] = 1 + min(insert, delete, replace)
    return dp[m][n]
8cd87ffe877aa24d5b1fa36ce1d3b96efb7b4e1e
703,510
def periodize_filter_fourier(h_f, nperiods=1, aggregation='sum'):
    """
    Computes a periodization of a filter provided in the Fourier domain.

    Parameters
    ----------
    h_f : array_like
        complex numpy array of shape (N*n_periods,)
    n_periods: int, optional
        Number of periods which should be used to periodize
    aggregation: str['sum', 'mean'], optional
        'sum' will multiply subsampled time-domain signal by subsampling
        factor to conserve energy during scattering (rather not double-account
        for it since we already subsample after convolving).
        'mean' will only subsample the input.

    Returns
    -------
    v_f : array_like
        complex numpy array of size (N,), which is a periodization of
        h_f as described in the formula:
        v_f[k] = sum_{i=0}^{n_periods - 1} h_f[i * N + k]

    References
    ----------
    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/filter_bank.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    N = h_f.shape[0] // nperiods
    h_f_re = h_f.reshape(nperiods, N)
    v_f = (h_f_re.sum(axis=0) if aggregation == 'sum' else
           h_f_re.mean(axis=0))
    v_f = v_f if h_f.ndim == 1 else v_f[:, None]  # preserve dim
    return v_f
f9a2845dcf40eedce9d46faba506f1c1358ce6a5
703,511
def slope_id(csv_name):
    """
    Extract an integer slope parameter from the filename.

    :param csv_name: The name of a csv file of the form NWS-<SLP>-0.5.csv
    :return: int(<SLP>)
    """
    return int(csv_name.split("-")[1])
1ae34f1e5cc91fdc3aaff9a78e1ba26962475625
703,513
def minbox(points):
    """Returns the minimal bounding box necessary to contain points.

    Args:
        points (tuple, list, set): ((0,0), (40, 55), (66, 22))

    Returns:
        dict: {ulx, uly, lrx, lry}

    Example:
        >>> minbox(((0, 0), (40, 55), (66, 22)))
        {'ulx': 0, 'uly': 55, 'lrx': 66, 'lry': 0}
    """
    x, y = [point[0] for point in points], [point[1] for point in points]
    return {'ulx': min(x), 'lrx': max(x), 'lry': min(y), 'uly': max(y)}
d8b11d40b52886f290d28f3434b17d2b9641c4fb
703,514
def apply_regression(df, w):
    """
    Apply regression for different classifiers.

    @param df pandas dataframe;
    @param w dict[classifier: list of weights];
    @return df with appended result.
    """
    # get input
    if 'state' in df.columns:
        x = df.drop('state', axis=1).to_numpy(dtype='float64')
    else:
        x = df.to_numpy(dtype='float64')

    # initialize result
    new = df.copy()

    for classifier, wi in w.items():
        # evaluate output
        y = x @ wi
        # append output
        new[f'lms_{classifier}'] = y

    return new
1fb5fd9f4b297cd024e405dc5d9209213d28ff0d
703,516
def get_lambda_cloud_watch_func_name(stackname, asg_name, instanceId):
    """
    Generate the name of the cloud watch metrics as a function of the ASG name
    and the instance id.

    :param stackname:
    :param asg_name:
    :param instanceId:
    :return: str
    """
    name = asg_name + '-cwm-' + str(instanceId)
    return name[-63:len(name)]
893abaf60684cbf9d72774d6f5bb2c4351744290
703,518
def secondary_training_status_changed(current_job_description, prev_job_description):
    """Returns true if training job's secondary status message has changed.

    Args:
        current_job_description: Current job description, returned from DescribeTrainingJob call.
        prev_job_description: Previous job description, returned from DescribeTrainingJob call.

    Returns:
        boolean: Whether the secondary status message of a training job changed or not.
    """
    current_secondary_status_transitions = current_job_description.get("SecondaryStatusTransitions")
    if (
        current_secondary_status_transitions is None
        or len(current_secondary_status_transitions) == 0
    ):
        return False

    prev_job_secondary_status_transitions = (
        prev_job_description.get("SecondaryStatusTransitions")
        if prev_job_description is not None
        else None
    )

    last_message = (
        prev_job_secondary_status_transitions[-1]["StatusMessage"]
        if prev_job_secondary_status_transitions is not None
        and len(prev_job_secondary_status_transitions) > 0
        else ""
    )

    message = current_job_description["SecondaryStatusTransitions"][-1]["StatusMessage"]

    return message != last_message
b1d1a83cccb8cf84fa678345bcf7b3f6531aa2c5
703,519
def string_concat(str1, str2):
    """Concatenates two strings."""
    return str1 + str2
5b6d842fca2d3623d33341d9bba4e4a76ec29e15
703,520
def remove_spaces(input_text, main_rnd_generator=None, settings=None):
    """Removes spaces.

    main_rnd_generator argument is listed only for compatibility purposes.

    >>> remove_spaces("I love carrots")
    'Ilovecarrots'
    """
    return input_text.replace(" ", "")
a9ba3bcca4a4ff1610d52271562e053f25815618
703,521
from typing import List
from typing import Counter
import heapq


def top_k_frequent_pq(nums: List[int], k: int) -> List[int]:
    """Given a list of numbers, return the top k most frequent numbers.

    Solved using the priority queue approach.

    Example:
        nums: [1, 1, 1, 2, 2, 3], k=2
        output: [1, 2]

    1. Get the counts of each item, and turn them into tuples.
    2. Put the counts on a heap, popping the min off if the heap is > k
    3. Now we have a heap of size k with the largest elements
    (Note: We ensure k < n by checking at the start to get the times below.)

    Time: O(n log k): Where n is len(nums)
    Space: O(n+k): Where n is len(nums)
        n for the counts, and k for the heap
    """
    if k == len(nums):
        return nums

    # Build a hashmap of frequencies O(n)
    counts = Counter(nums)

    # Build the heap of size k O(n log k)
    heap = []
    for num, frequency in counts.items():  # O(n)
        heapq.heappush(heap, (frequency, num))  # O(k)
        if len(heap) > k:
            heapq.heappop(heap)

    # Get the elements in the heap O(k)
    return [element[1] for element in heap]
4d30bd7e11a087be1a23f9db5c8af7f78c083a5f
703,522
def monthFormat(month: int) -> str:
    """Formats a (zero-based) month number as a full month name, according to
    the current locale. For example: monthFormat(0) -> "January"."""
    months = [
        "January",
        "February",
        "March",
        "April",
        "May",
        "June",
        "July",
        "August",
        "September",
        "October",
        "November",
        "December",
    ]
    return months[month % 12]
cb675f547d9beec0751d8252e99b5c025b8fd291
703,524
def build_fnln_contact(individual_contact):
    """
    Expected parameter format for individual_contact:
        ('My Name', '[email protected]')
    Sample output:
        {'email': '[email protected]', 'name': 'My Name'}
    """
    return {
        "email": individual_contact[-1],
        "name": individual_contact[0]
    }
54080191320cfeb425de0f765f10e29726405d0d
703,525
import math


def bearing(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate bearing from lat1/lon1 to lat2/lon2.

    Arguments:
        lat1 {float} -- Start latitude
        lon1 {float} -- Start longitude
        lat2 {float} -- End latitude
        lon2 {float} -- End longitude

    Returns:
        float -- bearing in degrees
    """
    rlat1 = math.radians(lat1)
    rlat2 = math.radians(lat2)
    rlon1 = math.radians(lon1)
    rlon2 = math.radians(lon2)
    dlon = math.radians(lon2 - lon1)

    b = math.atan2(math.sin(dlon) * math.cos(rlat2),
                   math.cos(rlat1) * math.sin(rlat2) - math.sin(rlat1) * math.cos(rlat2) * math.cos(dlon))  # bearing calc
    bd = math.degrees(b)
    br, bn = divmod(bd + 360, 360)  # the bearing remainder and final bearing
    return bn
9148bc2726247cedf8a7de7279dac47316f2864f
703,536
def get_title(reg_doc):
    """Extract the title of the regulation."""
    parent = reg_doc.xpath('//PART/HD')[0]
    title = parent.text
    return title
744759addf0cea3ba6cc3dadad05d535b66b20d6
703,537
import random
import requests


def get_rising_submissions(subreddit):
    """Connects to the Reddit API and queries the top or rising submission from
    the specified subreddit.

    Parameters
    ----------
    subreddit : str
        The name of the subreddit without forward slashes.

    Returns
    -------
    tuple
        A tuple containing a formatted message and an image url.
    """
    endpoint = random.choice(['top', 'rising'])
    url = f"https://www.reddit.com/r/{subreddit}/{endpoint}.json?limit=1"

    headers = {"User-Agent": "Reddit Rising Checker v1.0"}

    with requests.get(url, headers=headers) as response:
        data = response.json()["data"]["children"]

        # Iterate over all the children.
        for item in data:
            item_data = item["data"]

            # We will collect only the fields we are interested in.
            title = item_data["title"]
            permalink = "https://reddit.com" + item_data["permalink"]
            author = item_data["author"]
            score = item_data["score"]
            image_url = item_data["url"]

            # Compose a Markdown message using string formatting.
            message = f"[{title}]({permalink})\nby **{author}**\n**{score:,}** points"

            return (message, image_url)
115edc8986acdefbb232218d237bcba7320db9a0
703,538
def new_headers() -> list:
    """Return list of new headers with clean names."""
    return [
        "date",
        "Rohs FB1",
        "Rohs FB2",
        "Rohs gesamt",
        "TS Rohschlamm",
        "Rohs TS Fracht",
        "Rohs oTS Fracht",
        "Faulschlamm Menge FB1",
        "Faulschlamm Menge FB2",
        "Faulschlamm Menge",
        "Temperatur FB1",
        "Temperatur FB2",
        "Faulschlamm pH Wert FB1",
        "Faulschlamm pH Wert FB2",
        "Faulbehaelter Faulzeit",
        "TS Faulschlamm",
        "Faulschlamm TS Fracht",
        "Faulbehaelter Feststoffbelastung",
        "GV Faulschlamm",
        "Faulschlamm oTS Fracht",
        "Kofermentation Bioabfaelle",
        # To be predicted.
        "Faulgas Menge FB1",
        "Faulgas Menge FB2",
    ]
c81c38e8521f159f0bbf29b3a32adeaa13d4c38f
703,541
import hashlib
import io


def calc_file_md5(filepath, chunk_size=None):
    """
    Calculate a file's md5 checksum. Use the specified chunk_size for IO or the
    default 256KB.

    :param filepath:
    :param chunk_size:
    :return:
    """
    if chunk_size is None:
        chunk_size = 256 * 1024

    md5sum = hashlib.md5()
    with io.open(filepath, 'r+b') as f:
        datachunk = f.read(chunk_size)
        while datachunk is not None and len(datachunk) > 0:
            md5sum.update(datachunk)
            datachunk = f.read(chunk_size)

    return md5sum.hexdigest()
8f391ade85a5b69ca63d8adb3eff6ff6af7a08e3
703,542
import struct


def _read_int(f, b):
    """Read an integer of a specified number of bytes from the filestream f."""
    if b == 1:
        return struct.unpack('<B', f.read(b))[0]
    elif b == 2:
        return struct.unpack('<H', f.read(b))[0]
    elif b == 4:
        return struct.unpack('<I', f.read(b))[0]
    elif b == 8:
        return struct.unpack('<Q', f.read(b))[0]
36bcc2ccd4edd61fd9c6a438c3a069e699b7b17a
703,544
def terminal_values(rets):
    """
    Computes the terminal values from a set of returns supplied as a T x N DataFrame.
    Return a Series of length N indexed by the columns of rets.
    """
    return (rets + 1).prod()
bea1f2a668deac3e915f7531ab68aea207e57d91
703,551
def getColumnNames(hdu):
    """Get names of columns in HDU."""
    return [d.name for d in hdu.get_coldefs()]
9ac8fefe6fcf0e7aed10699f19b849c14b022d47
703,553
def get_url(server_name, listen_port):
    """
    Generate the URL to get the information from the namenode.

    :param server_name:
    :param listen_port:
    :return:
    """
    if listen_port < 0:
        print("Invalid Port")
        exit()
    if not server_name:
        print("Pass valid Hostname")
        exit()

    URL = "http://" + server_name + ":" + str(listen_port) + "/jmx"
    return URL
097e347a75eb581ae97734552fa6f9e7d3b11ce6
703,555
def latex_code(size):
    """Get LaTeX code for size"""
    return "\\" + size + " "
03478f8f62bb2b70ab5ca6ece66d4cdca3595dd6
703,557
def has_cycle_visit(visiting, parents, adjacency_list, s):
    """Check if there is a cycle in a graph starting at the node s."""
    visiting[s] = True
    for u in adjacency_list[s]:
        if u in visiting:
            return True
        if u in parents:
            continue
        parents[u] = s
        if has_cycle_visit(visiting, parents, adjacency_list, u):
            return True
    del visiting[s]
    return False
5f87f6f39d88cbae93e7e461590002ca6a94aa20
703,558
def remove_trailing_whitespace(line):
    """Removes trailing whitespace, but preserves the newline if present."""
    if line.endswith("\n"):
        return "{}\n".format(remove_trailing_whitespace(line[:-1]))
    return line.rstrip()
2c5f7b3a35152b89cda6645911569c9d2815f47e
703,559
import networkx


def make_cross(length=20, width=2) -> networkx.Graph:
    """Builds graph which looks like a cross.

    Result graph has (2*length-width)*width vertices.

    For example, this is a cross of width 3:

              ...
              +++
              +++
        ...+++++++++++...
        ...+++++++++++...
        ...+++++++++++...
              +++
              +++
              ...

    :param length: Length of a cross.
    :param width: Width of a cross.
    """
    assert width < length * 2
    nodes = set()
    for i in range(length // 2, length // 2 + width):
        for j in range(0, length):
            nodes.add((i, j))
            nodes.add((j, i))
    nodes = list(nodes)
    node_index = {nodes[i]: i for i in range(len(nodes))}
    graph = networkx.Graph()
    graph.add_nodes_from(range(len(nodes)))
    for x, y in nodes:
        for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
            if (x + dx, y + dy) in node_index:
                graph.add_edge(node_index[(x, y)], node_index[(x + dx, y + dy)])
    return graph
f0be683fd8971ea8b3cc0b40977a939c117e9906
703,561
def must_be_known(in_limit, out_limit):
    """
    Logical combination of limits enforcing a known state.

    The logic determines that we know that the device is fully inserted or
    removed, alerting the MPS if the device is stuck in an unknown state or
    broken.

    Parameters
    ----------
    in_limit : ``bool``
        Whether the in limit is active

    out_limit: ``bool``
        Whether the out limit is active

    Returns
    -------
    is_known
        Whether the logical combination of the limit switches ensures that the
        device position is known
    """
    return in_limit != out_limit
241b66b359643d069aa4066965879bb6ac76f2ae
703,564
def scale(x):
    """Scales values to [-1, 1].

    **Parameters**

    :x: array-like, shape = arbitrary; unscaled data

    **Returns**

    :x_scaled: array-like, shape = x.shape; scaled data
    """
    minimum = x.min()
    return 2.0 * (x - minimum) / (x.max() - minimum) - 1.0
e5c40a4a840a1fa178a2902378a51bfefc83b368
703,565
def GetStaticPipelineOptions(options_list):
    """
    Takes the dictionary loaded from the yaml configuration file and returns it
    in a form consistent with the others in GenerateAllPipelineOptions:
    a list of (pipeline_option_name, pipeline_option_value) tuples.

    The options in the options_list are a dict:
        Key is the name of the pipeline option to pass to beam
        Value is the value of the pipeline option to pass to beam
    """
    options = []
    for option in options_list:
        if len(list(option.keys())) != 1:
            raise Exception('Each item in static_pipeline_options should only have'
                            ' 1 key/value')
        option_kv = list(option.items())[0]
        options.append((option_kv[0], option_kv[1]))
    return options
d49effbdeb687ec62a2e2296330f66521023944c
703,567
import json


def read_json(json_file_path: str) -> dict:
    """Takes a JSON file and returns a dictionary."""
    with open(json_file_path, "r") as fp:
        data = json.load(fp)
    return data
07cb6c606de83b2b51ddcbf64f7eb45d6907f973
703,572
def enthalpy_Shomate(T, hCP):
    """
    enthalpy_Shomate(T, hCP)

    NIST vapor, liquid, and solid phases heat capacity correlation

    H - H_ref (kJ/mol) = A*t + 1/2*B*t^2 + 1/3*C*t^3 + 1/4*D*t^4 - E*1/t + F - H

    t (K) = T/1000.0

    H_ref is enthalpy in kJ/mol at 298.15 K

    Parameters
        T, temperature in Kelvin
        hCP, A=hCP[0], B=hCP[1], C=hCP[2], D=hCP[3], E=hCP[4], F=hCP[5], H=hCP[7]
        A, B, C, D, E, F, and H are regression coefficients

    Returns
        enthalpy in kJ/mol at T relative to enthalpy at 298.15 K
    """
    t = T / 1000.0
    return hCP[0]*t + hCP[1]/2*t**2 + hCP[2]/3*t**3 + hCP[3]/4*t**4 - hCP[4]/t + hCP[5] - hCP[7]
05e3f3a35a87f767a44e2e1f4e35e768e12662f7
703,577
def get_acres(grid, coordinates):
    """Get acres from coordinates on grid."""
    acres = []
    for row, column in coordinates:
        if 0 <= row < len(grid) and 0 <= column < len(grid[0]):
            acres.append(grid[row][column])
    return acres
e4ba6aabe07d8859481aefaba4d4559f1ec25e96
703,578
def highest_mag(slide):
    """Returns the highest magnification for the slide."""
    return int(slide.properties['aperio.AppMag'])
d42361c979a5addf0ebf4c7081a284c9bc0477ec
703,579
import re


def enumerate_destination_file_name(destination_file_name):
    """
    Append a * to the end of the provided destination file name.
    Only used when query output is too big and Google returns an error
    requesting multiple file names.
    """
    if re.search(r'\.', destination_file_name):
        destination_file_name = re.sub(
            r'\.', '_*.', destination_file_name, 1)
    else:
        destination_file_name = f'{destination_file_name}_*'

    return destination_file_name
6b398597db26a175e305446ac39c363a5077ba96
703,581
def combine_envs(*envs):
    """Combine zero or more dictionaries containing environment variables.

    Environment variables from dictionaries later in the list take priority
    over those earlier in the list. For variables ending with ``PATH``, we
    prepend (and add a colon) rather than overwriting.

    If you pass in ``None`` in place of a dictionary, it will be ignored.
    """
    result = {}
    for env in envs:
        if env:
            for key, value in env.items():
                if key.endswith('PATH') and result.get(key):
                    result[key] = '%s:%s' % (value, result[key])
                else:
                    result[key] = value

    return result
feb6e00b9c0b1262220339feac6c5ac2ae6b6b17
703,585
def pattern_matching(pattern, genome):
    """Find all occurrences of a pattern in a string.

    Args:
        pattern (str): pattern string to search in the genome string.
        genome (str): search space for pattern.

    Returns:
        List, list of int, i.e. all starting positions in genome where pattern
        appears as a substring.

    Examples:
        Find all the starting positions for a pattern string in the genome string.

        1.
        >>> pattern = 'ATAT'
        >>> genome = 'GATATATGCATATACTT'
        >>> positions = pattern_matching(pattern, genome)
        >>> positions
        [1, 3, 9]

        2.
        >>> pattern = 'CTTGATCAT'
        >>> genome = 'CTTGATCATCTTGATCATCTTGATCAT'
        >>> positions = pattern_matching(pattern, genome)
        >>> positions
        [0, 9, 18]
    """
    positions = []  # output variable
    for i in range(len(genome) - len(pattern) + 1):
        if genome[i:i + len(pattern)] == pattern:
            positions.append(i)
    return positions
86ae704586fbac937e044f41a8831f2669c4e7dc
703,594
import glob


def findLblWithoutImg(pathI, pathII):
    """
    :param pathI: a glob path for images, for example: "D:/大块煤数据/大块煤第三次标注数据/images/*.jpg"
    :param pathII: a glob path for labels, for example: "D:/大块煤数据/大块煤第三次标注数据/labels/*.txt"
    :return: number of label files that have no matching image
    """
    num = 0
    pathI = glob.glob(pathI)
    pathII = glob.glob(pathII)
    imgNames = [i.split("\\")[-1].split(".")[0] for i in pathI]
    lblNames = [i.split("\\")[-1].split(".")[0] for i in pathII]
    for lbl in lblNames:
        if lbl not in imgNames:
            print(lbl)
            num += 1
    return num
30edbff244acb0014b54cf3c85d86a47d779d226
703,595
import math


def lcf_float(val1, val2, tolerance):
    """Finds lowest common floating point factor between two floating point numbers."""
    i = 1.0
    while True:
        test = float(val1) / i
        check = float(val2) / test
        floor_check = math.floor(check)
        compare = floor_check * test
        if val2 - compare < (tolerance / 2.0):
            return test
        i += 1.0
b4bef1a63984440f43a1b0aa9bf9805cb4bfd466
703,596
def _convert_text_to_logs_format(text: str) -> str:
    """Convert text into format that is suitable for logs.

    Arguments:
        text: text that should be formatted.

    Returns:
        Shape for logging in loguru.
    """
    max_log_text_length = 50
    start_text_index = 15
    end_text_index = 5

    return (
        "...".join((text[:start_text_index], text[-end_text_index:]))
        if len(text) > max_log_text_length
        else text
    )
4f7ff95a5cd74bdccd41559f58292cc9b1003ba2
703,598
def digits_to_num(L, reverse=False):
    """Returns a number from a list of digits, given by the lowest power of 10
    to the highest, or the other way around if `reverse` is True."""
    digits = reversed(L) if reverse else L
    n = 0
    for i, d in enumerate(digits):
        n += d * (10 ** i)
    return n
1648073d46411fd430afea4f40f7fa9f4567793c
703,600
def parse_problems(lines):
    """Given a list of lines, parses them and returns a list of problems."""
    return [len(p) for p in lines]
83747e57bddf24484633e38ce27c51c7c8fce971
703,605
def create_class_hierarchy_dict(cls):
    """Returns the dictionary with all members of the class ``cls`` and its superclasses."""
    dict_extension_order = cls.__mro__[-2::-1]  # Reversed __mro__ without the 'object' class
    attrs_dict = {}
    for base_class in dict_extension_order:
        attrs_dict.update(vars(base_class))
    return attrs_dict
0378fb3243a48964fcec42fddb38c0664a338ee4
703,608
def encode(string):
    """Encode some critical characters to html entities."""
    return string.replace('&', '&amp;') \
                 .replace('<', '&lt;') \
                 .replace('>', '&gt;') \
                 .replace(' ', '&nbsp;&nbsp;') \
                 .replace('\n', '<br>') if string else ''
faea4b2f2e032b5e05e2c99bc56a1cf35e60b85d
703,610
def rem(x, a):
    """
    x: a non-negative integer argument
    a: a positive integer argument

    returns: integer, the remainder when x is divided by a.
    """
    if x == a:
        return 0
    elif x < a:
        return x
    else:
        return rem(x - a, a)
22b421c090970810f9aa4d55ff5a700e1301c0b8
703,611
def identity(value):
    """
    Node returning the input value.
    """
    return value
e1af1346b11bc36a4426ad3c557dc01045657de8
703,616
import re


def parse_members_for_workspace(toml_path):
    """Parse members from Cargo.toml of the workspace."""
    with open(toml_path, mode='rb') as f:
        data = f.read()
    manifest = data.decode('utf8')

    regex = re.compile(r"^members\s*=\s*\[(.*?)\]", re.S | re.M)
    members_block = regex.findall(manifest)[0]

    out = []
    members = members_block.split('\n')
    regex2 = re.compile(r'\s*"(.*?)".*')
    for mem in members:
        if (len(mem.strip()) == 0) or re.match(r".*#\signore", mem):
            continue
        out.append(regex2.findall(mem)[0])

    return out
63926ac1eaadc360e2407f2fff5f41b7667e52b4
703,618
def decodeXMLName(name):
    """
    Decodes an XML (namespace, localname) pair from an ASCII string as encoded
    by encodeXMLName().
    """
    if name[0] != "{":
        return (None, name.decode("utf-8"))
    index = name.find("}")
    if index == -1 or not len(name) > index:
        raise ValueError("Invalid encoded name: %r" % (name,))
    return (name[1:index].decode("utf-8"), name[index + 1:].decode("utf-8"))
28a81aed2477b444ac061b21ca838912ee2bf24b
703,620
def plural(items_or_count,
           singular: str,
           count_format='',
           these: bool = False,
           number: bool = True,
           are: bool = False) -> str:
    """Returns the singular or plural form of a word based on a count."""
    try:
        count = len(items_or_count)
    except TypeError:
        count = items_or_count

    prefix = ('this ' if count == 1 else 'these ') if these else ''
    num = f'{count:{count_format}} ' if number else ''
    suffix = (' is' if count == 1 else ' are') if are else ''

    if singular.endswith('y'):
        result = f'{singular[:-1]}{"y" if count == 1 else "ies"}'
    elif singular.endswith('s'):
        result = f'{singular}{"" if count == 1 else "es"}'
    else:
        result = f'{singular}{"" if count == 1 else "s"}'

    return f'{prefix}{num}{result}{suffix}'
01f397579b60538f25710566507973bdc6422874
703,627
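A short usage sketch for plural above (inputs are invented examples; assumes the function is in scope):

print(plural(3, 'baby'))              # '3 babies'
print(plural(['only-item'], 'bus'))   # '1 bus' (count taken from len() of the list)
print(plural(2, 'file', are=True))    # '2 files are'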
def role_object_factory(role_name='role.test_role'):
    """Cook up a fake role."""
    role = {
        'nameI18n': role_name[:32],
        'active': 1
    }
    return role
e79e7b22a83d9da721b074d32878eabeca4054cd
703,629
import itertools def create_possible_expected_messages(message_template, expected_types): """ Some error messages contain dictionaries with error types. Dictionaries in Python are unordered so it makes testing unpredictable. This function takes in a message template and a list of types and returns a list that contains error messages for every permutation of the types. Args: message_template (str): A string with a single format argument ({}). expected_types (list): A list of types. Ex: [str, int] Returns: list: A list of the given error message formatted with all permutations of the given type list. """ # Create an iterable of all the permutations of the types. # Can roughly be thought of as a list of tuples type_premutations = itertools.permutations(expected_types) # For each permutation, map each type in the permutation to a string # and remove '<' and '>' since this is how our exceptions format them. formatted_string_types = [ map(lambda s: str(s).replace('<', '').replace('>', ''), t) for t in type_premutations ] # Finally, create an error message with each permutation. return [ message_template.format(', '.join(f)) for f in formatted_string_types ]
674dec990e0786d3e313a9b8deec72e37588d8eb
703,630
from pathlib import Path


def fp_search_rglob(spt_root="../",
                    st_rglob='*.py',
                    ls_srt_subfolders=None,
                    verbose=False,
                    ):
    """Searches for files with search string in a list of folders.

    Parameters
    ----------
    spt_root : string
        root folder to search in
    st_rglob : string
        search for files with this rglob string
    ls_srt_subfolders : :obj:`list` of :obj:`str`
        a list of subfolders of the spt_root to search in
    verbose: bool
        print details

    Returns
    -------
    :obj:`list` of :obj:`str`
        A list of file names

    Examples
    --------
    >>> ls_spn = fp_search_rglob(spt_root="../",
    >>>                          ls_srt_subfolders=['rmd', 'pdf'],
    >>>                          st_rglob='*d*.py')
    [WindowsPath('../rmd/bookdownparse.py'), WindowsPath('../rmd/mattexmd.py'),
     WindowsPath('../rmd/rmdparse.py'), WindowsPath('../pdf/pdfgen.py')]
    """
    # Directories to search in
    if ls_srt_subfolders is None:
        # ls_srt_subfolders = ['calconevar/', 'derivative/', 'derivative_application/',
        #                      'matrix_basics/',
        #                      'opti_firm_constrained/', 'opti_hh_constrained_brsv/',
        #                      'opti_hh_constrained_brsv_inequality/']
        # ls_srt_subfolders = ['matrix_basics/']
        ls_spt = [spt_root]
    else:
        ls_spt = [spt_root + srt_subf for srt_subf in ls_srt_subfolders]

    if verbose:
        print(ls_spt)

    # get file names
    ls_spn_found_tex = [spn_file for spt_srh in ls_spt
                        for spn_file in Path(spt_srh).rglob(st_rglob)]

    if verbose:
        print(ls_spn_found_tex)

    return ls_spn_found_tex
e8fe03f9549ae77147ec96263411db9273d2c4cb
703,636
def get_minimum(feature):
    """Get minimum of either a single Feature or Feature group."""
    if hasattr(feature, "location"):
        return feature.location.min()
    return min(f.location.min() for f in feature)
aec98f5bc065c6a97d32bc9106d67579f76f7751
703,637
def get_player_actions(game, player):
    """
    Returns player's actions for a game.

    :param game: game.models.Game
    :param player: string
    :rtype: set
    """
    qs = game.action_set.filter(player=player)
    return set(list(qs.values_list('box', flat=True)))
9b961afbee7c3a0e8f44e78c269f37a9b6613488
703,643
def create_lkas_ui(packer, main_on, enabled, steer_alert):
    """Creates a CAN message for the Ford Steer Ui."""
    if not main_on:
        lines = 0xf
    elif enabled:
        lines = 0x3
    else:
        lines = 0x6

    values = {
        "Set_Me_X80": 0x80,
        "Set_Me_X45": 0x45,
        "Set_Me_X30": 0x30,
        "Lines_Hud": lines,
        "Hands_Warning_W_Chime": steer_alert,
    }
    return packer.make_can_msg("Lane_Keep_Assist_Ui", 0, values)
16c226698160b14194daf63baf61bb3952e1cd59
703,644
import torch


def get_pytorch_device() -> torch.device:
    """Checks if a CUDA enabled GPU is available, and returns the appropriate
    device, either CPU or GPU.

    Returns
    -------
    device : torch.device
    """
    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    return device
0109f3146c96bec08bbe6fa62e5a8bc5638e461c
703,645
import time
import random


def creat_order_num(user_id):
    """
    Generate an order number.

    :param user_id: user id
    :return: order number
    """
    time_stamp = int(round(time.time() * 1000))
    randomnum = '%04d' % random.randint(0, 100000)
    order_num = str(time_stamp) + str(randomnum) + str(user_id)
    return order_num
339764f7dc943c46d959a9bfdc6c48dc9ecd6bef
703,646
def get_rst_title_char(level):
    """Return character used for the given title level in rst files.

    :param level: Level of the title.
    :type: int
    :returns: Character used for the given title level in rst files.
    :rtype: str
    """
    chars = (u'=', u'-', u'`', u"'", u'.', u'~', u'*', u'+', u'^')
    if level < len(chars):
        return chars[level]
    return chars[-1]
b646b9e0010d87ece7f5c8c53c8abf89ca557a21
703,647
def format_sqlexec(result_rows, maxlen):
    """
    Format rows of a SQL query as a discord message, adhering to a maximum length.
    If the message needs to be truncated, a (truncated) note will be added.
    """
    codeblock = "\n".join(str(row) for row in result_rows)
    message = f"```\n{codeblock}```"
    if len(message) < maxlen:
        return message
    else:
        note = "(truncated)"
        return f"```\n{codeblock[:maxlen - len(note)]}```{note}"
3b98590c72245241ba488e07fdfdd20acae441cf
703,652
import re


def regex_sub_groups_global(pattern, repl, string):
    """
    Globally replace all groups inside pattern with `repl`.
    If `pattern` doesn't have groups the whole match is replaced.
    """
    for search in reversed(list(re.finditer(pattern, string))):
        for i in range(len(search.groups()), 0 if search.groups() else -1, -1):
            start, end = search.span(i)
            string = string[:start] + repl + string[end:]
    return string
67e909778d9565d498fc3e7b3c9522378e971846
703,653
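An illustrative usage sketch for regex_sub_groups_global above (the pattern and input strings are made-up examples; assumes the function is in scope):

# Replace only the captured digit groups, keeping the surrounding text.
print(regex_sub_groups_global(r'(\d+)-(\d+)', 'X', 'range 10-20 and 30-40'))  # 'range X-X and X-X'
# With no groups in the pattern, the whole match is replaced.
print(regex_sub_groups_global(r'\d+', 'N', 'a1b22'))  # 'aNbN'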
def deregister_device(device):
    """
    Task that deregisters a device.

    :param device: device to be deregistered.
    :return: response from SNS
    """
    return device.deregister()
44bec0e3ac356f150a0c4a6a0632939a20caafb0
703,656
def sequential_colors(n):
    """
    Between 3 and 9 sequential colors.

    .. seealso:: `<https://personal.sron.nl/~pault/#sec:sequential>`_
    """
    # https://personal.sron.nl/~pault/
    # as implemented by drmccloy here https://github.com/drammock/colorblind
    assert 3 <= n <= 9
    cols = ['#FFFFE5', '#FFFBD5', '#FFF7BC', '#FEE391', '#FED98E', '#FEC44F',
            '#FB9A29', '#EC7014', '#D95F0E', '#CC4C02', '#993404', '#8C2D04',
            '#662506']
    indices = [[2, 5, 8],
               [1, 3, 6, 9],
               [1, 3, 6, 8, 10],
               [1, 3, 5, 6, 8, 10],
               [1, 3, 5, 6, 7, 9, 10],
               [0, 2, 3, 5, 6, 7, 9, 10],
               [0, 2, 3, 5, 6, 7, 9, 10, 12]]
    return [cols[ix] for ix in indices[n - 3]]
d2ad5f8993f8c7dac99a577b6115a8452ad30024
703,657
def box2start_row_col(box_num):
    """
    Converts the box number to the corresponding row and column number of the
    square in the upper left corner of the box.

    :param box_num: Int
    :return: len(2) tuple
        [0] start_row_num: Int
        [1] start_col_num: Int
    """
    start_row_num = 3 * (box_num // 3)
    start_col_num = 3 * (box_num % 3)
    return start_row_num, start_col_num
b11e7d4317e1dd32e896f1541b8a0cf05a342487
703,661
import re


def hashtag(phrase, plain=False):
    """
    Generate hashtags from phrases. Camelcase the resulting hashtag, strip punct.
    Allow suppression of style changes, e.g. for two-letter state codes.
    """
    words = phrase.split(' ')
    if not plain:
        for i in range(len(words)):
            try:
                if not words[i]:
                    del words[i]
                words[i] = words[i][0].upper() + words[i][1:]
                words[i] = re.sub(r"['./-]", "", words[i])
            except IndexError:
                break
    return '#' + ''.join(words)
b6e7ab647330a42cf9b7417469565ce5198edd4f
703,665
import re


def is_ignored(filename, ignores):
    """
    Checks if the filename matches any of the ignored keywords.

    :param filename: The filename to check
    :type filename: `str`
    :param ignores: List of regex paths to ignore. Can be none
    :type ignores: `list` of `str` or None
    :rtype: `bool`
    """
    return ignores and len([x for x in ignores if re.search(x, filename)]) > 0
3d5016d5ac86efdf9f67a251d5d544b06347a3bf
703,668
def flip_thetas(thetas, theta_pairs):
    """Flip thetas.

    Parameters
    ----------
    thetas : numpy.ndarray
        Joints in shape (num_thetas, 3)
    theta_pairs : list
        List of theta pairs.

    Returns
    -------
    numpy.ndarray
        Flipped thetas with shape (num_thetas, 3)
    """
    thetas_flip = thetas.copy()
    # reflect horizontally
    thetas_flip[:, 1] = -1 * thetas_flip[:, 1]
    thetas_flip[:, 2] = -1 * thetas_flip[:, 2]
    # change left-right parts
    for pair in theta_pairs:
        thetas_flip[pair[0], :], thetas_flip[pair[1], :] = \
            thetas_flip[pair[1], :], thetas_flip[pair[0], :].copy()

    return thetas_flip
e19a274953a94c3fb4768bcd6ede2d7556020ab2
703,672
def from_time(year=None, month=None, day=None, hours=None, minutes=None,
              seconds=None, microseconds=None, timezone=None):
    """
    Convenience wrapper to take a series of date/time elements and return a WMI
    time of the form yyyymmddHHMMSS.mmmmmm+UUU. All elements may be int, string
    or omitted altogether. If omitted, they will be replaced in the output string
    by a series of stars of the appropriate length.

    @param year The year element of the date/time
    @param month The month element of the date/time
    @param day The day element of the date/time
    @param hours The hours element of the date/time
    @param minutes The minutes element of the date/time
    @param seconds The seconds element of the date/time
    @param microseconds The microseconds element of the date/time
    @param timezone The timezone element of the date/time

    @return A WMI datetime string of the form: yyyymmddHHMMSS.mmmmmm+UUU
    """
    def str_or_stars(i, length):
        if i is None:
            return "*" * length
        else:
            return str(i).rjust(length, "0")

    wmi_time = ""
    wmi_time += str_or_stars(year, 4)
    wmi_time += str_or_stars(month, 2)
    wmi_time += str_or_stars(day, 2)
    wmi_time += str_or_stars(hours, 2)
    wmi_time += str_or_stars(minutes, 2)
    wmi_time += str_or_stars(seconds, 2)
    wmi_time += "."
    wmi_time += str_or_stars(microseconds, 6)
    wmi_time += str_or_stars(timezone, 4)

    return wmi_time
61d2bf9fb36225990ac0ac9d575c3931ef66e9f6
703,674
def plural(num, one, many):
    """Convenience function for displaying a numeric value, where the attached
    noun may be both in singular and in plural form."""
    return "%i %s" % (num, one if num == 1 else many)
f29753d25e77bcda2fb62440d8eb19d9bd332d1e
703,675
import math
import time


def test_query_retry_maxed_out(
    mini_sentry, relay_with_processing, outcomes_consumer, events_consumer
):
    """
    Assert that a query is not retried an infinite amount of times.

    This is not specific to processing or store, but here we have the outcomes
    consumer which we can use to assert that an event has been dropped.
    """
    request_count = 0

    outcomes_consumer = outcomes_consumer()
    events_consumer = events_consumer()

    @mini_sentry.app.endpoint("get_project_config")
    def get_project_config():
        nonlocal request_count
        request_count += 1
        print("RETRY", request_count)
        return "no", 500

    RETRIES = 1
    query_timeout = 0.5  # Initial grace period

    # Relay's exponential backoff: INITIAL_INTERVAL = 1s; DEFAULT_MULTIPLIER = 1.5;
    for retry in range(RETRIES):  # 1 retry
        query_timeout += 1 * 1.5 ** (retry + 1)

    relay = relay_with_processing(
        {"limits": {"query_timeout": math.ceil(query_timeout)}}
    )

    try:
        relay.send_event(42)
        time.sleep(query_timeout)

        outcomes_consumer.assert_dropped_internal()
        assert request_count == 1 + RETRIES

        for (_, error) in mini_sentry.test_failures[:-1]:
            assert isinstance(error, AssertionError)
            assert "error fetching project states" in str(error)

        _, last_error = mini_sentry.test_failures[-1]
        assert "failed to resolve project information" in str(last_error)
    finally:
        mini_sentry.test_failures.clear()
9339078c432cd087dcdbf799cad8d881defb41c2
703,676
def _to_bytes(str_bytes):
    """Takes UTF-8 string or bytes and safely spits out bytes."""
    try:
        bytes = str_bytes.encode('utf8')
    except AttributeError:
        return str_bytes

    return bytes
fd16c24e80bdde7d575e430f146c628c0000bf9a
703,678
def determine_host(environ):
    """
    Extract the current HTTP host from the environment.
    Return that plus the server_host from config. This is
    used to help calculate what space we are in when HTTP
    requests are made.
    """
    server_host = environ['tiddlyweb.config']['server_host']
    port = int(server_host['port'])
    if port == 80 or port == 443:
        host_url = server_host['host']
    else:
        host_url = '%s:%s' % (server_host['host'], port)

    http_host = environ.get('HTTP_HOST', host_url)
    if ':' in http_host:
        for port in [':80', ':443']:
            if http_host.endswith(port):
                http_host = http_host.replace(port, '')
                break
    return http_host, host_url
6e91f93d5854600fe4942f093de593e53aaf2aa0
703,680
def read_messages(message_file):
    """(file open for reading) -> list of str

    Read and return the contents of the file as a list of messages, in the
    order in which they appear in the file. Strip the newline from each line.
    """
    # Store the message_file into the lst as a list of messages.
    lst = message_file.readlines()

    # Strip the newline from each string in the list.
    for i in range(len(lst)):
        lst[i] = lst[i].strip()

    return lst
0e4b1a6995a6dd25ab3783b53e730d0dd446747c
703,681
def cradmin_titletext_for_role(context, role):
    """
    Template tag implementation of
    :meth:`django_cradmin.crinstance.BaseCrAdminInstance.get_titletext_for_role`.
    """
    request = context['request']
    cradmin_instance = request.cradmin_instance
    return cradmin_instance.get_titletext_for_role(role)
8e6a29c369c5ae407701c12dc541e82dda31f193
703,684