content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os
import sys
import subprocess


def get_git_sha1():
    """Try to get the git SHA1 with git rev-parse.

    Looks for a ``.git`` directory one level above this script and returns
    the SHA1 of HEAD as emitted by git (including the trailing newline),
    or '' on any failure.
    """
    git_dir = os.path.join(os.path.dirname(sys.argv[0]), '..', '.git')
    try:
        # subprocess.DEVNULL avoids leaking the file handle that the
        # original open(os.devnull, 'w') never closed.
        git_sha1 = subprocess.check_output([
            'git', '--git-dir=' + git_dir, 'rev-parse', 'HEAD',
        ], stderr=subprocess.DEVNULL).decode("ascii")
    except (OSError, subprocess.CalledProcessError, UnicodeDecodeError):
        # Deliberately silent: callers treat '' as "no version available".
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        git_sha1 = ''
    return git_sha1
38e1bb662229939f5f1aeec396e4c9d11c93bb73
701,522
def zip_tasks_verbose_output(table, stdstreams):
    """Interleave each formatted task row with its stdout/stderr tail lines.

    :param table: a formatted list of tasks
    :param stdstreams: for each task, a list of lines from stdout/stderr tail
    :raises ValueError: if the two lists differ in length
    """
    if len(table) != len(stdstreams):
        raise ValueError('Can only zip same-length lists')
    output = []
    for row, tail_lines in zip(table, stdstreams):
        output.append(row)
        output.extend(tail_lines)
    return output
33d74cd274ec39330cbc127a3088a430b80d234a
701,523
from typing import Union


def is_chinese(x: Union[int, str]) -> bool:
    """Recognizes whether the server/uid is chinese

    :param x: A server or a genshin uid
    """
    chinese_prefixes = ("cn", "1", "2", "5")
    return str(x).startswith(chinese_prefixes)
e63d7e98a3dcdaeb9f9853715dcd8f466d9a293b
701,524
def get_bridge(ip):
    """The name to be used for the bridge interface connecting the VMs
    and the host."""
    hex_suffix = ip.packed.hex().upper()
    return "spirebr{}".format(hex_suffix)
898347580052cce492bb12d60668fc7661a7f811
701,525
import pickle


def try_deserialize_handler(serialized_handler):
    """Reverse function of try_serialize_handler.

    Args:
      serialized_handler: serialized handler str or None.

    Returns:
      handler instance or None.
    """
    if not serialized_handler:
        return None
    # NOTE: pickle.loads must only ever see trusted, internally-produced data.
    return pickle.loads(serialized_handler)
bc91e26c65add4e74affd148b8ed550fe923c925
701,526
def hardwareVersionToString(hwversion):
    """Converts a raw integer value into a human readable string a.b.c.d.

    :param int hwversion: raw value as received from the generator
    :return str: a human readable string 'a.b.c.d'.
    """
    is_new_format = bool((hwversion >> 30) & 1)
    if is_new_format:
        # New format: bits 30-22 + 21-16.  Mask with 0xFF instead of
        # 0x3FF so the two topmost (format-flag) bits are ignored.
        a = (hwversion >> 22) & 0xFF   # should always be 17
        b = (hwversion >> 16) & 0x3F
        c = (hwversion >> 8) & 0xFF
        d = hwversion & 0xFF
    else:
        # Old format.
        a = 2000 + ((hwversion >> 26) & 0x3F)  # 2000 + first 6 bits (MSB)
        b = (hwversion >> 22) & 0x0F           # 4 next bits
        c = (hwversion >> 16) & 0x3F           # 6 next bits
        d = hwversion & 0xFFFF                 # last 16 bits (LSB)
    return "{}.{}.{}.{}".format(a, b, c, d)
1f1fab23706e05fa593ef4cf56f3ec4e1a6f4c6f
701,527
def get_light_threshold(response):
    """Get light from response."""
    return response.get("highlight")
602d3b11fbabfa6c8b0d284a21b4b1ebb816d327
701,528
def analyze_group(group, verbosity, error_level):
    """analyze AD group object

    group -- group object to be analyzed
    verbosity -- NAGIOS verbosity level - ignored
    error_level -- ignored

    Returns the Nagios error code (always 0) and error message (group defn)
    """
    template = ('{name:%s, description:%s, objectId:%s, Security enabled:%r, '
                'Dirsync enabled:%r, mail enabled:%r}')
    fields = (group['displayName'], group['description'], group['objectId'],
              group['securityEnabled'], group['dirSyncEnabled'],
              group['mailEnabled'])
    return 0, template % fields
868746cedd1815df1b9188b02671c3d5431bf83b
701,529
import re


def search_bad_symbols_and_username(username):
    """Validate a login name against reserved words and forbidden symbols.

    Returns the string 'No' when the name is acceptable, otherwise a
    (Russian) error message describing the first problem found, checked
    in order: Cyrillic letters, reserved names, forbidden symbols.
    """
    reserved_names = ('root', 'admin', 'moderator', 'support', 'supports',
                      'helpdesk')
    forbidden_symbols = ('&', ' ', '=', '+', '<', '>', ',', '.', '\"', '\'',
                         '?', '!', ':', ';', '/', '\\', '|', '#', '^')
    if re.search('[а-яА-Я]', username):
        return 'Ошибка! В поле Login нельзя использовать русские буквы'
    if username in reserved_names:
        return f'Ошибка! Нельзя использовать {username} - зарезервированное имя!'
    for symbol in forbidden_symbols:
        if symbol in str(username):
            return f'Ошибка! В поле Login найден запрещенный символ ({symbol})'
    return 'No'
79beccff28ce3c319698e47e05353e1c8354522c
701,530
import fnmatch


def FilterMatchesTest(filter_string, test_string):
    """Does something close enough to base/strings/pattern.h's
    MatchPattern() for our purposes here."""
    matched = fnmatch.fnmatch(test_string, filter_string)
    return matched
9f4cca026e60fe1b3fb0c2858477090fdf03c44e
701,531
import code


def main(_):
    """Run an interactive console.

    The single ignored argument matches an app-runner callback signature
    (presumably absl.app.run style -- confirm at the call site).  Blocks
    in the REPL until the user exits, then returns 0 as the exit status.
    """
    code.interact()
    return 0
e85a21c0197a599378c3f25022ec99cb557d6017
701,532
import heapq


def heapmerge(*inputs):
    """Like heapq.merge(), merges multiple sorted inputs (any iterables)
    into a single sorted output, but provides more convenient API:
    each input is a pair of (iterable, label) and each yielded result is
    a pair of (item, label of the input) - so that it's known what input
    a given item originates from.  Labels can be any objects (e.g., object
    that produced the input stream)."""
    def tagged(iterable, label):
        # Attach the input's label to every item it yields.
        for item in iterable:
            yield (item, label)

    return heapq.merge(*(tagged(it, lbl) for it, lbl in inputs))
580ec9f2f0793f8907390f5c7f8eebf4ac539b59
701,533
import ast


def get_version_from_module(content: str) -> str:
    """Get the __version__ value from a module.

    :param content: the module's source text
    :raises IOError: if the source cannot be parsed or defines no
        ``__version__`` assignment
    """
    # adapted from setuptools/config.py
    try:
        module = ast.parse(content)
    except SyntaxError as exc:
        # Chain the SyntaxError so location details survive in tracebacks.
        raise IOError(f'Unable to parse module: {exc}') from exc
    try:
        return next(
            ast.literal_eval(statement.value)
            for statement in module.body
            if isinstance(statement, ast.Assign)
            for target in statement.targets
            if isinstance(target, ast.Name) and target.id == '__version__'
        )
    except StopIteration:
        # The StopIteration is an implementation detail; hide it.
        raise IOError('Unable to find __version__ in module') from None
bce0932d487bb778fd9dba5cbd0ae15c5f888fa6
701,534
def read_parameters_start(config):
    """
    Read info on starting parameter values.  The distributions can be
    'gauss', 'uniform', or 'log-uniform'.

    Returns a tuple of (parameter names, {name: [distribution, *floats]}).
    """
    section = 'EMCEE starting'
    parameters_to_fit = list(config[section])
    starting = {}
    for param in parameters_to_fit:
        words = config.get(section, param).split()
        # First word is the distribution name; the rest are numeric args.
        starting[param] = [words[0]] + [float(word) for word in words[1:]]
    return (parameters_to_fit, starting)
4dc2c3660f0de3b96eeadb4b792c2ff40fab6822
701,536
import mimetypes


def bundle_media_description(key, filename):
    """Bundle the media description necessary for uploading.

    :param key: form-data key name
    :param filename: Local file name or path.
    :return: tuple of ('key name', ('file name', 'file object', 'MIME content-type')
    :rtype: tuple
    """
    # guess_type returns (type, encoding); the encoding part is unused here.
    content_type, _ = mimetypes.guess_type(filename)
    # NOTE(review): the file handle is intentionally left open so the HTTP
    # client can stream it; confirm the caller (or upload library) closes it.
    media_description = (key, (filename, open(filename, 'rb'), content_type))
    return media_description
8c160a9c767d86a1c1867d22f018d6342239e68d
701,537
def format_write_request(address, value):
    """
    Format a write request based on an address and the value to write to
    the FPGA.

    :param address: address at which to write date.
    :param value: data to write to the address.
    :return: formatted request (9-byte bytearray).
    :raises ValueError: if address or value is negative or wider than 4 bytes.
    """
    if address >= 2**(4 * 8):
        raise ValueError(f'Address {address} is too large (max 4 bytes).')
    if address < 0:
        raise ValueError(f'Address {address} cannot be negative.')
    if value >= 2**(4 * 8):
        raise ValueError(f'Value {value} is too large (max 4 bytes).')
    if value < 0:
        # Bug fix: this branch previously reported "Address ... cannot be
        # negative" even though it checks the value.
        raise ValueError(f'Value {value} cannot be negative.')

    buff = bytearray(9)
    buff[0] = 1 << 7  # MSB of byte 0 flags the request as a write.
    buff[1:] = int.to_bytes(address, length=4, byteorder="little")
    buff[5:] = int.to_bytes(value, length=4, byteorder="little")
    return buff
8425b7ff3422162cb0127c64069dbf68a414becf
701,538
def tangent_basis_transposed(v):
    """Subtract the last entry along axis 1 from all preceding entries.

    For an array shaped (N, M, ...) this returns the (N, M-1, ...) array
    of differences v[:, j] - v[:, M-1]; presumably the entries are points
    and the result spans the tangent space at the last one -- TODO confirm.
    """
    last_entry = v[:, -1:, ...]
    return v[:, :-1, ...] - last_entry
e2cd1c0c398ba58e76541aa9976bae64cdecf339
701,539
from datetime import datetime, timezone


def create_envelope(payload: dict) -> dict:
    """
    Creates a dictionary with event label, timestamp and message field

    :param payload: The payload dict (mutated in place)
    :return: An event message dictionary
    """
    # Bug fix: datetime.utcnow().timestamp() treats the naive UTC datetime
    # as *local* time, skewing the epoch value by the machine's UTC offset.
    # An aware UTC datetime yields the correct epoch timestamp.
    payload['timestamp'] = datetime.now(timezone.utc).timestamp()
    if 'event' not in payload:
        payload['event'] = 'cs.unknown'
    if 'message' not in payload:
        payload['message'] = None
    return payload
593a2b60a667dece41f10031d13ad98018d9f881
701,540
import os


def os_path_separators():
    """List the path separators of the current OS (altsep may be None)."""
    return [sep for sep in (os.path.sep, os.path.altsep) if sep]
e3e18f5a77bbb6c76e1e7d602f1c32a36bbb237d
701,541
import re


def prepare_template_data(fill_pairs):
    """
    Prepares formatted data for filling template.

    Produces multiple variants of each key (key, Key, KEY) to control the
    format of the filled template.

    Args:
        fill_pairs (iterable) of tuples (key, value)

    Returns:
        (dict)

    ('host', 'maya') > {'host':'maya', 'Host': 'Maya', 'HOST': 'MAYA'}
    """
    alnum = re.compile(r"[a-zA-Z0-9]")
    fill_data = {}
    for key, value in dict(fill_pairs).items():
        # Skip `None` values (standalone publisher).
        if value is None:
            continue

        fill_data[key] = value
        fill_data[key.upper()] = value.upper()

        # Capitalize only the first alphanumeric character of the value,
        # copying leading symbols (e.g. "{" in "{key}") unchanged.
        capitalized = ""
        text = value or ""
        for idx, char in enumerate(text):
            if alnum.match(char):
                capitalized += char.upper() + text[idx + 1:]
                break
            capitalized += char
        fill_data[key.capitalize()] = capitalized

    return fill_data
a6082093bcbe39ba429decd2735fb2797c6c11dd
701,542
from pathlib import Path
from typing import Any
from typing import Callable


def readFileLines(filepath: Path, f: Callable[[str], Any] = str) -> list:
    """Reads the lines in a file

    Args:
        filepath (Path): The path of the file to be read.
        f (Callable[[str], Any], optional): Transformation applied to each
            line (which still carries its trailing newline). Defaults to `str`.

    Returns:
        list: list with the lines, with the defined transformation applied.
    """
    with open(filepath, 'r') as handle:
        # Read one line at a time to keep memory flat on large files;
        # readline() returns '' only at EOF.
        return [f(line) for line in iter(handle.readline, '')]
21df2445f6132085d7da36276f371170ff0f2a4e
701,543
def get_default(arr, idx, default_value):
    """Return ``arr[idx]``, falling back to ``default_value`` when the
    index is out of range."""
    try:
        result = arr[idx]
    except IndexError:
        result = default_value
    return result
038b943da7fa1d36038444880264160da8e031f4
701,544
def second(xs):
    """Returns the second element of a list, or None if the list has
    fewer than two elements.

    Bug fix: a single-element list previously raised IndexError even
    though the docstring promised None for missing values.
    """
    if not xs or len(xs) < 2:
        return None
    return xs[1]
e5a915116d61e01842f86623aafcf6e2e6c8b5a3
701,545
import logging


def get_logger(module: str, file: str) -> logging.Logger:
    """Configure a file logger for use in a script.

    Parameters
    ----------
    module : str
        The name of the module from which the logger is called
    file : str
        The name of the log file to which the logger will write

    Returns
    -------
    logging.Logger
        The configured logger instance.
    """
    formatter = logging.Formatter(
        "{asctime} :: {levelname} :: {name} :: {message}", style="{"
    )
    handler = logging.FileHandler(file)
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    # Install the handler on the root logger so the named logger inherits it.
    logging.basicConfig(level=logging.INFO, handlers=[handler])
    return logging.getLogger(module)
c9a68d216ca9a04ccb208dd546621e051dd37e36
701,546
def filled_grasp_grid(empty_grasp_grid, grid_file):
    """Return a GraspGrid instance filled from the grid_file.

    NOTE(review): ``grid_file`` is consumed and closed here, so it cannot
    be re-read by other consumers afterwards -- confirm fixture scoping.
    """
    empty_grasp_grid.read(grid_file)
    grid_file.close()
    return empty_grasp_grid
97ed623c09b9d42642a208451b004e5bec4910ed
701,547
import pickle


def get_camera_params():
    """Return a default camera parameter matrix.

    Reads the calibration dict from ``config.pickle`` in the working
    directory and returns its camera-matrix ('mtx') entry.
    """
    # Bug fix: use a context manager so the file handle is closed
    # (the original open() was never closed).
    with open("config.pickle", "rb") as pickle_in:
        data_dict = pickle.load(pickle_in)
    return data_dict['mtx']
    # return np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])
51292054e8db77058ea37b2dbe06bf105183969b
701,548
def check_letters():
    """
    Check if all letters from input is in other string

    Prompts for a word and a set of letters; prints "No match" and
    returns 0 as soon as any letter is missing from the lower-cased
    word, otherwise prints "Match" (and implicitly returns None).
    """
    word = input("Give me a word: ").lower()
    # NOTE(review): `letters` is not lower-cased, so uppercase input can
    # never match the lower-cased word -- confirm this is intended.
    letters = input("Give letters to check for in word: ")
    for l in letters:
        if l not in word:
            print("No match")
            return 0
    print("Match")
4c419e5c3c70c383d06079ead7e105b38e6ca91e
701,549
def get_1d_coords(p, i, j):
    """
    Finds index of site in 1d chain from 2d lattice based on snake
    decomposition, i.e. 1d coords on the 2d lattice look like:
        0, 1,  2,  3,
        7, 6,  5,  4,
        8, 9, 10, 11

    Args:
        p - dictionary that contains the relevant system parameters
            (reads the lattice width ``p['W']``)
        i - row index on 2d lattice
        j - column index on 2d lattice

    Returns:
        reshaped_i - index of site on 1d chain
    """
    width = p['W']
    row_start = i * width
    if i % 2:
        # Odd rows run right-to-left in the snake ordering.
        return row_start + (width - j - 1)
    return row_start + j
2852ec9d57c99f922380fc8b671bdd757e8decf4
701,550
import json


def abrir_freq(file):
    """Open the JSON file holding the rule-frequency dictionary.

    :param file: path to the JSON file
    :return: dict with the frequencies of the rules in the corpus.
    """
    with open(file) as handle:
        return json.load(handle)
b1cbb7c98146cf72390b511428d7734881f0b944
701,551
def transpone(l):
    """transpones a list of multiple lists

    If the entries do not have the same length, an empty string is put
    in place of the missing items.

    Fixes over the original: the bare ``except`` (which swallowed every
    exception type) is replaced by an explicit bounds check, and an empty
    input returns [] instead of crashing on ``max([])``.
    """
    if not l:
        return []
    width = max(len(row) for row in l)
    transponedlist = []
    for col in range(width):
        transponedlist.append(
            [row[col] if col < len(row) else "" for row in l]
        )
    return transponedlist
27d904910be743d4116fba5ba7479cfdc62ec18b
701,552
def get_minsep_range(minseps, cap=None):
    """
    Create ranged minseps from an ensemble of minsep entries

    Args:
        minseps (list): A list of minsep dictionaries
        cap (tuple): Minimum-maximum caps

    Returns:
        minsep (dict): A minsep where values are minimum and maximum values
    """
    # Seed the ranges with the first entry: each key maps to [min, max].
    base = {key: [value, value] for key, value in minseps[0].items()}
    for minsep in minseps:
        for key, value in minsep.items():
            # If the key exists (it should)
            if key in base:
                existing = base[key]
                # Expand the minimum and maximum values
                # NOTE(review): when a cap is given, an out-of-cap value
                # clamps one bound but (because of the elif chain) never
                # widens the other, and the values seeded from minseps[0]
                # above are never capped -- confirm this is intended.
                if cap and (value < cap[0]):
                    existing[0] = cap[0]
                elif cap and (value > cap[1]):
                    existing[1] = cap[1]
                elif existing[0] > value:
                    existing[0] = value
                elif existing[1] < value:
                    existing[1] = value
            # add the pairs if needed
            else:
                base[key] = [value, value]
    return base
e60392fa70f7f2b0989f4894ce90c7bd78aac7d2
701,553
import itertools


def group_by_day(list_all_objects):
    """
    Group already-ordered objects carrying a date attribute by day,
    newest group first.

    :param list_all_objects: e.g. Register.objects.all() queryset
    :return: list of lists, one inner list per day, in reverse order
    """
    grouped = []
    for _, day_group in itertools.groupby(list_all_objects,
                                          key=lambda obj: obj.date.day):
        # Prepending reverses the group order relative to the input.
        grouped.insert(0, list(day_group))
    return grouped
39feb54a2232e9e7a0965b72f2c6eb2509c7b610
701,554
def datetime_to_iso(date, only_date=True):
    """Convert datetime format to ISO 8601 time format

    E.g. ``datetime.datetime(2017, 9, 14, 0, 0)`` becomes ``2017-09-14``.

    :param date: datetime instance to convert
    :type date: datetime
    :param only_date: whether to return date only or also time information.
        Default is ``True``
    :type only_date: bool
    :return: date in ISO 8601 format
    :rtype: str
    """
    iso = date.isoformat()
    if only_date:
        # Drop everything from the 'T' separator onward.
        return iso.split('T')[0]
    return iso
676a7e65de2c9e4de60cfe8d832ab342aa46af4f
701,555
def predict_hot_pxl(sqr, model):
    """Run hot-pixel detection on one 64x64 colour image.

    :param sqr: numpy array, a 64x64x3 colour image (reshaped to a
        batch of one for the model)
    :param model: ML model exposing ``predict`` (presumably Keras-style
        -- confirm)
    :return: list of floats between 0 and 1, one per batch entry (a
        single element here); 0 means no hot pixel detected, 1 means hot
        pixel detected.  NOTE(review): the original docstring promised a
        scalar float but the code returns a list -- confirm which one
        callers expect.
    """
    predict = model.predict(sqr.reshape(1, 64, 64, 3))
    y_pred = [i[0] for i in predict]
    return y_pred
e533c2b27df280f0f62b2727b1dd960a828fdee0
701,556
import traceback


def write_to_file(file_name, dir_path, content):
    """
    Rewrite or create and write to file.

    Parameters:
        file_name (string): file name
        dir_path (string): root path which file contains
        content (string): content to be written

    Returns:
        file_path (string): destination path, or None when writing
        failed (the traceback is printed).
    """
    file_path = '{}/{}'.format(dir_path, file_name)
    try:
        with open(file_path, 'w') as handle:
            handle.write(content)
    except Exception:
        # Best-effort: report the failure but do not propagate it.
        print(traceback.format_exc())
        return None
    return file_path
b3ade139d6455f57e54d3ae03ab4988dc5718644
701,557
def descendants(node, lst=None):
    """Return a list of all the descendants beneath a node (depth-first,
    children before their own subtrees)."""
    lst = [] if lst is None else lst
    for child in node.children:
        lst.append(child)
        # Recurse into the shared accumulator rather than merging lists.
        descendants(child, lst=lst)
    return lst
5fe6fb9d9fbfd63bbeb161fdbe1d0e54d20edf9e
701,558
import argparse


def parse_args():
    """Parse input arguments.

    Builds the CLI for the Faster R-CNN detection-visualization run and
    returns the parsed ``argparse.Namespace`` (reads ``sys.argv``).
    """
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('jsonFile', help='detections json file')
    # NOTE(review): several help strings below look copy-pasted
    # ('detections json file' for -o, 'str to append to the pdfName' for
    # --labels, 'grid size, gxg' for the last four flags) and do not
    # describe their option -- confirm and fix the user-facing text.
    parser.add_argument('-o', dest='resFile', type=str, default='dummpy.npy', help='detections json file')
    parser.add_argument('--imdb', dest='imdb_name', type=str, default='coco_2014_train', help='Which database was this run on')
    parser.add_argument('--nsave', dest='nsave', type=int, default=100, help='How many images to write into pdf')
    parser.add_argument('--pdfname', dest='pdfappend', type=str, default='dummy', help='str to append to the pdfName')
    parser.add_argument('--labels', dest='labels', type=str, default='/projects/databases/coco/labels.txt', help='str to append to the pdfName')
    parser.add_argument('--featfromlbl', dest='featfromlbl', type=str, default='', help='should we use lables.txt, if yes which feature?')
    parser.add_argument('--usecococls', dest='usecococls', type=int, default=1, help='wether to use coco class')
    parser.add_argument('--grid', dest='grid', type=int, default=4, help='grid size, gxg')
    parser.add_argument('--gridtype', dest='gridtype', type=str, default='reg', help='grid type, reg or rect')
    parser.add_argument('--use_gauss_weight', dest='use_gauss_weight', type=int, default=0, help='grid size, gxg')
    parser.add_argument('--scale_by_det', dest='scale_by_det', type=int, default=1, help='grid size, gxg')
    parser.add_argument('--dump_class_only', dest='dump_class_only', type=int, default=0, help='grid size, gxg')
    parser.add_argument('--appendtofeat', dest='appendtofeat', type=str, default=None, help='add features to already existing file')
    args = parser.parse_args()
    return args
3e270f034e9b4352dc6bc3254808a8b719655dd6
701,560
import requests


def client(api_client):
    """Returns HttpClient instance with retrying feature skipped.

    :param api_client: factory fixture producing the project HttpClient;
        retries for 503/404 are disabled so tests observe raw statuses.
    """
    api_client = api_client(disable_retry_status_list={503, 404})
    # This won't work with Httpx but will save us about 10 mins for this
    # test alone
    api_client.session = requests.Session()
    return api_client
615756a81fd243d4d7495bafca4538e19775f7b4
701,561
def create_elem_dict(row):
    """
    Create new element dictionary from row with metadata common to all
    nodes/ways/relations.
    """
    elem = dict(
        id=row.id,
        version=row.version,
        userId=row.user_id,
        userName=row.user_name,
        timestamp=row.timestamp,
        tags=row.tags,
    )
    # Only flag add/delete when set; unchanged rows carry neither key.
    for flag, label in ((row.add, 'added'), (row.delete, 'deleted')):
        if flag:
            elem[label] = True
    return elem
d97b712cd4b0bc6e79f5aa09f212491d715f1c69
701,562
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple


def source_ip_and_reverse_dns(
        message: Dict[str, Any]) -> Tuple[Optional[str], Optional[str]]:
    """
    Extract the source IP and reverse DNS information from a canary
    request.
    """
    source_ip = message.get('SourceIP')
    # `ReverseDNS` can sometimes exist and still be empty; treat empty
    # (or otherwise falsy) as missing.
    reverse_dns = message.get('ReverseDNS') or None
    return (source_ip, reverse_dns)
df2a9b6c4a177073fc019e88c33234f4d6124ccb
701,563
def fCO2_to_CO2(fCO2, Ks):
    """Calculate CO2 from fCO2 using the K0 solubility constant."""
    K0 = Ks.K0
    return fCO2 * K0
2b71b46147291e7fffb99d51d0acb59ea4cd0c69
701,564
def check_if_in_team(api, team_id, person):
    """
    Checks if a person is in a given team

    :param api: CiscoSparkAPI instance to query Spark with.
    :param team_id: The ID of the team to check for
    :param person: The person to check against the team
    """
    memberships = api.team_memberships.list(team_id)
    # A single matching membership is enough.
    return any(person.id == membership.personId
               for membership in memberships)
b20a5ee41485b2dd397c7dd23407ef95d0f34e4a
701,566
def group_per_category(bkgs):
    """
    Groups a flat list of datasets into sublists with the same category

    E.g. [ ttjet, ttjet, ttjet, qcd, qcd ] --> [ [ttjet, ttjet, ttjet], [qcd, qcd] ]
    """
    categories = sorted({bkg.get_category() for bkg in bkgs})
    grouped = []
    for category in categories:
        grouped.append(
            [bkg for bkg in bkgs if bkg.get_category() == category]
        )
    return grouped
c331456b1b3066f667c94e6ed7d00c8421ec2160
701,567
def subtract_params(param_list_left: list, param_list_right: list):
    """Subtract two lists of parameters element-wise (left minus right).

    :param param_list_left: list of numpy arrays
    :param param_list_right: list of numpy arrays
    :return: list of numpy arrays
    """
    return [left - right
            for left, right in zip(param_list_left, param_list_right)]
f8563cae337af0e30621428103afa10bde614e93
701,568
def _context_license_spdx(context, value): """convert a given known spdx license to another one""" # more values can be taken from from https://github.com/hughsie/\ # appstream-glib/blob/master/libappstream-builder/asb-package-rpm.c#L76 mapping = { "Apache-1.1": "ASL 1.1", "Apache-2.0": "ASL 2.0", "BSD-3-Clause": "BSD", "GPL-1.0+": "GPL+", "GPL-2.0": "GPLv2", "GPL-2.0+": "GPLv2+", "GPL-3.0": "GPLv3", "GPL-3.0+": "GPLv3+", "LGPL-2.1": "LGPLv2.1", "LGPL-2.1+": "LGPLv2+", "LGPL-2.0": "LGPLv2 with exceptions", "LGPL-2.0+": "LGPLv2+ with exceptions", "LGPL-3.0": "LGPLv3", "LGPL-3.0+": "LGPLv3+", "MIT": "MIT with advertising", "MPL-1.0": "MPLv1.0", "MPL-1.1": "MPLv1.1", "MPL-2.0": "MPLv2.0", "OFL-1.1": "OFL", "Python-2.0": "Python", } if context['spec_style'] == 'fedora': return mapping[value] else: # just use the spdx license name return value
3ad3b91bf7db36a9be3751877e248ed481d1be71
701,569
def html_header():
    """
    Global & common html header. Should be used everywhere.

    Returns:
    --------
    out: str
        Opening HTML: doctype, head with stylesheet link, and the start
        of the hero banner / top navigation; callers are expected to
        append body content and the matching closing tags.
    """
    # NOTE(review): the literal is kept byte-for-byte; it opens <body> and
    # several <div>s that are closed elsewhere -- confirm the counterpart.
    return """ <!DOCTYPE html> <head> <link rel="stylesheet" type="text/css" href="css/finkstyle.css"> <title>Mon programme test</title> </head> <body> <div class="hero-image"> <div class="hero-text"> <h1 style="font-size:50px">Fink</h1> <h3>Alert dataset monitor</h3> <div class="topnav"> """
7d4a7571be9a269927ec29a0d38daa950f1404b6
701,570
def indent(element, level=0):
    """
    Indents an XML root object from ElementTree.

    Parameters
    ----------
    element : ElementTree.Element
        The XML root object to manipulate and indent.
    level : int, optional
        The level of indentation at which to start. Can be ignored and
        used with default value of 0.

    Returns
    -------
    ElementTree.Element
        Same object as input, but indented.

    Notes
    -----
    This function is not created by me. The original source is:
    https://effbot.org/zone/element-lib.htm#prettyprint
    2004 by Fredrik Lundh
    """
    # Indentation string for this nesting level.
    i = '\n' + level * ' '
    # if len(element)
    # Truthiness of an Element means "has child elements" here.
    if element:
        if not element.text or not element.text.strip():
            element.text = i + ' '
        if not element.tail or not element.tail.strip():
            element.tail = i
        # NOTE(review): the loop variable deliberately shadows `element`;
        # after the loop `element` is the *last child*, whose tail is set
        # to the parent's indent to close it correctly (as in the original
        # effbot recipe).
        for element in element:
            indent(element, level + 1)
        if not element.tail or not element.tail.strip():
            element.tail = i
    else:
        if level and (not element.tail or not element.tail.strip()):
            element.tail = i
    return element
1f0049077d567308aa8f4e8018846b1dd57f957e
701,571
import torch


def train_collate_fn(batch):
    """Collate function for training batches.

    The input is a list of length batch-size whose entries are the tuples
    produced by ``__getitem__``; images are stacked into a single tensor
    and person ids collected into an int64 tensor (the remaining two
    tuple fields are discarded).
    """
    imgs, pids, _, _ = zip(*batch)
    pid_tensor = torch.tensor(pids, dtype=torch.int64)
    img_tensor = torch.stack(imgs, dim=0)
    return img_tensor, pid_tensor
81f653acbf9c9643289416b70dcba08e002d2613
701,572
def index():
    """Basic index route."""
    response = {"msg": "Hi! This is an API by Mariia Sizova"}
    return response
211f24da5736e57216f160db39af0590f8eb2c4f
701,573
import torch


def pretty_size(size):
    """
    Pretty prints a torch.Size object

    By user machinethink:
    https://forums.fast.ai/t/gpu-memory-not-being-freed-after-training-is-over/10265/7
    """
    assert isinstance(size, torch.Size)
    dims = [str(dim) for dim in size]
    return " × ".join(dims)
006ae05ce22653bfe58a5791aa5912de7283d9ca
701,574
from typing import Tuple


def convert_time(time: str, ampm: str) -> Tuple[int, int]:
    """Convert time given "HH:MM" to 24h format.

    Args:
        time (str): a time like "12:00" without ampm
        ampm (str): either "am" or "pm"

    Returns:
        Tuple[int, int]: (hour, minute) in 24h time format
    """
    hour_str, minute_str = time.split(':')
    # "12" wraps to 0 so that 12am -> 0 and 12pm -> 12.
    hour = int(hour_str) % 12
    if ampm == 'pm':
        hour += 12
    return hour, int(minute_str)
b1bd57ea92e82ba629e3ad2733f173dfcc805e9e
701,575
def column_equality(series, col1, col2, comparison='equal',
                    pos_return_val=1, neg_return_val=0):
    """
    Apply to a dataframe row to return a binary feature depending on
    equality or inequality

    E.g. df.apply(lambda s: column_equality(s, 'day_of_week',
    'day_of_sale'), axis=1) for matching the two.  Result is a series of
    pos_return_vals and neg_return_vals.

    :param comparison: 'equal' or 'unequal'
    :raises ValueError: for any other comparison mode (the original
        silently returned None, which is easy to miss downstream)
    """
    if comparison == 'equal':
        return pos_return_val if series[col1] == series[col2] else neg_return_val
    if comparison == 'unequal':
        return pos_return_val if series[col1] != series[col2] else neg_return_val
    raise ValueError(f"Unknown comparison mode: {comparison!r}")
9ec71f5fd3af4a8d89b4cd58a255065ec8352eb2
701,576
import math


def get_tile_from_lon_lat(lon: float, lat: float, zoom: int) -> tuple[int, int]:
    """
    Turns a lon/lat measurement into a Slippy map tile at a given zoom.
    """
    # Clamp lon/lat to the valid Web-Mercator projection range.
    lat = max(min(lat, 85.0511), -85.0511)
    lon = max(min(lon, 179.9999), -179.9999)

    lat_rad = math.radians(lat)
    n_tiles = 2.0 ** zoom
    xtile = int((lon + 180.0) / 360.0 * n_tiles)
    ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n_tiles)
    return xtile, ytile
cdd542c8a362d54dccb8760278b22a17f5df57f9
701,577
def simpson_integral(f, a, b):
    """Approximate the integral of ``f`` over [a, b] with Simpson's rule
    (exact for polynomials up to degree 3)."""
    midpoint = (a + b) / 2
    weighted_sum = f(a) + 4 * f(midpoint) + f(b)
    return (b - a) * weighted_sum / 6
572e7af1137ed0f7b6be12f2869a0aa5ba123f85
701,578
def get_cincinnati_channels(major, minor):
    """
    :param major: Major for release
    :param minor: Minor version for release.
    :return: Returns the Cincinnati graph channels associated with a
        release in promotion order (e.g. candidate -> stable)
    """
    major, minor = int(major), int(minor)
    if major != 4:
        raise IOError('Unable to derive previous for non v4 major')
    # 4.1 predates the candidate/fast channel split.
    if minor == 1:
        prefixes = ['prerelease', 'stable']
    else:
        prefixes = ['candidate', 'fast', 'stable']
    return [f'{prefix}-{major}.{minor}' for prefix in prefixes]
e57ad8d26ea0a397e8c3f9edc99174f78b506564
701,580
def lagrange_four_point(x, y0, y1, y2, y3):
    """The third order polynomial p(x) with p(-1)=y0, p(0)=y1, p(1)=y2,
    p(2)=y3.

    Evaluated in Horner form; the coefficients a1..a3 carry a common
    factor of 6, removed by the final division.
    """
    a2 = 3 * (y0 + y2 - y1 - y1)
    a3 = 3 * (y1 - y2) + y3 - y0
    a1 = -a3 + 3 * (y2 - y0)
    # Bug fix: divide by 6 exactly instead of multiplying by a truncated
    # 0.16666...6 literal, which introduced a small systematic error at
    # the interpolation nodes.
    return y1 + x * (a1 + x * (a2 + x * a3)) / 6
b60da1f8567c5b9babbc9e158b1444e30424bb1f
701,581
def capitalize_tag_kv(service: str) -> bool:
    """
    Returns true or false depending on if the boto3 service name needs
    the key & value values capitalized
    """
    capitalized_services = ("ec2", "iam", "ssm")
    return service in capitalized_services
7a6f16be26fa684f6e906c5f8c5d0a3f87f95b63
701,582
import resource


def health():
    """Our instance health. If queue is too long or we use too much mem,
    return 500. Monitor might reboot us for this."""
    msg, status = "Ok", 200
    # ru_maxrss is the process peak resident set size (unit is
    # platform-dependent); compare it against the 1 GiB threshold.
    mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    if mem > 1024**3:
        msg, status = "Over memory " + str(mem) + ">" + str(1024**3), 500
    return msg, status
98570ba02d6e2438ab2be797c7d2c5862d8dabb9
701,583
import os def _count_files(path): """Returns number of files in a given directory.""" return len([filename for filename in os.listdir(path) if os.path.isfile(os.path.join(path, filename))])
1e26135aa64c3569f71913f73a26a73639a08eb7
701,585
import uuid


def create_filename(prefix='', ext=''):
    """
    Create a unique filename.

    :param str prefix: Prefix to add to filename.
    :param str ext: Extension to append to filename, e.g. 'jpg'
    :return: Unique filename.
    :rtype: str
    """
    name = prefix + str(uuid.uuid4())
    if ext:
        name += '.' + ext
    return name
30cedc7bdcf3fdbf202b8a0d26e64bd6f865094d
701,586
def StressngCustomStressorsValidator(stressors):
    """Returns whether or not the list of custom stressors is valid."""
    valid_stressors = {
        'affinity', 'af-alg', 'aio', 'aio-linux', 'apparmor', 'bigheap',
        'brk', 'bsearch', 'cache', 'chdir', 'chmod', 'clock', 'clone',
        'context', 'cpu', 'cpu-online', 'crypt', 'daemon', 'dentry', 'dir',
        'dup', 'epoll', 'eventfd', 'exec', 'fallocate', 'fault', 'fcntl',
        'fiemap', 'fifo', 'filename', 'flock', 'fork', 'fp-error', 'fstat',
        'futex', 'get', 'getrandom', 'getdent', 'handle', 'hdd', 'heapsort',
        'hsearch', 'icache', 'iosync', 'inotify', 'itimer', 'kcmp', 'key',
        'kill', 'klog', 'lease', 'link', 'lockbus', 'lockf', 'longjmp',
        'lsearch', 'malloc', 'matrix', 'membarrier', 'memcpy', 'memfd',
        'mergesort', 'mincore', 'mknod', 'mlock', 'mmap', 'mmapfork',
        'mmapmany', 'mremap', 'msg', 'mq', 'nice', 'null', 'numa',
        'oom-pipe', 'open', 'personality', 'pipe', 'poll', 'procfs',
        'pthread', 'ptrace', 'qsort', 'quota', 'rdrand', 'readahead',
        'remap-file-pages', 'rename', 'rlimit', 'seccomp', 'seek',
        'sem-posix', 'sem-sysv', 'shm-posix', 'shm-sysv', 'sendfile',
        'sigfd', 'sigfpe', 'sigpending', 'sigq', 'sigsegv', 'sigsuspend',
        'sleep', 'socket', 'socket-fd', 'socket-pair', 'spawn', 'splice',
        'stack', 'str', 'stream', 'switch', 'symlink', 'sync-file',
        'sysinfo', 'sysfs', 'tee', 'timer', 'timerfd', 'tsc', 'tsearch',
        'udp', 'udp-flood', 'unshare', 'urandom', 'userfaultfd', 'utime',
        'vecmath', 'vfork', 'vm', 'vm-rw', 'vm-splice', 'wait', 'wcs',
        'xattr', 'yield', 'zero', 'zlib', 'zombie'
    }
    # Every requested stressor must be in the known-good set.
    return all(stressor in valid_stressors for stressor in stressors)
7563d0a8829aa323e53ff807540cdf281b7df883
701,587
import os


def list_files(directory, hidden=False):
    """Return a list of files in a directory.

    Dot-files are skipped unless ``hidden`` is true; subdirectories are
    always excluded.
    """
    return [
        entry for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry))
        and (hidden or not entry.startswith("."))
    ]
c9e1ef221d773dec94acc7997501d17b99700b8d
701,588
def getSentenceList(row, field):
    """Return the list of sentences from a doc-object column.

    :param row: mapping (e.g. a DataFrame row) whose ``field`` entry is a
        Doc exposing ``.sents`` -- presumably a spaCy Doc; confirm.
    :param field: the column/key holding the doc object
    :return: list of sentences; each item will be token span
    """
    return list(row[field].sents)
ecaa1159e6a9390ef2aa310741aae4a58105b8a9
701,589
def std_secao_filter(secao_list):
    """
    Takes a words list from a secao filter and standardize the words to
    the same pattern as the one used to download the articles' URLs:
    extra -> e and suplemento -> a.
    """
    standardized = []
    for secao in secao_list:
        word = str(secao).lower()
        word = word.replace('extra', 'e').replace('suplemento', 'a')
        standardized.append(word)
    return standardized
6c5b1a52bec02078cd8c0e1dc35c97420a424936
701,591
def get_pronoun(mode, number, person, conjugated_verb):
    """Return a string with the pronoun to be used depending of the parameters

    :param mode: verbal mode; "Gérondif" and "Subjonctif" are special-cased,
        anything else gets the plain indicative pronouns
    :param number: "s" for singular, anything else is treated as plural
    :param person: "1", "2", or anything else for third person
    :param conjugated_verb: used only to decide elision (je -> j') when the
        verb starts with a vowel sound
    """
    # Letters (incl. accented vowels and y) that trigger elision of "je".
    # Fix: this 18-element list was duplicated verbatim in two branches.
    vowels = ('a', 'â', 'à', 'ä', 'e', 'ê', 'é', 'è', 'ë', 'i', 'î', 'ï',
              'o', 'ô', 'ö', 'u', 'ù', 'y')

    if mode == "Gérondif":
        return "en "

    if mode == "Subjonctif":
        if number == "s":
            if person == "1":
                return "que j' " if conjugated_verb[0] in vowels else "que je "
            if person == "2":
                return "que tu "
            return "qu'il "
        if person == "1":
            return "que nous "
        if person == "2":
            return "que vous "
        return "qu'ils "

    if number == "s":
        if person == "1":
            return "j' " if conjugated_verb[0] in vowels else "je "
        if person == "2":
            return "tu "
        return "il "
    if person == "1":
        return "nous "
    if person == "2":
        return "vous "
    return "ils "
f905f6755e8a990ff4ff0b1be4246a14c934eb0a
701,592
import subprocess


def get_current_branch():
    """Return the checked-out branch, resolving a detached HEAD to the branch
    the commit was detached from."""
    head = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', "HEAD"]
    ).decode("utf-8").strip()
    if head != 'HEAD':
        # Normal case: HEAD points directly at a branch.
        return head
    # Detached HEAD: `git rev-parse` printed the literal string "HEAD", so
    # scan `git branch` for the starred entry, e.g.
    # "* (HEAD detached at origin/feature)".
    for line in subprocess.check_output(['git', 'branch']).decode('utf-8').split('\n'):
        if '*' in line:
            # Keep only the branch name: last path component, trailing ')'
            # stripped. Returns None implicitly if no starred line is found,
            # matching the original behavior.
            return line.split('/')[-1].replace(')', '')
76f192d26daed7cc7dfafdf22f2d1c8a0a7cb0af
701,593
import re


def generate_layer_name(layer):
    """Generate a unique, human-readable name for *layer*.

    The name is the layer's class name in lower-dash-case followed by a
    per-class running counter taken from ``cls.global_identifiers_map``
    (which is incremented as a side effect).

    Parameters
    ----------
    layer : BaseLayer

    Returns
    -------
    str
    """
    cls = layer.__class__
    identifier = cls.global_identifiers_map[cls]
    cls.global_identifiers_map[cls] += 1
    classname = cls.__name__
    if classname.isupper():
        # All-caps names (e.g. "RELU") are simply lower-cased.
        formatted = classname
    else:
        # Insert a dash before each interior CamelCase word boundary.
        formatted = re.sub(r'(?<!^)(?=[A-Z][a-z_])', '-', classname)
    return "{}-{}".format(formatted.lower(), identifier)
8cce0bf0c68601dcbed2c0852a563243cd818743
701,594
import pkg_resources


def get_costs_gas_pipeline():
    """Return the absolute path (with file name and extension) of the packaged
    default per-km gas-pipeline connection cost file."""
    resource = 'data/costs_gas_pipeline.yml'
    return pkg_resources.resource_filename('cerf', resource)
1dfde6366c286665deb34bb4c9e3f58fe8b31dd6
701,595
import math
import statistics


def calculate_mean(arr, log):
    """Calculate mean and sd of arr values.

    When *log* is true, the geometric mean/sd is computed from the log10 of
    the strictly positive values; non-positive values are excluded from the
    average but still counted in *n*.

    :param arr: sequence of numbers
    :param log: whether to average in log10 space
    :return: tuple ``(mean, sd, n)``; sd is 0 for fewer than two samples and
        the result is ``(0, 0, n)`` when there is nothing to average
    """
    n = len(arr)
    if log:
        logged = [math.log10(value) for value in arr if value > 0]
        if not logged:
            return 0, 0, n
        mean = math.pow(10, statistics.mean(logged))
        sd = math.pow(10, statistics.stdev(logged)) if len(logged) > 1 else 0
        return mean, sd, n
    if n == 0:
        # The original raised statistics.StatisticsError on an empty input;
        # mirror the log branch and report (0, 0, 0) instead.
        return 0, 0, 0
    mean = statistics.mean(arr)
    sd = statistics.stdev(arr) if n > 1 else 0
    return mean, sd, n
9103bffe97696f4441a2d6a28352a55ee1be30cb
701,596
def recv_meas_outcome(socket):
    """Receive the measurement outcome (0 or 1) of the server's
    last measurement.

    :param socket: connection object whose ``recv`` accepts a ``maxsize``
        keyword — NOTE(review): this is not the stdlib ``socket`` API (which
        takes a positional ``bufsize``); presumably a project-specific
        wrapper — confirm.
    :return: the received outcome coerced to ``int`` (expected 0 or 1).
    """
    # Read a single unit from the peer and interpret the payload as an int.
    return int(socket.recv(maxsize=1))
f49717272722be1476cb9bcc08bcbe7b8525c2ba
701,597
import base64


def make_basic_auth_header(username, password):
    """
    create a basic authentication header

    :param username: user name [unicode on py2, str on py3]
    :param password: password [unicode on py2, str on py3]
    :return: basic auth header [str on py2, str on py3]
    """
    # Build "user:pass", base64-encode it, and wrap with str() so the result
    # is the native str type on both python 2 and python 3 (matching what a
    # real web server hands to the auth object).
    credentials = u'%s:%s' % (username, password)
    encoded = base64.b64encode(credentials.encode('utf-8'))
    return 'Basic ' + str(encoded.decode('ascii'))
69900bbc73a4df8e0f2f932a30e6acdb08cb9c4d
701,598
from typing import Union


def alfa_key(alfa: Union[str, int]) -> Union[int, None]:
    """
    Return the numeric value of a possible alfanumeric key name.

    See "alfanumeric key names".

    Parameters
    ----------
    alfa : str | int
        A key name from the alfanumeric names list ('0'-'9', 'A'-'Z') or the
        corresponding index into that list.

    Returns
    -------
    alfa_code : int | None
        The corresponding key code, or None for an unknown name (the original
        implementation fell through to an implicit None as well). Note: the
        original annotated the return as str, but the codes are ints.

    Raises
    ------
    TypeError
        If `alfa` is neither str nor int.

    Examples
    --------
    >>> alfa_key('A')
    29
    >>> alfa_key(10)
    29
    """
    # '0'..'9' map to codes 7..16 and 'A'..'Z' to 29..54; generate the table
    # instead of spelling out all 36 pairs.
    pairs = [(str(digit), 7 + digit) for digit in range(10)] + \
            [(chr(ord('A') + i), 29 + i) for i in range(26)]
    if isinstance(alfa, bool):
        # bool is a subclass of int; the original `type(alfa) == int` check
        # rejected it, so keep rejecting it explicitly.
        raise TypeError()
    if isinstance(alfa, int):
        # Index lookup (IndexError for out-of-range values, as before).
        return pairs[alfa][1]
    if isinstance(alfa, str):
        # O(1) dict lookup replaces the original linear scan.
        return dict(pairs).get(alfa)
    raise TypeError()
f800ec179075f00264d3d797c3df6bcf1e6b80da
701,599
def word_to_col(w):
    """Split a hexadecimal word into a big-endian column of 4 bytes.

    Parameters
    ----------
    w : str
        Hexadecimal 32-bit word.

    Returns
    -------
    list
        4-byte column of integers parsed from the input string.
    """
    value = int(w, 16)
    high = value >> 24
    # Remaining bytes are masked out of the 32-bit value, most significant first.
    return [high, (value >> 16) & 0xff, (value >> 8) & 0xff, value & 0xff]
540dabd4e42eb68ce166f608bfa849b31f1bc2fe
701,600
def _create_documents_per_words(freq_matrix: dict) -> dict: """ Returns a dictionary of words and the number of documents in which they appear. :param freq_matrix: The frequency matrix to be summarized. :return: A dictionary of words and the number of documents in which they appear. """ doc_per_words = dict() for sentence, freq_table in freq_matrix.items(): for word, frequency in freq_table.items(): if word in doc_per_words: doc_per_words[word] += 1 else: doc_per_words[word] = 1 return doc_per_words
3b25081ce3452629de9fdd6afd122bd058ee9acf
701,601
def replace_file_type(file_name, new_type):
    """Replace the extension (text after the last '.') of *file_name*.

    The original implementation called ``str.replace`` with the extension
    substring, which replaced *every* occurrence of it anywhere in the name
    (e.g. ``"a.py.py"`` with ``"txt"`` became ``"a.txt.txt"``). This version
    only touches the final extension.

    :param file_name: file name, e.g. ``"report.txt"``
    :param new_type: new extension without the leading dot, e.g. ``"csv"``
    :return: file name with the extension swapped; if *file_name* contains
        no '.', *new_type* alone is returned (matching the historical
        behavior of replacing the whole name).
    """
    stem, dot, _old_ext = file_name.rpartition(".")
    if not dot:
        # No extension: the old code replaced the entire name with new_type.
        return new_type
    return stem + "." + new_type
c6fd9e01befb0e6f1a96f8884fc9d00332741948
701,602
import functools


def nonced(method):
    """
    Decorates a handler to only accept requests with nonce header.
    If the request is missing the request handler we set the status
    as 400 with a malformed message.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # NOTE(review): `self` is the request handler and is also passed as
        # the `request` argument to every nonce_service call — confirm the
        # framework expects the handler to double as the request object.
        if self.nonce_service.provided(request=self):
            nonce = self.nonce_service.from_request(request=self)
            # consume() returning non-None means the nonce is valid and unused.
            if self.nonce_service.consume(
                    request=self, nonce=nonce) is not None:
                retval = method(self, *args, **kwargs)
                # Clear only after the wrapped method ran, so a failure inside
                # `method` propagates before the nonce is discarded.
                self.nonce_service.clear(request=self, nonce=nonce)
                return retval
            else:
                # Replayed or unknown nonce: reject the request.
                self.nonce_service.block_request(request=self)
        # NOTE(review): when no nonce is provided, or after block_request,
        # the wrapper falls through and returns None; presumably
        # provided()/block_request() set the 400 status on the handler —
        # verify against the framework.
    return wrapper
1cfdd6966fdd7986b0ee85a927a6f7f6acbcfdf7
701,603
def userToJson(user):
    """Returns a serializable User dict

    :param user: User to get info for
    :type user: User
    :returns: dict with the user's id, username, full name and email
    """
    return {
        "id": user.id,
        "username": user.username,
        "name": user.get_full_name(),
        "email": user.email,
    }
da5e11bbc2e8cbdffb25e32e6a47e0ccabe8d33a
701,604
def remove_indices_from_dict(obj):
    """Strip "[i]" index suffixes from every key of *obj*, recursively.

    Keys such as ``"name[0]"`` collapse to ``"name"``; when several indexed
    keys collapse to the same name, their (list) values are concatenated.
    Nested dicts, and dicts inside list values, are processed recursively.

    :raises ValueError: if *obj* is not a dict.
    """
    if not isinstance(obj, dict):
        raise ValueError(u"Expecting a dict, found: {}".format(type(obj)))
    cleaned = {}
    for key, value in obj.items():
        bracket = key.find('[')
        if bracket > -1:
            key = key[:bracket]
        if isinstance(value, dict):
            value = remove_indices_from_dict(value)
        if isinstance(value, list):
            value = [
                remove_indices_from_dict(item) if isinstance(item, dict) else item
                for item in value
            ]
        if key in cleaned:
            # A previous "key[j]" already produced this name: concatenate.
            cleaned[key].extend(value)
        else:
            cleaned[key] = value
    return cleaned
60842ed62b2d79852661f83699bf3bf92b63084a
701,605
def find_block(csv, name):
    """For an Illumina SampleSheet.csv, return a tuple of the index of the
    line containing the header specified by name, and the index of the line
    just past the end of the data block. `range(*r)` will index all lines for
    the block, starting at the header line.
    """
    def is_block_end(fields):
        # A "[Section]" header terminates the previous block...
        if fields and fields[0] and fields[0][0] == "[" and fields[0][-1] == "]":
            return True
        # ...and so does a line whose fields are all empty.
        longest = max([0] + [len(field) for field in fields])
        return longest == 0

    start = None
    for index, fields in enumerate(csv):
        if fields and fields[0] == name:
            start = index
        elif start is not None and is_block_end(fields):
            return start, index
    # Block runs to the end of the sheet; (None, None) when never found.
    return (start, len(csv)) if start is not None else (None, None)
7a1fc119e6e3e889d9a18884028cb2e6a67e0cd5
701,606
def cancel_and_stop_intent_handler(handler_input):
    """Single handler for Cancel and Stop Intent."""
    # type: (HandlerInput) -> Response
    builder = handler_input.response_builder
    speech_text = "Alla prossima!"
    # Speak the farewell message and hand back the built response.
    return builder.speak(speech_text).response
73a9171b5ea01fedb7fc470dd7d6bcd232a605a0
701,607
import json


def load_jsonl(filename):
    """Load a JSON-lines formatted file into a list of parsed objects."""
    with open(filename, "r") as handle:
        # One JSON document per line.
        return [json.loads(line) for line in handle]
7acb3513cf885139e62af56d17c19aa87e1c85ca
701,608
from typing import List def _read_get_graph_source_citation_section(jcamp_dict: dict) -> List[str]: """ Extract and translate from the JCAMP-DX dictionary the SciData JSON-LD citations in the 'sources' section from the '@graph' scection. :param jcamp_dict: JCAMP-DX dictionary to extract citations from :return: List for citation from SciData JSON-LD """ citation = [] if "$ref author" in jcamp_dict: citation.append(f'{jcamp_dict["$ref author"]} :') if "$ref title" in jcamp_dict: citation.append(f'{jcamp_dict["$ref title"]}.') if "$ref journal" in jcamp_dict: citation.append(f'{jcamp_dict["$ref journal"]}') if "$ref volume" in jcamp_dict: citation.append(f'{jcamp_dict["$ref volume"]}') if "$ref date" in jcamp_dict: citation.append(f'({jcamp_dict["$ref date"]})') if "$ref page" in jcamp_dict: citation.append(f'{jcamp_dict["$ref page"]}') return citation
dafe4fd793dd0e47b690d6c1fd745ca89265de39
701,610
def deltaify_traces(traces, final_byte_duration=9999):
    """Convert absolute start times in traces to durations.

    Traces returned by `read_traces_csv` pair bytes with start times. This
    function computes how long each byte remains on the bus and replaces the
    start time with this value in its output. Note that the final duration
    can't be calculated and will be given the duration `final_byte_duration`.

    Args:
      traces: Traces to "deltaify" as described.
      final_byte_duration: Duration to assign to the final byte.

    Returns:
      "Deltaified" traces as described. An empty input yields an empty list
      (the original implementation raised IndexError on it).
    """
    if not traces:
        return []
    # Pair each entry with its successor; duration = next start - this start.
    deltaified = [
        (nxt[0] - cur[0], cur[1]) for cur, nxt in zip(traces, traces[1:])
    ]
    deltaified.append((final_byte_duration, traces[-1][1]))
    return deltaified
8185a9825d4706bdf8a579fcefec5e27ca8c3baa
701,612
def rzpad(value, total_length):
    """
    Right zero pad value `x` at least to length `l`.
    """
    padding = total_length - len(value)
    if padding <= 0:
        # Already long enough; bytes are immutable, safe to return as-is.
        return value
    return value + b"\x00" * padding
76a0884e9f8a65e0ff3efac56223dfa2fbed31b4
701,613
def visual_map(visual_type='color', visual_range=None, visual_text_color=None,
               visual_range_text=None, visual_range_color=None,
               visual_range_size=None, visual_orient='vertical',
               visual_pos="left", visual_top="bottom",
               is_calculable=True, **kwargs):
    """Build the option dict for a continuous visualMap component, which maps
    data values to a visual channel.

    :param visual_type: 'color' maps data to a colour ribbon built from the
        colour stops; 'size' maps data to a symbol-size ribbon.
    :param visual_range: [min, max] data values covered by the component
        (defaults to [0, 100]; ignored unless exactly two values are given).
    :param visual_text_color: colour of the component's label text.
    :param visual_range_text: two end labels given as [low, high]
        (defaults to ['low', 'high']).
    :param visual_range_color: colour stops for the 'color' channel, e.g.
        ['#333', '#78ab23', 'blue'] (at least two required to take effect).
    :param visual_range_size: size stops for the 'size' channel, e.g. [20, 50]
        (at least two required to take effect).
    :param visual_orient: layout of the component, 'horizontal' or 'vertical'.
    :param visual_pos: distance from the container's left side (pixels,
        percentage, or 'left'/'center'/'right').
    :param visual_top: distance from the container's top side (pixels,
        percentage, or 'top'/'middle'/'bottom').
    :param is_calculable: whether to show draggable handles for adjusting the
        selected range.
    :param kwargs: ignored; accepted for interface compatibility.
    :return: dict of visualMap options.
    """
    low, high = 0, 100
    if visual_range and len(visual_range) == 2:
        low, high = visual_range
    text_low, text_high = "low", "high"
    if visual_range_text and len(visual_range_text) == 2:
        text_low, text_high = visual_range_text
    in_range = {}
    if visual_type == 'color':
        colors = ['#50a3ba', '#eac763', '#d94e5d']
        if visual_range_color and len(visual_range_color) >= 2:
            colors = visual_range_color
        in_range['color'] = colors
    if visual_type == 'size':
        sizes = [20, 50]
        if visual_range_size and len(visual_range_size) >= 2:
            sizes = visual_range_size
        in_range['symbolSize'] = sizes
    return {
        "type": "continuous",
        "min": low,
        "max": high,
        "text": [text_high, text_low],
        "textStyle": {"color": visual_text_color},
        "inRange": in_range,
        "calculable": is_calculable,
        "orient": visual_orient,
        "left": visual_pos,
        "top": visual_top,
    }
8e812b50fab992efecaabc8fd5430eb409a4e679
701,614
def compose(*funcs):
    """Return the composition of *funcs*: compose(f, g)(x) == f(g(x))."""
    def composed(value):
        # Apply right-to-left: the innermost (last) function runs first.
        for func in funcs[::-1]:
            value = func(value)
        return value
    return composed
d93d59f2f1979fa35638357fcac5130710e0fda3
701,616
from functools import reduce


def compose(*functions):
    """
    Compose all the function arguments together

    :param functions: Functions to compose
    :return: Single composed function
    """
    # NOTE: `reduce` is kept imported for module compatibility; the fold is
    # written as an explicit right-to-left application loop instead.
    def composed(value):
        for function in reversed(functions):
            value = function(value)
        return value
    return composed
d69ab8953d8e846fffd50aa9c0925935e38e9e38
701,617
def mul_fft(f_fft, g_fft):
    """Multiply two polynomials given in FFT representation, coefficient-wise."""
    # Pointwise product over the length of the first operand (an index error
    # on a shorter second operand is intentional, as in the original).
    return [f_fft[k] * g_fft[k] for k in range(len(f_fft))]
67db62dc812827b6aa7c7406a068ae9e47f97a65
701,618
import torch


def custom_decode_labels(mask, num_images=1, num_classes=20):
    """Extract binary hair and face masks from a segmentation result.

    Args:
        mask: (n, h, w) tensor of per-pixel class ids (argmax of inference).
        num_images: number of images expected in the batch (sanity check only).
        num_classes: unused; kept for interface compatibility.

    Returns:
        (hair_mask, face_mask): tensors shaped like *mask* containing 1 where
        the pixel class is hair (id 2) / face (id 13) and 0 elsewhere.
    """
    n, h, w = mask.shape
    assert (
        n >= num_images
    ), "Batch size %d should be greater or equal than number of images to save %d." % (
        n,
        num_images,
    )
    ones = torch.ones_like(mask)
    zeros = torch.zeros_like(mask)
    hair_mask = torch.where(mask == 2, ones, zeros)
    face_mask = torch.where(mask == 13, ones, zeros)
    return hair_mask, face_mask
d4b7d7a95872b3d541b8f011004ae198f0cb508d
701,619
import re


def clean_text(text):
    """
    Cleans abstract text from scopus documents.

    Args:
        text (str): Unformatted abstract text.

    Returns:
        (str) Abstract text with formatting issues removed, or None when the
        input is None or cleaning fails.
    """
    if text is None:
        return None
    try:
        # Copyright / publisher boilerplate (raw strings: the original used
        # non-raw patterns with invalid "\w" / "\." escapes).
        cleaned = re.sub(r"© ([0-9])\w* The Author(s)*\.( )*", "", text)
        cleaned = re.sub(r"Published by Elsevier Ltd\.", "", cleaned)
        # Join lines broken with an indented continuation.
        cleaned = re.sub("\n ", "", cleaned)
        cleaned = re.sub("\n ", "", cleaned)
        # Collapse any remaining whitespace runs to single spaces.
        cleaned = " ".join("".join(cleaned.split("\n ")).split())
        # Drop a single leading "Abstract " marker.
        return cleaned.replace("Abstract ", '', 1)
    except Exception:
        # Best-effort cleaner: swallow regex/type errors (but, unlike the
        # original bare ``except``, let SystemExit/KeyboardInterrupt through).
        return None
7ffbf3a6ebe0c0caac203cea109e939b5c861724
701,620
from datetime import datetime


def undersc_str2dt(undersc):
    """Converts the format with underscores to a datetime instance.

    The expected shape is "YYYY-MM-DD_HH-MM-SS" with an optional trailing
    "-microseconds" field.

    Args:
        undersc(str): time in underscores-format; date and time are separated
            by "_" and their components by "-".

    Returns:
        `datetime`: datetime instance
    """
    date_part, time_part = undersc.split("_")
    ymd = [int(component) for component in date_part.split("-")]
    hms = [int(component) for component in time_part.split("-")]
    # Unpacking generalizes the original two hard-coded branches (3 or 4 time
    # fields) to any count datetime accepts, while behaving identically for
    # the original inputs.
    return datetime(*ymd, *hms)
36988c6af6a20590d781f7ec1ea1ee2e8713941e
701,622
import os


def convert_path_extension(path, conversion='.png'):
    """Swap the file extension of *path* for *conversion* (default ".png")."""
    root = os.path.splitext(path)[0]
    return root + conversion
6aae3c63885013a05b66e71dc9b90f5c623d2515
701,623
def get_value(rgb, animated=False):
    """
    Obtains pixel value if it is enabled. Color is not supported yet here.

    :param rgb: pixel as an (r, g, b) triple; any positive channel counts as lit
    :param animated: SP1 animation quirk — when animating, the value is inverted
    :return: "1"/"0" string for the pixel
    """
    lit = rgb[0] > 0 or rgb[1] > 0 or rgb[2] > 0
    if animated:
        return "0" if lit else "1"
    return "1" if lit else "0"
da154d07ca007c183a62ea05a9b6bb5d467b6b0c
701,624
import yaml


def load_yaml_config(filepath):
    """Load Krake base configuration settings from a YAML file.

    Args:
        filepath (os.PathLike, optional): Path to the YAML configuration file.

    Raises:
        FileNotFoundError: If no configuration file can be found.

    Returns:
        dict: Krake YAML file configuration.
    """
    # safe_load avoids constructing arbitrary Python objects from the file.
    with open(filepath, "r") as config_file:
        return yaml.safe_load(config_file)
a5970ab968a9da7c89733077834a88b05ca2d6a0
701,625
def max_cum_build_rule(mod, g, p):
    """
    **Constraint Name**: GenNewLin_Max_Cum_Build_Constraint

    **Enforced Over**: GEN_NEW_LIN_VNTS_W_MAX_CONSTRAINT

    Can't build more than certain amount of capacity by period p.
    """
    built = mod.GenNewLin_Capacity_MW[g, p]
    limit = mod.gen_new_lin_max_cumulative_new_build_mw[g, p]
    return built <= limit
9122a05867ccccbe36378c36d34e98462c62f85d
701,626
import os


def writeValidUtf8(in_filename, out_filename, skipOrReplace='replace'):
    """
    Read the input file bytes and write it to the output file as UTF-8.
    When skipOrReplace is 'skip', drop the invalid bytes, else replace
    them with U+FFFD.
    """
    if not os.path.exists(in_filename):
        # Best-effort: report and bail out instead of raising.
        print("File is missing: %s" % in_filename)
        return 0
    # Slurp the raw bytes.
    with open(in_filename, "rb") as handle:
        raw = handle.read()
    # Map the caller's choice onto the codec error-handler names.
    errors = 'ignore' if skipOrReplace == 'skip' else 'replace'
    decoded = raw.decode("utf-8", errors)
    with open(out_filename, "w") as handle:
        handle.write(decoded)
910f197d2657d03891e6d4e76161457773504a73
701,627
def total_seconds(td):
    """Backport of ``timedelta.total_seconds()``, which is new in Python 2.7."""
    microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6
    return float(microseconds) / 10**6
8a25267b3c61a41dee1cbe643174fd20ebb7d835
701,628
from typing import List


def available_years() -> List[int]:
    """List available years with datasets."""
    # 2012 through 2018 inclusive.
    return list(range(2012, 2019))
898fd02e0cde1ac71251cd70069c52c481a66acf
701,629
import aiohttp


async def http_call(url, method, data=None, headers=None):
    """
    Performs an http request

    Args:
        url (str): The URL to send the request to
        method (str): The HTTP method to use (e.g. "GET", "POST")
        data (dict): The data to send with the request
        headers (dict): The headers to send with the request

    Returns:
        The response body parsed as JSON.
    """
    # A fresh ClientSession (owning the headers) is created per call; the
    # individual request carries the payload.
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.request(method, url, data=data) as resp:
            # NOTE(review): this raises if the response body is not JSON —
            # confirm callers expect that.
            return await resp.json()
10d72f32312b7fe9071a8776f784a1ce7c4954d6
701,630
def coordinates_contain_point(coordinates, point):
    """
    This function uses the `Crossing number method`
    (http:#geomalgorithms.com/a03-_inclusion.html) to check whether a polygon
    contains a point or not.

    For each edge (current vertex, previous vertex) we check whether the
    point's y coordinate lies within the edge's y-span; if so, we test
    whether the point is to the left of the edge (i.e. a ray drawn from the
    point to the right would cross it). If the ray crosses the polygon an
    odd number of times, the point is inside.
    """
    inside = False
    prev_index = -1  # start with the closing edge (last vertex -> first)
    for index, vertex in enumerate(coordinates):
        prev_vertex = coordinates[prev_index]
        # Does this edge's y-span straddle the point's y coordinate?
        straddles = ((vertex[1] <= point[1] < prev_vertex[1])
                     or (prev_vertex[1] <= point[1] < vertex[1]))
        if straddles:
            # x of the edge at the point's y; the straddle test guarantees
            # the denominator is non-zero.
            edge_x = ((point[1] - vertex[1]) * (prev_vertex[0] - vertex[0])
                      / (prev_vertex[1] - vertex[1]) + vertex[0])
            if point[0] < edge_x:
                inside = not inside
        prev_index = index
    return inside
97176955324be459595503d204e0e13ce9983562
701,631