Columns: content (string), sha1 (string), id (int64)
import re


def _FormatDataTransferIdentifiers(client, transfer_identifier):
    """Formats a transfer config or run identifier.

    Transfer configuration/run commands should be able to support different
    formats of how the user could input the project information. This function
    will take the user input and create a uniform transfer config or transfer
    run reference that can be used for various commands.

    This function will also set the client's project id to the specified
    project id.

    Returns:
        The formatted transfer config or run.
    """
    formatted_identifier = transfer_identifier
    match = re.search(r'projects/([^/]+)', transfer_identifier)
    if not match:
        formatted_identifier = ('projects/' +
                                client.GetProjectReference().projectId + '/' +
                                transfer_identifier)
    else:
        client.project_id = match.group(1)
    return formatted_identifier
951a3576a1a53f9dd141e718c31c8b0314a550d7
705,575
import numpy as np


def Modelo(Mags, Phi, Me, alpha):
    """Model to fit.

    Parameters
    ----------
    Mags : list
        Observed magnitudes
    Phi, Me, alpha : float, float, float
        Model parameters

    Returns
    -------
    F : list
        Function values
    """
    M = Mags  # alias for readability
    F = []    # will hold the function values
    ij = 0
    while ij < len(M):
        # To keep the definition of "F" short, split the function into
        # factors: F = f1 * f2 * f3
        f1 = 0.4 * np.log(10) * Phi
        f2 = 10 ** (-0.4 * (M[ij] - Me) * (alpha + 1))
        f3 = np.exp(-10 ** (-0.4 * (M[ij] - Me)))
        F.append(f1 * f2 * f3)
        ij = ij + 1
    return F
0e547058032bc682c6d0c5bffa5f00aaa1318989
705,576
import pickle


def read_img_pkl(path):
    """Read an image from a pkl file.

    :param path: the file path
    :type path: str
    :return: the image
    :rtype: tuple
    """
    with open(path, "rb") as file:
        return pickle.load(file)
8c7045d460e0583b02b565b818888c6b7991bc6b
705,577
def conv_compare(node1, node2):
    """Compares two conv_general_dilated nodes."""
    assert node1["op"] == node2["op"] == "conv_general_dilated"
    params1, params2 = node1["eqn"].params, node2["eqn"].params
    for k in ("window_strides", "padding", "lhs_dilation", "rhs_dilation",
              "lhs_shape", "rhs_shape"):
        if len(params1[k]) != len(params2[k]):
            return False
    if (len(params1["dimension_numbers"].lhs_spec) !=
            len(params2["dimension_numbers"].lhs_spec)):
        return False
    if (len(params1["dimension_numbers"].rhs_spec) !=
            len(params2["dimension_numbers"].rhs_spec)):
        return False
    if (len(params1["dimension_numbers"].out_spec) !=
            len(params2["dimension_numbers"].out_spec)):
        return False
    if ((params1["feature_group_count"] > 1) !=
            (params2["feature_group_count"] > 1)):
        return False
    if ((params1["batch_group_count"] > 1) !=
            (params2["batch_group_count"] > 1)):
        return False
    return True
cd7bad7d298e5f3faa971a9c968b3cd3a6a27812
705,578
import requests
from bs4 import BeautifulSoup


def get_urls():
    """Get all sci-hub torrent URLs."""
    source_url = 'http://gen.lib.rus.ec/scimag/repository_torrent/'
    urls_list = []
    try:
        req = requests.get(source_url)
        soups = BeautifulSoup(req.text, 'lxml').find_all('a')
        for soup in soups:
            if '.torrent' not in soup.text:
                continue
            url = source_url + soup.text
            print(url)
            urls_list.append(url)
    except Exception as error:
        print(error)
    finally:
        return urls_list
e14f15ebc7e39393bd614183e1eccb8fc1933359
705,579
import torch


def logsumexp(x, dim):
    """Numerically stable log-sum-exp of `x` over dimension `dim`."""
    offset, _ = torch.max(x, dim=dim)
    offset_broadcasted = offset.unsqueeze(dim)
    safe_log_sum_exp = torch.log(torch.exp(x - offset_broadcasted).sum(dim=dim))
    return safe_log_sum_exp + offset
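A quick sanity check (a hedged example, not from the original source): the result should agree with torch.logsumexp, which computes the same quantity.

import torch

x = torch.randn(3, 5)
assert torch.allclose(logsumexp(x, dim=1), torch.logsumexp(x, dim=1))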
53a12a2c91c6a0cae3fcae46a860801f05480abe
705,580
import os


def ls(directory, create=False):
    """List the contents of a directory, optionally creating it first.

    If create is falsy and the directory does not exist, then an exception
    is raised.
    """
    if create and not os.path.exists(directory):
        os.mkdir(directory)
    onlyfiles = [f for f in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, f))]
    return onlyfiles
42672a4070a00ca35ede6be83d7349518bcdb255
705,581
async def get_prices(database, match_id):
    """Get market prices."""
    query = """
        select
            timestamp::interval(0),
            extract(epoch from timestamp)::integer as timestamp_secs,
            round((food + (food * .3)) * 100) as buy_food,
            round((wood + (wood * .3)) * 100) as buy_wood,
            round((stone + (stone * .3)) * 100) as buy_stone,
            round((food - (food * .3)) * 100) as sell_food,
            round((wood - (wood * .3)) * 100) as sell_wood,
            round((stone - (stone * .3)) * 100) as sell_stone
        from market
        where match_id=:match_id
        order by timestamp
    """
    results = await database.fetch_all(query, values=dict(match_id=match_id))
    return list(map(dict, results))
3571006c37319135a3202622b73e7e2379ca93ee
705,582
import os


def ete_database_data():
    """Return path to the ete3 database json."""
    user = os.environ.get('HOME', '/')
    fp = os.path.join(user, ".mtsv/ete_databases.json")
    if not os.path.isfile(fp):
        with open(fp, 'w') as outfile:
            outfile.write("{}")
    return fp
19fbb418ae91dab9c3dfc94da4a27375f507780b
705,583
import os


def link(srcPath, destPath):
    """Create a hard link from srcPath to destPath."""
    return os.link(srcPath, destPath)
cdcb988e953c3918e616b179f3dc5d547bb75ccf
705,584
import inspect
import sys


def is_implemented_in_notebook(cls):
    """Check whether the remote class is implemented in an interactive
    environment such as IPython or a notebook.

    Args:
        cls: class
    """
    assert inspect.isclass(cls)
    if hasattr(cls, '__module__'):
        cls_module = sys.modules.get(cls.__module__)
        if getattr(cls_module, '__file__', None):
            return False
    return True
f017fea802fc4c98af1282ee51a51304d8ac01d9
705,585
def _pool_tags(hash, name):
    """Return a dict with "hidden" tags to add to the given cluster."""
    return dict(__mrjob_pool_hash=hash, __mrjob_pool_name=name)
de9a9e7faa4d4f9dd3bfe05cb26790ff8ae66397
705,586
def get_nb_build_nodes_and_entities(city, print_out=False):
    """
    Returns number of building nodes and building entities in city

    Parameters
    ----------
    city : object
        City object of pycity_calc
    print_out : bool, optional
        Print out results (default: False)

    Returns
    -------
    res_tuple : tuple
        Results tuple with number of building nodes (int) and number of
        building entities (nb_b_nodes, nb_buildings)

    Annotations
    -----------
    A building node might also be a PV- or wind-farm (not only a building
    entity).
    """
    nb_b_nodes = 0
    nb_buildings = 0
    for n in city.nodes():
        if 'node_type' in city.nodes[n]:
            if city.nodes[n]['node_type'] == 'building':
                if 'entity' in city.nodes[n]:
                    if city.nodes[n]['entity']._kind == 'building':
                        nb_buildings += 1
                    if (city.nodes[n]['entity']._kind == 'building'
                            or city.nodes[n]['entity']._kind == 'windenergyconverter'
                            or city.nodes[n]['entity']._kind == 'pv'):
                        nb_b_nodes += 1
    if print_out:  # pragma: no cover
        print('Number of building nodes (Buildings, Wind- and PV-Farms):')
        print(nb_b_nodes)
        print()
        print('Number of buildings: ', nb_buildings)
        print()
    return (nb_b_nodes, nb_buildings)
ff3b36dcd2ca7cd0be316b573f20a6dd16bd1c1d
705,587
def construct_aircraft_data(args):
    """Create the set of aircraft data.

    :param args: parser argument class
    :return: aircraft_name (string), aircraft_data (list)
    """
    aircraft_name = args.aircraft_name
    aircraft_data = [args.passenger_number, args.overall_length, args.width,
                     args.height, args.fuselage_width, args.fuselage_height,
                     args.max_takeoff_weight, args.max_landing_weight,
                     args.max_zero_fuel_weight, args.cargo_volume,
                     args.cruise_mach, args.cruise_altitude, args.cruise_range,
                     args.lift_by_drag, args.wing_area, args.aspect_ratio,
                     args.rectangle_angle, args.ratio_of_thickness_and_chord,
                     args.vertical_wing_width, args.horizontal_wing_width]
    return aircraft_name, aircraft_data
da77ae883d67879b9c51a511f46173eb5366aead
705,588
def Oplus_simple(ne):
    """ """
    return ne
7476203cb99ee93dddcf9fda249f5532e908e40f
705,589
def lin_exploit(version):
    """The title says it all :)

    Note: kernel versions are compared as plain strings, so the range checks
    below are lexicographic rather than numeric; multi-digit patch levels
    (e.g. '2.6.9' vs '2.6.25') may compare incorrectly.
    """
    kernel = version
    exploits_2_0 = {
        'Segment Limit Privilege Escalation': {'min': '2.0.37', 'max': '2.0.38', 'cve': 'CVE-1999-1166', 'src': 'https://www.exploit-db.com/exploits/19419/'},
    }
    exploits_2_2 = {
        'ptrace kmod Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'},
        'mremap Privilege Escalation': {'min': '2.2.0', 'max': '2.2.26', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'},
        'ptrace setuid Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'},
        'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'},
        'Privileged Process Hijacking Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'},
        'Sendmail Capabilities Privilege Escalation': {'min': '2.2.0', 'max': '2.2.16', 'cve': 'CVE-2000-0506', 'src': 'https://www.exploit-db.com/exploits/20001/'},
    }
    exploits_2_4 = {
        'ptrace kmod Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'},
        'do_brk Privilege Escalation': {'min': '2.4.0', 'max': '2.4.23', 'cve': 'CVE-2003-0961', 'src': 'https://www.exploit-db.com/exploits/131/'},
        'do_mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.24', 'cve': 'CVE-2003-0985', 'src': 'https://www.exploit-db.com/exploits/145/'},
        'mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.25', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'},
        'uselib Privilege Escalation': {'min': '2.4.0', 'max': '2.4.29-rc2', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'},
        'bluez Privilege Escalation': {'min': '2.4.6', 'max': '2.4.30-rc2', 'cve': 'CVE-2005-0750', 'src': 'https://www.exploit-db.com/exploits/926/'},
        'System Call Emulation Privilege Escalation': {'min': '2.4.0', 'max': '2.4.37.10', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'},
        'ptrace setuid Privilege Escalation': {'min': '2.4.0', 'max': '2.4.10', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'},
        'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.4.0', 'max': '2.4.4', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'},
        'Privileged Process Hijacking Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'},
        'sock_sendpage Privilege Escalation': {'min': '2.4.4', 'max': '2.4.37.4', 'cve': 'CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'},
        'pipe.c Privilege Escalation': {'min': '2.4.1', 'max': '2.4.37', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/9844/'},
        'Ptrace Privilege Escalation': {'min': '2.4.0', 'max': '2.4.35.3', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'},
    }
    exploits_2_6 = {
        'mremap Privilege Escalation': {'min': '2.6.0', 'max': '2.6.2', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'},
        'uselib Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'},
        'bluez Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'CVE-2005-0750', 'src': 'https://www.exploit-db.com/exploits/926/'},
        'SYS_EPoll_Wait Privilege Escalation': {'min': '2.6.0', 'max': '2.6.12', 'cve': 'CVE-2005-0736', 'src': 'https://www.exploit-db.com/exploits/1397/'},
        'logrotate prctl Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': 'CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2031/'},
        'proc Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': 'CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2013/'},
        'System Call Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'},
        'BlueTooth Stack Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/4756/'},
        'vmsplice Privilege Escalation': {'min': '2.6.17', 'max': '2.6.24.1', 'cve': 'CVE-2008-0600', 'src': 'https://www.exploit-db.com/exploits/5092/'},
        'ftruncate()/open() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22', 'cve': 'CVE-2008-4210', 'src': 'https://www.exploit-db.com/exploits/6851/'},
        'exit_notify() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.30-rc1', 'cve': 'CVE-2009-1337', 'src': 'https://www.exploit-db.com/exploits/8369/'},
        'UDEV Privilege Escalation': {'min': '2.6.0', 'max': '2.6.40', 'cve': 'CVE-2009-1185', 'src': 'https://www.exploit-db.com/exploits/8478/'},
        'ptrace_attach() Race Condition': {'min': '2.6.0', 'max': '2.6.30-rc4', 'cve': 'CVE-2009-1527', 'src': 'https://www.exploit-db.com/exploits/8673/'},
        'Samba Share Privilege Escalation': {'min': '2.6.0', 'max': '2.6.39', 'cve': 'CVE-2004-0186', 'src': 'https://www.exploit-db.com/exploits/23674/'},
        'ReiserFS xattr Privilege Escalation': {'min': '2.6.0', 'max': '2.6.35', 'cve': 'CVE-2010-1146', 'src': 'https://www.exploit-db.com/exploits/12130/'},
        'sock_sendpage Privilege Escalation': {'min': '2.6.6', 'max': '2.6.30.5', 'cve': 'CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'},
        'pipe.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-rc6', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/33322/'},
        'Sys_Tee Privilege Escalation': {'min': '2.6.0', 'max': '2.6.17.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/29714/'},
        'Linux Kernel Privilege Escalation': {'min': '2.6.18', 'max': '2.6.18-20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/10613/'},
        'Dirty COW': {'min': '2.6.22', 'max': '4.8.3', 'cve': 'CVE-2016-5195', 'src': 'https://www.exploit-db.com/exploits/40616/'},
        'compat Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36', 'cve': 'CVE-2010-3081', 'src': 'https://www.exploit-db.com/exploits/15024/'},
        'DEC Alpha Linux - Privilege Escalation': {'min': '2.6.28', 'max': '3.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/17391/'},
        'SELinux (RHEL 5) - Privilege Escalation': {'min': '2.6.30', 'max': '2.6.31', 'cve': 'CVE-2009-1897', 'src': 'https://www.exploit-db.com/exploits/9191/'},
        'proc Handling SUID Privilege Escalation': {'min': '2.6.0', 'max': '2.6.38', 'cve': 'CVE-2011-1020', 'src': 'https://www.exploit-db.com/exploits/41770/'},
        'PERF_EVENTS Privilege Escalation': {'min': '2.6.32', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/25444/'},
        'RDS Protocol Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc8', 'cve': 'CVE-2010-3904', 'src': 'https://www.exploit-db.com/exploits/15285/'},
        'Full-Nelson.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37', 'cve': 'CVE-2010-4258', 'src': 'https://www.exploit-db.com/exploits/15704/'},
        'Mempodipper Privilege Escalation': {'min': '2.6.39', 'max': '3.2.2', 'cve': 'CVE-2012-0056', 'src': 'https://www.exploit-db.com/exploits/35161/'},
        'Ext4 move extents ioctl Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-git6', 'cve': 'CVE-2009-4131', 'src': 'https://www.exploit-db.com/exploits/33395/'},
        'Ptrace Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'},
        'udp_sendmsg Privilege Escalation': {'min': '2.6.0', 'max': '2.6.19', 'cve': 'CVE-2009-2698', 'src': 'https://www.exploit-db.com/exploits/9575/'},
        'fasync_helper() Privilege Escalation': {'min': '2.6.28', 'max': '2.6.33-rc4-git1', 'cve': 'CVE-2009-4141', 'src': 'https://www.exploit-db.com/exploits/33523/'},
        'CAP_SYS_ADMIN Privilege Escalation': {'min': '2.6.34', 'max': '2.6.40', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/15916/'},
        'CAN BCM Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc1', 'cve': 'CVE-2010-2959', 'src': 'https://www.exploit-db.com/exploits/14814/'},
        'ia32syscall Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc4-git2', 'cve': 'CVE-2010-3301', 'src': 'https://www.exploit-db.com/exploits/15023/'},
        'Half-Nelson.c Econet Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36.2', 'cve': 'CVE-2010-3848', 'src': 'https://www.exploit-db.com/exploits/17787/'},
        'ACPI custom_method Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37-rc2', 'cve': 'CVE-2010-4347', 'src': 'https://www.exploit-db.com/exploits/15774/'},
        'SGID Privilege Escalation': {'min': '2.6.32.62', 'max': '3.14.8', 'cve': 'CVE-2014-4014', 'src': 'https://www.exploit-db.com/exploits/33824/'},
        'libfutex Privilege Escalation': {'min': '2.6.4', 'max': '3.14.6', 'cve': 'CVE-2014-3153', 'src': 'https://www.exploit-db.com/exploits/35370/'},
        'perf_swevent_init Privilege Escalation': {'min': '2.6.37', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/26131/'},
        'MSR Driver Privilege Escalation': {'min': '2.6', 'max': '3.7.6', 'cve': 'CVE-2013-0268', 'src': 'https://www.exploit-db.com/exploits/27297/'},
    }
    exploits_3 = {
        'overlayfs Privilege Escalation': {'min': '3.0.0', 'max': '3.19.0', 'cve': 'CVE-2015-1328', 'src': 'https://www.exploit-db.com/exploits/37292/'},
        'CLONE_NEWUSER|CLONE_FS Privilege Escalation': {'min': '3.0', 'max': '3.3.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/38390/'},
        'SO_SNDBUFFORCE & SO_RCVBUFFORCE Local Privilege Escalation': {'min': '3.5', 'max': '4.8.14', 'cve': 'CVE-2016-9793', 'src': 'https://www.exploit-db.com/exploits/41995/'},
        'Raw Mode PTY Echo Race Condition Privilege Escalation': {'min': '3.14-rc1', 'max': '3.16', 'cve': 'CVE-2014-0196', 'src': 'https://www.exploit-db.com/exploits/33516/'},
        'sock_diag_handlers() Privilege Escalation': {'min': '3.3.0', 'max': '3.7.10', 'cve': 'CVE-2013-1763', 'src': 'https://www.exploit-db.com/exploits/24555/'},
        'b43 Wireless Driver Privilege Escalation': {'min': '3.0', 'max': '3.9.4', 'cve': 'CVE-2013-2852', 'src': 'https://www.exploit-db.com/exploits/38559/'},
        'CONFIG_X86_X32=y Privilege Escalation': {'min': '3.4', 'max': '3.13.2', 'cve': 'CVE-2014-0038', 'src': 'https://www.exploit-db.com/exploits/31347/'},
        'Double-free usb-midi SMEP Local Privilege Escalation': {'min': '3.0', 'max': '4.5', 'cve': 'CVE-2016-2384', 'src': 'https://www.exploit-db.com/exploits/41999/'},
        'Remount FUSE Privilege Escalation': {'min': '3.2', 'max': '3.16.1', 'cve': 'CVE-2014-5207', 'src': 'https://www.exploit-db.com/exploits/34923/'},
        'ptrace/sysret Privilege Escalation': {'min': '3.0', 'max': '3.15.4', 'cve': 'CVE-2014-4699', 'src': 'https://www.exploit-db.com/exploits/34134/'},
        'open-time Capability file_ns_capable() Privilege Escalation': {'min': '3.0', 'max': '3.8.9', 'cve': 'CVE-2013-1959', 'src': 'https://www.exploit-db.com/exploits/25450/'},
        'REFCOUNT Overflow/Use-After-Free in Keyrings Privilege Escalation': {'min': '3.8.0', 'max': '4.4.1', 'cve': 'CVE-2016-0728', 'src': 'https://www.exploit-db.com/exploits/39277/'},
    }
    exploits_4 = {
        'overlayfs Privilege Escalation': {'min': '4.0', 'max': '4.3.3', 'cve': 'CVE-2015-8660', 'src': 'https://www.exploit-db.com/exploits/39166/'},
        'BPF Privilege Escalation': {'min': '4.4.0', 'max': '4.5.5', 'cve': 'CVE-2016-4557', 'src': 'https://www.exploit-db.com/exploits/39772/'},
        'AF_PACKET Race Condition Privilege Escalation': {'min': '4.2.0', 'max': '4.9.0-2', 'cve': 'CVE-2016-8655', 'src': 'https://www.exploit-db.com/exploits/40871/'},
        'DCCP Double-Free Privilege Escalation': {'min': '4.4.0', 'max': '4.9.11', 'cve': 'CVE-2017-6074', 'src': 'https://www.exploit-db.com/exploits/41458/'},
        'Netfilter target_offset Out-of-Bounds Privilege Escalation': {'min': '4.4.0-21-generic', 'max': '4.4.0-31-generic', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/40049/'},
        'IP6T_SO_SET_REPLACE Privilege Escalation': {'min': '4.6.2', 'max': '4.6.3', 'cve': 'CVE-2016-4997', 'src': 'https://www.exploit-db.com/exploits/40489/'},
        'Packet Socket Local Privilege Escalation': {'min': '4.8.0', 'max': '4.10.6', 'cve': 'CVE-2017-7308', 'src': 'https://www.exploit-db.com/exploits/41994/'},
        'UDEV < 232 - Privilege Escalation': {'min': '4.8.0', 'max': '4.9.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/41886/'},
    }

    def first_match(exploits):
        # return the first exploit whose [min, max) range contains the kernel
        for name, exploit in exploits.items():
            if exploit['min'] <= kernel < exploit['max']:
                return name, exploit['cve'], exploit['src']
        return None

    if kernel.startswith('2.2'):
        return first_match(exploits_2_2)
    elif kernel.startswith('2.4'):
        return first_match(exploits_2_4)
    elif kernel.startswith('2.6'):
        return first_match(exploits_2_6)
    elif kernel.startswith('2.0'):
        return first_match(exploits_2_0)
    elif kernel.startswith('3'):
        return first_match(exploits_3)
    elif kernel.startswith('4'):
        return first_match(exploits_4)
    else:
        return 'No exploits found for this kernel version'
499e21091fb508b26564d06ad119d8b8ea783443
705,590
def cartesian2complex(real, imag):
    """Calculate the complex number from the cartesian form: z = z' + i * z".

    Args:
        real (float|np.ndarray): The real part z' of the complex number.
        imag (float|np.ndarray): The imaginary part z" of the complex number.

    Returns:
        z (complex|np.ndarray): The complex number: z = z' + i * z".
    """
    return real + 1j * imag
1fd44bc0accff8c9f26edfa84f4fcfafb2323728
705,591
def upper_case(string):
    """Returns its argument in upper case.

    :param string: str
    :return: str
    """
    return string.upper()
bbf3fc8b856d466ec73229145443566d85a3457a
705,592
import os


def createNewLetterSession(letter):
    """Take a letter and create the next session folder (the session id is
    the current max id + 1). Return the path to the session directory.
    """
    # Search for the last training folder
    path = "gestures_database/" + letter + "/"
    last = -1
    for r, d, f in os.walk(path):
        for folder in d:
            last = max(last, int(folder))
    # Create the next data folder for the current session
    path += str(last + 1).zfill(3)
    os.mkdir(path)
    path += "/"
    return path
fc6da33f4a815db30c09bf6477b9707323a6da05
705,594
def total_cost(content_cost, style_cost, alpha, beta):
    """Return a tensor representing the total cost."""
    return alpha * content_cost + beta * style_cost
98d42bd8d62dc8cd7110b2f5eb9a9a4e4eb6bc65
705,595
import codecs


def get_line(file_path, line_rule):
    """Return the contents of the given file from a start line to an end line.

    :param file_path: the file to read
    :param line_rule: the line-range rule (start and end line, comma-separated,
        with a trailing delimiter character that is stripped)
    :return: list of matching lines
    """
    s_line = int(line_rule.split(',')[0])
    e_line = int(line_rule.split(',')[1][:-1])
    result = []
    # with open(file_path) as file:
    file = codecs.open(file_path, "r", encoding='utf-8', errors='ignore')
    line_number = 0
    for line in file:
        line_number += 1
        if s_line <= line_number <= e_line:
            result.append(line)
    return result
a6ccda48f8083e5ff6827306f4abd7f19e8d445c
705,596
def _is_unique_rec_name(info_name):
    """Helper method to see if we should use the uniqueness recommendation
    on the fact comparison.
    """
    UNIQUE_INFO_SUFFIXES = [".ipv4_addresses", ".ipv6_addresses", ".mac_address"]
    UNIQUE_INFO_PREFIXES = ["fqdn"]
    if info_name.startswith("network_interfaces.lo."):
        return False
    for prefix in UNIQUE_INFO_PREFIXES:
        if info_name.startswith(prefix):
            return True
    for suffix in UNIQUE_INFO_SUFFIXES:
        if info_name.endswith(suffix):
            return True
    return False
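A few illustrative calls (a hedged example; the fact names are invented for illustration, only the prefixes/suffixes come from the code):

assert _is_unique_rec_name("fqdn") is True
assert _is_unique_rec_name("network_interfaces.eth0.mac_address") is True
assert _is_unique_rec_name("network_interfaces.lo.ipv4_addresses") is False  # loopback is excluded
assert _is_unique_rec_name("os_version") is False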
cba744e1e5b6a9612363d2ca12d4751e1894c8ad
705,597
def _json_view_params(shape, affine, vmin, vmax, cut_slices, black_bg=False,
                      opacity=1, draw_cross=True, annotate=True, title=None,
                      colorbar=True, value=True):
    """Create a dictionary with all the brainsprite parameters.

    Returns: params
    """
    # Set color parameters
    if black_bg:
        cfont = '#FFFFFF'
        cbg = '#000000'
    else:
        cfont = '#000000'
        cbg = '#FFFFFF'

    # Deal with limitations of json dump regarding types
    if type(vmin).__module__ == 'numpy':
        vmin = vmin.tolist()  # json does not deal with numpy array
    if type(vmax).__module__ == 'numpy':
        vmax = vmax.tolist()  # json does not deal with numpy array

    params = {'canvas': '3Dviewer',
              'sprite': 'spriteImg',
              'nbSlice': {'X': shape[0], 'Y': shape[1], 'Z': shape[2]},
              'overlay': {'sprite': 'overlayImg',
                          'nbSlice': {'X': shape[0], 'Y': shape[1], 'Z': shape[2]},
                          'opacity': opacity},
              'colorBackground': cbg,
              'colorFont': cfont,
              'crosshair': draw_cross,
              'affine': affine.tolist(),
              'flagCoordinates': annotate,
              'title': title,
              'flagValue': value,
              'numSlice': {'X': cut_slices[0] - 1,
                           'Y': cut_slices[1] - 1,
                           'Z': cut_slices[2] - 1}}

    if colorbar:
        params['colorMap'] = {'img': 'colorMap', 'min': vmin, 'max': vmax}
    return params
50ea71a5a99facf4c472f0c18984d84e23b8e301
705,598
def add_metadata(infile, outfile, sample_metadata):
    """Add sample-level metadata to a biom file. Sample-level metadata
    should be in a format akin to
    http://qiime.org/tutorials/tutorial.html#mapping-file-tab-delimited-txt

    :param infile: String; name of the biom file to which metadata shall
                   be added
    :param outfile: String; name of the resulting metadata-enriched biom file
    :param sample_metadata: String; name of the sample-level metadata
                            tab-delimited text file. Sample attributes are
                            taken from this file.

    Note: the sample names in the `sample_metadata` file must match the
    sample names in the biom file.

    External dependencies
      - biom-format: http://biom-format.org/
    """
    return {
        "name": "biom_add_metadata: " + infile,
        "actions": [("biom add-metadata"
                     " -i " + infile +
                     " -o " + outfile +
                     " -m " + sample_metadata)],
        "file_dep": [infile],
        "targets": [outfile],
    }
e779f876159741de60e99002a90906b151dc7530
705,599
def get_qc_data(sample_prj, p_con, s_con, fc_id=None):
    """Get qc data for a project, possibly subset by flowcell.

    :param sample_prj: project identifier
    :param p_con: object of type <ProjectSummaryConnection>
    :param s_con: object of type <SampleRunMetricsConnection>
    :param fc_id: optional flowcell identifier
    :returns: dictionary of qc results
    """
    project = p_con.get_entry(sample_prj)
    application = project.get("application", None) if project else None
    samples = s_con.get_samples(fc_id=fc_id, sample_prj=sample_prj)
    qcdata = {}
    for s in samples:
        qcdata[s["name"]] = {
            "sample": s.get("barcode_name", None),
            "project": s.get("sample_prj", None),
            "lane": s.get("lane", None),
            "flowcell": s.get("flowcell", None),
            "date": s.get("date", None),
            "application": application,
            "TOTAL_READS": int(s.get("picard_metrics", {}).get("AL_PAIR", {}).get("TOTAL_READS", -1)),
            "PERCENT_DUPLICATION": s.get("picard_metrics", {}).get("DUP_metrics", {}).get("PERCENT_DUPLICATION", "-1.0"),
            "MEAN_INSERT_SIZE": float(s.get("picard_metrics", {}).get("INS_metrics", {}).get("MEAN_INSERT_SIZE", "-1.0").replace(",", ".")),
            "GENOME_SIZE": int(s.get("picard_metrics", {}).get("HS_metrics", {}).get("GENOME_SIZE", -1)),
            "FOLD_ENRICHMENT": float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("FOLD_ENRICHMENT", "-1.0").replace(",", ".")),
            "PCT_USABLE_BASES_ON_TARGET": s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_USABLE_BASES_ON_TARGET", "-1.0"),
            "PCT_TARGET_BASES_10X": s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_TARGET_BASES_10X", "-1.0"),
            "PCT_PF_READS_ALIGNED": s.get("picard_metrics", {}).get("AL_PAIR", {}).get("PCT_PF_READS_ALIGNED", "-1.0"),
        }
        target_territory = float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("TARGET_TERRITORY", -1))
        pct_labels = ["PERCENT_DUPLICATION", "PCT_USABLE_BASES_ON_TARGET",
                      "PCT_TARGET_BASES_10X", "PCT_PF_READS_ALIGNED"]
        for l in pct_labels:
            if qcdata[s["name"]][l]:
                qcdata[s["name"]][l] = float(qcdata[s["name"]][l].replace(",", ".")) * 100
        if qcdata[s["name"]]["FOLD_ENRICHMENT"] and qcdata[s["name"]]["GENOME_SIZE"] and target_territory:
            qcdata[s["name"]]["PERCENT_ON_TARGET"] = float(
                qcdata[s["name"]]["FOLD_ENRICHMENT"] /
                (float(qcdata[s["name"]]["GENOME_SIZE"]) / float(target_territory))) * 100
    return qcdata
f267148f48f86151852e12fa3be8d5f8aefc6b11
705,600
def sql_sanitize(sql_name):
    """Return a SQL name (table or column) cleaned of problematic characters,
    e.g. punctuation such as )(][; and whitespace, ideally retaining only
    alphanumeric characters. Don't use this with values, which can be
    properly escaped with parameterization.

    Credits: Donald Miner, Source: StackOverflow, DateAccessed: 2020-02-20
    """
    sanitize_name = "".join(char for char in sql_name if char.isalnum())
    return sanitize_name
9ce9e0e8bed2348079fb23f2d27c53880fa1c795
705,601
def render_horizontal_fields(*fields_to_render, **kwargs):
    """Render given fields with optional labels."""
    labels = kwargs.get('labels', True)
    media = kwargs.get('media')
    hidden_fields = []
    visible_fields = []
    for bound_field in fields_to_render:
        if bound_field.field.widget.is_hidden:
            hidden_fields.append(bound_field)
        else:
            visible_fields.append(bound_field)
    return {
        'fields_to_render': fields_to_render,
        'hidden_fields': hidden_fields,
        'visible_fields': visible_fields,
        'labels': labels,
        'media': media,
    }
22ac9c05b602c0f65ab2fc348ab9399855780bc3
705,602
def midpoint(rooms):
    """Helper function to find the midpoint between the two rooms.

    Args:
        rooms: list of rooms

    Returns:
        tuple: Midpoint coordinates (x, y)
    """
    return (rooms[0] + (rooms[0] + rooms[2]) // 2,
            rooms[1] + (rooms[1] + rooms[3]) // 2)
60b3ba53fb15154ff97ab9c6fa3cf1b726bc2df1
705,603
import random


def generate_concept_chain(concept_desc, sequential):
    """Given a list of available concepts, generate a dict with (start, id)
    pairs giving the start of each concept.

    Parameters
    ----------
    sequential : bool
        If true, concept transitions are determined by ID without randomness.
    """
    concept_chain = []
    num_samples = 0
    more_appearences = True
    appearence = 0
    while more_appearences:
        concepts_still_to_appear = []
        for cID in concept_desc:
            concept = concept_desc[cID]
            if concept.appearences > appearence:
                concepts_still_to_appear.append(concept)
        more_appearences = len(concepts_still_to_appear) > 0
        for concept in concepts_still_to_appear:
            concept_chain.append(concept.id)
            num_samples += concept.examples_per_appearence
        appearence += 1
    if not sequential:
        random.shuffle(concept_chain)
    return concept_chain, num_samples
fcfeb345d92d627684d04da4c1d445120554bf15
705,604
def doFilter(pTable, proxyService):
    """Filter candidates by column header.

    Candidates: column headers are kept if they support at least
    (minSupport * #rows) many cells; only filter for columns that are part
    of the targets (if activated).

    Subsequently remove:
      - CTA candidates with less support
      - CEA candidates that do not support any of the remaining CTA
        candidates of their column
    """
    # keep track of whether this changed anything
    changed = False

    # table cols
    cols = pTable.getCols(unsolved=False)

    # process each column separately
    for col in cols:
        if not col['sel_cand']:
            continue

        # check if we have to process this column at all
        if not pTable.isTarget(col_id=col['col_id']):
            continue

        # grab all cells in this column
        cells = pTable.getCells(col_id=col['col_id'])
        beforeCount = len(cells)

        # get the hierarchy over our candidates
        hierarchy = proxyService.get_hierarchy_for_lst.send([col['sel_cand']['uri']])
        typesSupported = [col['sel_cand']['uri']]
        for parentList in hierarchy.values():
            typesSupported.extend([item['parent'] for item in parentList])
        typesSupported = list(set(typesSupported))

        # remove all CEA candidates from the cells that are not associated
        # with any remaining type; iterate over a copy of the candidate
        # list, since removing items from a list while iterating over it
        # silently skips elements
        for cell in cells:
            for cand in list(cell['cand']):
                try:
                    foundTypes = [t for t in cand['types'] if t in typesSupported]
                    if not foundTypes:
                        cell['cand'].remove(cand)
                        changed = True
                except KeyError:
                    # candidate without type information is purged as well
                    cell['cand'].remove(cand)
                    changed = True

    # done
    return changed
8b28f945e94e37302b2086e23f695c40c08b8d7c
705,605
def int_or_float(x):
    """Convert `x` to either `int` or `float`, preferring `int`.

    Raises:
        ValueError : If `x` is not convertible to either `int` or `float`
    """
    try:
        return int(x)
    except ValueError:
        return float(x)
d0a4def320f88655e494f89b7239e47e1ee70d0d
705,606
def nohighlight(nick):
    """Add a ZWNJ to nick to prevent highlight."""
    return nick[0] + "\u200c" + nick[1:]
1b8d0cafc5df4a442daafdece59af1675ab1de33
705,607
import inspect


def obj_src(py_obj, escape_docstring=True):
    """Get the source for the python object that gets passed in

    Parameters
    ----------
    py_obj : callable
        Any python object
    escape_docstring : bool
        If true, prepend the escape character to the docstring triple quotes

    Returns
    -------
    str
        Source code

    Raises
    ------
    IOError
        Raised if the source code cannot be retrieved
    """
    src = inspect.getsource(py_obj)
    if escape_docstring:
        # str.replace returns a new string, so the result must be
        # reassigned (the original discarded it, making this a no-op)
        src = src.replace("'''", "\\'''")
        src = src.replace('"""', '\\"""')
    return src
    # return src.split('\n')
8ce0c7cc7672de5005b5a1c60e6b6cf5fa9ee050
705,608
import random


def mutate_word(word):
    """Introduce a random change into the word: delete, swap, repeat, or
    add a stray character. This may raise a ValueError.
    """
    word = list(word)
    choice = random.randrange(4)
    if choice == 0:
        # Delete a character
        word.pop(random.randrange(len(word)))
    elif choice == 1:
        # Swap two characters
        index = random.randrange(0, len(word) - 1)
        word[index], word[index + 1] = word[index + 1], word[index]
    elif choice == 2:
        # Repeat a character
        index = random.randrange(0, len(word))
        word.insert(index, word[index])
    elif choice == 3:
        # Insert a stray character
        char = chr(random.randint(ord('a'), ord('z')))
        word.insert(random.randint(0, len(word)), char)
    return ''.join(word)
f3b45f36893a7541131710ada5f1343387f06797
705,610
def pcc_vector(v1, v2):
    """Pearson Correlation Coefficient for 2 vectors."""
    len1 = len(v1)
    len2 = len(v2)
    if len1 != len2:
        return None
    length = len1
    avg1 = 1.0 * sum(v1) / len(v1)
    avg2 = 1.0 * sum(v2) / len(v2)
    dxy = [(v1[i] - avg1) * (v2[i] - avg2) for i in range(length)]
    dx2 = [(v1[i] - avg1) ** 2 for i in range(length)]
    dy2 = [(v2[i] - avg2) ** 2 for i in range(length)]
    return sum(dxy) / (sum(dx2) * sum(dy2)) ** 0.5
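A quick check (a hedged example): vectors related by a positive linear map give a coefficient of 1.0, and reversing one gives -1.0.

assert abs(pcc_vector([1, 2, 3], [2, 4, 6]) - 1.0) < 1e-9
assert abs(pcc_vector([1, 2, 3], [6, 4, 2]) + 1.0) < 1e-9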
98e5f3cc304a5d844be479d65ab7eeb760a34ba3
705,611
def decode(argument: str) -> tuple[list[int], ...]:
    """Decode an argument string from the command line.

    :param argument: argument string
    :return: pair of lists of digits
    """
    char_lists = map(list, argument.split('-'))
    range_ = tuple(list(map(int, clist)) for clist in char_lists)
    return range_
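For example (a hedged illustration), a dash-separated argument is split into per-digit lists:

assert decode("123-456") == ([1, 2, 3], [4, 5, 6])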
d3805396cab52fc09896ca9553f1ac3450f27e99
705,612
import os


def join_legacy_read_path(sample_path: str, suffix: int) -> str:
    """
    Create a path string for a sample read file using the old file name
    convention (eg. reads_1.fastq).

    :param sample_path: the path to the sample directory
    :param suffix: the read file suffix
    :return: the read path
    """
    return os.path.join(sample_path, f"reads_{suffix}.fastq")
b6e12de4edfec05fb8a5fa2363dce284dcfdd5f0
705,613
def wrapper_configuration_get():  # noqa: E501
    """Gets configuration details on the current wrapper configuration  # noqa: E501

    :rtype: object
    """
    return 'do some magic!'
85ac6abbf09f93a08295584d7051aad2e8cad8d6
705,614
def d_enter_waste_cooler(W_mass, rho_waste, w_drift):
    """
    Calculates the tube's diameter of enter waste to waste cooler.

    Parameters
    ----------
    W_mass : float
        The mass flow rate of waste, [kg/s]
    rho_waste : float
        The density of liquid at boiling temperature, [kg/m**3]
    w_drift : float
        The speed of steam at the tube, [m/s]

    Returns
    -------
    d_enter_waste_cooler : float
        The tube's diameter of enter waste to waste cooler, [m]

    References
    ----------
    &&&
    """
    # 0.785 = pi / 4; the original used a decimal comma (0,785), which
    # Python parses as a tuple and which broke the expression
    return W_mass / (0.785 * rho_waste * w_drift)
651c1adc0b90a286c2c8685c389268bc8834ad73
705,615
import re
import os
import time


def approx_version_number():
    """
    In the event that git is unavailable and the VERSION file is not present,
    this returns a "version number" with the following precedence:

    - version number from path
        downloads of viral-ngs from GitHub tagged releases are likely to be
        extracted into directories containing the version number. If they
        contain a version number in the form d.d.d, we can use it
    - modification time of this file (unix timestamp)
        file modification time for github releases corresponds to when the
        release archives were created, a rough way to ballpark the release
        date. If we can't get the version number from the path we can at
        least use the modification time of this file as a proxy for the
        true version number
    - the current time (unix timestamp)
        the current time is better than not having any version number
    """
    version = ""
    version_re = re.compile(r"(?:(\d+)\.)?(?:(\d+)\.)?(?:(\d+))")

    # path relative to version.py
    viral_ngs_path = os.path.basename(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    # for tagged releases, it is likely the version number is part of
    # the viral-ngs root directory name
    matches = version_re.search(viral_ngs_path)
    if matches and len([n for n in matches.groups() if n]) == 3:
        version = ".".join(map(str, matches.groups()))
    else:
        try:
            mtime = os.path.getmtime(__file__)
        except OSError:
            mtime = 0
        if mtime > 0:
            # if we could get the modification time of the current file, use it
            version = str(int(mtime))
        else:
            # just use the current time
            version = str(int(time.time()))
    return version
16adf3e5c274cf86bc9a7b3b53889e5340021798
705,616
def _x_orientation_rep_dict(x_orientation):
    """Helper function to create a replacement dict based on x_orientation."""
    if x_orientation.lower() == 'east' or x_orientation.lower() == 'e':
        return {'x': 'e', 'y': 'n'}
    elif x_orientation.lower() == 'north' or x_orientation.lower() == 'n':
        return {'x': 'n', 'y': 'e'}
    else:
        raise ValueError('x_orientation not recognized.')
83434a8aef7003146a19c470b831e8e9cfa85f19
705,617
def _get_option_of_highest_precedence(config, option_name):
    """Looks in the config and returns the option of the highest precedence.

    This assumes that there are options and flags that are equivalent.

    Args:
        config (_pytest.config.Config): The pytest config object
        option_name (str): The name of the option

    Returns:
        str: The value of the option that is of highest precedence
        None: no value is present
    """
    # Try to get configs from CLI and ini
    try:
        cli_option = config.getoption("--{}".format(option_name))
    except ValueError:
        cli_option = None
    try:
        ini_option = config.getini(option_name)
    except ValueError:
        ini_option = None

    highest_precedence = cli_option or ini_option
    return highest_precedence
4f3bca4ff5b0a1eb04fbdc7a5d22bc09dbc95df6
705,618
import math


def isPrime(n):
    """Check whether a positive integer is prime, using trial division."""
    if n <= 1:
        return False
    if n == 2:
        return True
    i = 2
    thres = math.ceil(math.sqrt(n))
    while i <= thres:
        if n % i == 0:
            return False
        i += 1
    return True
458775fbd324dc976c91a035898b3122e6bc1109
705,620
def action_prop(param, val=1):
    """A param that performs an action."""
    def fdo(self):
        self.setter(param, val)
    return fdo
6a4f6e7e178e62755113d6b93a59534675dfa2dd
705,621
def find_or_create(find, create):
    """Given a find and a create function, create a resource if it doesn't
    exist."""
    result = find()
    return result if result else create()
ffe608bf2da1b83d662b93266f4309976424300f
705,622
import math


def Gsigma(sigma):
    """Return a Gaussian function G(x) closed over the given sigma."""
    def G(x):
        return (math.e ** (-(x ** 2) / (2 * sigma ** 2))) / (2 * math.pi * sigma ** 2) ** 0.5
    return G
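A quick usage check (a hedged example): at x = 0 the exponential term is 1, so the closure returns the normalization factor 1 / sqrt(2 * pi * sigma**2).

import math

g = Gsigma(2.0)
assert abs(g(0) - 1 / math.sqrt(2 * math.pi * 2.0 ** 2)) < 1e-12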
77eac3ca8b6ced0063074527b83c50e8681f980d
705,623
def transform(data, transformer):
    """This hook defines how DataRobot will use the trained object from
    fit() to transform new data.

    DataRobot runs this hook when the task is used for scoring inside a
    blueprint. As an output, this hook is expected to return the transformed
    data. The input parameters are passed by DataRobot based on dataset and
    blueprint configuration.

    Parameters
    ----------
    data: pd.DataFrame
        Data that DataRobot passes for transformation.
    transformer: Any
        Trained object, extracted by DataRobot from the artifact created
        inside fit(). In this example, it's a function.

    Returns
    -------
    pd.DataFrame
        Returns a dataframe with transformed data.
    """
    return data.apply(transformer)
b52577c0b2a3f3edb1297dcf9c567f9845f04bd5
705,624
def convert_decimal_to_binary(number):
    """
    Parameters
    ----------
    number: int

    Returns
    -------
    out: str

    >>> convert_decimal_to_binary(10)
    '1010'
    """
    return bin(number)[2:]
01a9be2e70c87091adc1d85759075668da9270f2
705,626
def choisir_action():
    """Choose between encrypting and decrypting.

    Input: -
    Output: True for encrypt, False for decrypt
    """
    action_est_crypter = True
    action = input("What is the action, encrypt or decrypt?\n"
                   "<Enter> to encrypt, any other key to decrypt, "
                   "or <Ctrl> + Z or X to stop.\n")
    if action:
        action_est_crypter = False
    return action_est_crypter
c0bceb748afb1fc32b865136c4a477f06a6412b2
705,627
import ast
import random


def t_rename_local_variables(the_ast, all_sites=False):
    """Local variables get replaced by holes."""
    changed = False
    candidates = []
    for node in ast.walk(the_ast):
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
            if node.id not in [c.id for c in candidates]:
                candidates.append(node)

    if len(candidates) == 0:
        return False, the_ast

    if not all_sites:
        selected = [random.choice(candidates)]
    else:
        selected = candidates

    local_var_defs = {}
    for cnt, s in enumerate(selected, start=1):
        local_var_defs[s.id] = cnt

    to_rename = []
    for node in ast.walk(the_ast):
        if isinstance(node, ast.Name) and node.id in local_var_defs:
            to_rename.append((node, local_var_defs[node.id]))

    for node, idx in to_rename:
        changed = True
        node.id = 'VAR' + str(idx)

    return changed, the_ast
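A small usage sketch (a hedged example; assumes Python 3.9+ for ast.unparse, and the snippet below is illustrative input, not from the original source):

import ast

tree = ast.parse("x = 1\ny = x + 2")
changed, tree = t_rename_local_variables(tree, all_sites=True)
print(ast.unparse(tree))  # e.g. VAR1 = 1 / VAR2 = VAR1 + 2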
8faeea81faac55d5d45b897776cd87cb508404a5
705,628
from typing import List


def get_scale(notes: List[str]) -> int:
    """Convert a list of notes to a scale constant.

    # Args
    - *notes*: list of notes in the scale. This should be a list of strings
      where each string is a note in ABC notation. Sharps should be
      represented with a pound sign preceding the note, e.g. '#A', and flats
      should be represented with a lower case b preceding the note,
      e.g. 'bB'.

    # Returns
    An integer mask used to represent a musical key or scale as an argument
    to any of the MusicalHash methods.

    # Raises
    A ValueError if an invalid string is included in the input list.
    """
    note_map = {'A': 0x1, '#A': 0x2, 'bB': 0x2, 'B': 0x4, 'C': 0x8,
                '#C': 0x10, 'bD': 0x10, 'D': 0x20, '#D': 0x40, 'bE': 0x40,
                'E': 0x80, 'F': 0x100, '#F': 0x200, 'bG': 0x200, 'G': 0x400,
                '#G': 0x800, 'bA': 0x800}
    scale = 0x0
    for note in notes:
        try:
            scale |= note_map[note]
        except KeyError:
            raise ValueError(
                'The string {} is not a valid musical note'.format(note))
    return scale
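For example (a hedged illustration), the notes of an A minor triad OR together into a single bitmask:

assert get_scale(['A', 'C', 'E']) == 0x89  # 0x1 | 0x8 | 0x80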
91cbcc7bfa05df52adf741b85f78beeabf819966
705,629
import math


def slurm_format_bytes_ceil(n):
    """Format bytes as text.

    SLURM expects KiB, MiB or GiB, but names them KB, MB, GB. SLURM does not
    handle bytes, it only starts at KB.

    >>> slurm_format_bytes_ceil(1)
    '1K'
    >>> slurm_format_bytes_ceil(1234)
    '2K'
    >>> slurm_format_bytes_ceil(12345678)
    '13M'
    >>> slurm_format_bytes_ceil(1234567890)
    '2G'
    >>> slurm_format_bytes_ceil(15000000000)
    '14G'
    """
    if n >= (1024 ** 3):
        return "%dG" % math.ceil(n / (1024 ** 3))
    if n >= (1024 ** 2):
        return "%dM" % math.ceil(n / (1024 ** 2))
    if n >= 1024:
        return "%dK" % math.ceil(n / 1024)
    # "1K" has no format placeholder, so the original `"1K" % n` raised a
    # TypeError; return the literal string instead
    return "1K"
ce48c778b9605105ed9b66a55d27796fb90499cc
705,630
def make_word_dict():
    """Read 'words.txt' and create a word dict from it."""
    word_dict = dict()
    fin = open('words.txt')
    for line in fin:
        word = line.strip()
        word_dict[word] = ''
    return word_dict
a4213cf5ff246200c7a55a6d1525d6fd6067e31f
705,631
def toCamelCase(string: str):
    """Converts a string to camel case.

    Parameters
    ----------
    string: str
        The string to convert
    """
    string = str(string)
    if string.isupper():
        return string
    split = string.split("_")  # split by underscore
    final_split = []
    for s in split:
        final_split.extend(s.split(" "))  # split by space
    return "".join(l.capitalize() if index > 0 else l
                   for index, l in enumerate(final_split))
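For instance (a hedged illustration), underscores and spaces both act as word boundaries, while all-uppercase strings pass through unchanged:

assert toCamelCase("hello_world example") == "helloWorldExample"
assert toCamelCase("HTML") == "HTML"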
5197ad3353f2e88ccf1dfca62aeae59260e016e7
705,632
def rowwidth(view, row):
    """Returns the number of characters of ``row`` in ``view``."""
    return view.rowcol(view.line(view.text_point(row, 0)).end())[1]
f8db1bf6e3d512d1a2bd5eeb059af93e8ac3bc5f
705,633
def retournerTas(x, numéro):
    """retournerTas(x, numéro): flips the part of the pile x that starts
    at index numéro.
    """
    tasDuBas = x[:numéro]
    tasDuHaut = x[numéro:]
    tasDuHaut.reverse()
    result = tasDuBas + tasDuHaut
    # print(result)
    return result
579798cf5fe8bec02109bfd46c5a945faee1a42c
705,634
def sec2msec(sec):
    """Convert `sec` to milliseconds."""
    return int(sec * 1000)
f1b3c0bf60ab56615ed93f295e7716e56c6a1117
705,635
def object_get_HostChilds(obj):
    """Return a list of objects that have set Host(s) to this object."""
    # source:
    # FreeCAD/src/Mod/Arch/ArchComponent.py
    # https://github.com/FreeCAD/FreeCAD/blob/master/src/Mod/Arch/ArchComponent.py#L1109
    # def getHosts(self, obj)
    hosts = []
    for link in obj.InListRecursive:
        if hasattr(link, "Host"):
            if link.Host:
                if link.Host == obj:
                    hosts.append(link)
        elif hasattr(link, "Hosts"):
            if link.Hosts:
                if obj in link.Hosts:
                    hosts.append(link)
    return hosts
dccba2ef151207ebaa42728ee1395e1b0ec48e7d
705,636
import torch


def collate_fn(batch):
    """Collate function for combining Hdf5Dataset returns.

    :param batch: list
        List of items in a batch
    :return: tuple
        Tuple of items to return
    """
    # batch is a list of items
    numEntries = []
    allTensors = []
    allLabels = []
    for item in batch:
        assert len(item) % 2 == 0, "Both labels and tensors are expected"
        numEntries.append(len(item) // 2)
        allTensors.extend(item[: len(item) // 2])
        allLabels.extend(item[len(item) // 2:])

    # Determine how much to pad each tensor and pad it; always pad on the
    # right side
    maxLength = max([t.shape[-1] for t in allTensors])
    newAllTensors = []
    paddings = []
    for t in allTensors:
        numTotalPad = maxLength - t.shape[-1]
        if numTotalPad > 0:
            pad = (0, numTotalPad)
            t = torch.nn.functional.pad(t, pad)
            paddings.append(numTotalPad)
        else:
            paddings.append(0)
        newAllTensors.append(t)

    allTensors = torch.stack(newAllTensors, dim=0)
    allLabels = torch.Tensor(allLabels)
    numEntries = torch.LongTensor(numEntries)
    allPaddings = torch.LongTensor(paddings)
    return allTensors, allLabels, allPaddings, numEntries
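A small usage sketch (a hedged example; the batch layout — tensors in the first half of each item, one label per tensor in the second half — follows the assertion in the code):

import torch

batch = [
    (torch.randn(8), torch.randn(5), 0.0, 1.0),  # two tensors, two labels
]
tensors, labels, paddings, num_entries = collate_fn(batch)
print(tensors.shape)  # torch.Size([2, 8]) — the shorter tensor is right-padded
print(paddings)       # tensor([0, 3])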
b49ec88b4de844787d24140f5ef99ad9a573c6e3
705,637
import torch


def sparsity_line(M, tol=1.0e-3, device='cpu'):
    """Get the line sparsity (%) of M.

    Attributes:
        M: Tensor - the matrix.
        tol: Scalar, optional - the threshold to select zeros.
        device: device, cpu or gpu

    Returns:
        sparsity: Scalar (%) - the sparsity of the matrix.
    """
    if type(M) is not torch.Tensor:
        M = torch.as_tensor(M, device=device)
    M1 = torch.where(torch.abs(M) < tol, torch.zeros_like(M), M)
    M1_sum = torch.sum(M1, 1)
    nb_nonzero = len(M1_sum.nonzero())
    return (1.0 - nb_nonzero / M1.shape[0]) * 100
b8675a768c8686571d1f7709d89e3abeb5b56a80
705,639
import math


def tgamma(x):
    """'tgamma' function taking into account the uncertainties."""
    fun = getattr(x, '__tgamma__', None)
    if fun:
        return fun()
    return math.gamma(x)
35c73e2e0a9945cb38beffb6376dd7b7bc6443e9
705,640
import os


def get_template_filepath(filename, basepath="templates"):
    """Get the full path to the config templates, using a relative path to
    where the shippy script is stored.

    :param filename: (str) Name of the template file to look for
    :param basepath: (str) Base directory to search for templates.
        Default: /templates
    :return: (str) Path to template if found
    :raises: (SystemExit) If template file doesn't exist
    """
    local_path = os.path.dirname(__file__)
    path = os.path.dirname(os.path.abspath(os.path.join(local_path, basepath, filename)))
    if os.path.isdir(path):
        return path
    else:
        raise SystemExit(f"Could not find template files in: {path}, bailing...")
f1972c3366590449d9d747b1d03153e6fb0f1f2b
705,641
import random


def findKthSmallest(self, nums, k):
    """
    :type nums: List[int]
    :type k: int
    :rtype: int
    """
    def partition(left, right, pivot_index):
        pivot = nums[pivot_index]
        # 1. move pivot to end
        nums[pivot_index], nums[right] = nums[right], nums[pivot_index]
        # 2. move all smaller elements to the left
        store_index = left
        for i in range(left, right):
            if nums[i] < pivot:
                nums[store_index], nums[i] = nums[i], nums[store_index]
                store_index += 1
        # 3. move pivot to its final place
        nums[right], nums[store_index] = nums[store_index], nums[right]
        return store_index

    def select(left, right, k_smallest):
        """Returns the k-th smallest element of list within left..right."""
        if left == right:  # If the list contains only one element,
            return nums[left]  # return that element
        # select a random pivot_index between left and right
        pivot_index = random.randint(left, right)
        # find the pivot position in a sorted list
        pivot_index = partition(left, right, pivot_index)
        # the pivot is in its final sorted position
        if k_smallest == pivot_index:
            return nums[k_smallest]
        # go left
        elif k_smallest < pivot_index:
            return select(left, pivot_index - 1, k_smallest)
        # go right
        else:
            return select(pivot_index + 1, right, k_smallest)

    return select(0, len(nums) - 1, k)
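A quick call (a hedged example; `self` is unused outside the original class context, `k` is a 0-based index here, and the function partially sorts `nums` in place):

nums = [7, 10, 4, 3, 20, 15]
print(findKthSmallest(None, nums, 2))  # 7 — the third-smallest element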
d82176bd9539cf36416c5dc3c7da53a99f2a8f62
705,642
def lang_not_found(s):
    """Is called when the language files aren't found."""
    return s + "⚙"
064d73e10d6e2aa9436557b38941ed2eb020d7bb
705,643
def same_variable(a, b):
    """
    This function tells whether the two objects are in fact the same object
    (True) or not (False), even when the two hold the same information.

    @param      a       any object
    @param      b       any object
    @return             ``True`` or ``False``

    .. faqref::
        :tag: python
        :title: What is an immutable type?
        :lid: faq-py-immutable

        A variable of an *immutable* type cannot be modified. This mainly
        concerns:

        - ``int``, ``float``, ``str``, ``tuple``

        If a variable has an immutable type, every operation implicitly
        creates a copy of the object. Dictionaries and lists are
        *mutable* (modifiable). For a variable of such a type, writing
        ``a = b`` makes ``a`` and ``b`` designate the same object even
        though they are two different names: the same memory location is
        reachable through two identifiers. For example::

            a = (2, 3)
            b = a
            a += (4, 5)
            print(a == b)  # --> False
            print(a, b)    # --> (2, 3, 4, 5) (2, 3)

            a = [2, 3]
            b = a
            a += [4, 5]
            print(a == b)  # --> True
            print(a, b)    # --> [2, 3, 4, 5] [2, 3, 4, 5]

        In the first case, the type (``tuple``) is immutable, so the
        operator ``+=`` implicitly hides a copy. In the second case, the
        type (``list``) is mutable, so ``+=`` avoids the copy since the
        variable can be modified in place. Even though ``b = a`` is
        executed before the next statement, it does **not** preserve the
        state of ``a`` before the elements are appended. Another example::

            a = [1, 2]
            b = a
            a[0] = -1
            print(a)  # --> [-1, 2]
            print(b)  # --> [-1, 2]

        To copy a list, the copy must be requested explicitly::

            a = [1, 2]
            b = list(a)
            a[0] = -1
            print(a)  # --> [-1, 2]
            print(b)  # --> [1, 2]

        The page `Immutable Sequence Types <https://docs.python.org/3/library/stdtypes.html?highlight=immutable#immutable-sequence-types>`_
        gives more detail on which types are mutable and which are
        immutable. Among the standard types:

        * **immutable**

          * `bool <https://docs.python.org/3/library/functions.html#bool>`_
          * `int <https://docs.python.org/3/library/functions.html#int>`_,
            `float <https://docs.python.org/3/library/functions.html#float>`_,
            `complex <https://docs.python.org/3/library/functions.html#complex>`_
          * `str <https://docs.python.org/3/library/functions.html#func-str>`_,
            `bytes <https://docs.python.org/3/library/functions.html#bytes>`_
          * `None <https://docs.python.org/3/library/constants.html?highlight=none#None>`_
          * `tuple <https://docs.python.org/3/library/functions.html#func-tuple>`_,
            `frozenset <https://docs.python.org/3/library/functions.html#func-frozenset>`_

        * **mutable**, by default all other types, including:

          * `list <https://docs.python.org/3/library/functions.html#func-list>`_
          * `dict <https://docs.python.org/3/library/functions.html#func-dict>`_
          * `set <https://docs.python.org/3/library/functions.html#func-set>`_
          * `bytearray <https://docs.python.org/3/library/functions.html#bytearray>`_

        An instance of a class is mutable. It can be made immutable with a
        few tricks:

        * `__slots__ <https://docs.python.org/3/reference/datamodel.html?highlight=_slots__#object.__slots__>`_
        * `How to Create Immutable Classes in Python <http://www.blog.pythonlibrary.org/2014/01/17/how-to-create-immutable-classes-in-python/>`_
        * `Ways to make a class immutable in Python <http://stackoverflow.com/questions/4996815/ways-to-make-a-class-immutable-in-python>`_
        * `freeze <https://freeze.readthedocs.org/en/latest/>`_

        Finally, for objects nested inside one another — a list of lists,
        a class containing dictionaries and lists — a shallow copy is
        distinguished from a full copy (**deepcopy**). For a list of lists,
        the shallow copy only copies the outer list::

            import copy
            l1 = [[0, 1], [2, 3]]
            l2 = copy.copy(l1)
            l1[0][0] = '##'
            print(l1, l2)  # --> [['##', 1], [2, 3]] [['##', 1], [2, 3]]
            l1[0] = [10, 10]
            print(l1, l2)  # --> [[10, 10], [2, 3]] [['##', 1], [2, 3]]

        The deep copy also copies the contained objects::

            import copy
            l1 = [[0, 1], [2, 3]]
            l2 = copy.deepcopy(l1)
            l1[0][0] = '##'
            print(l1, l2)  # --> [['##', 1], [2, 3]] [[0, 1], [2, 3]]

        Both functions apply to any Python object:
        `module copy <https://docs.python.org/3/library/copy.html>`_.
    """
    return id(a) == id(b)
0c33a33e01e5457c7216982df580abc90db47d2f
705,644
import math


def yolox_semi_warm_cos_lr(
    lr,
    min_lr_ratio,
    warmup_lr_start,
    total_iters,
    normal_iters,
    no_aug_iters,
    warmup_total_iters,
    semi_iters,
    iters_per_epoch,
    iters_per_epoch_semi,
    iters,
):
    """Cosine learning rate with warm up."""
    min_lr = lr * min_lr_ratio
    if iters <= warmup_total_iters:
        # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start
        lr = (lr - warmup_lr_start) * pow(
            iters / float(warmup_total_iters), 2
        ) + warmup_lr_start
    elif iters >= normal_iters + semi_iters:
        lr = min_lr
    elif iters <= normal_iters:
        lr = min_lr + 0.5 * (lr - min_lr) * (
            1.0 + math.cos(
                math.pi * (iters - warmup_total_iters)
                / (total_iters - warmup_total_iters - no_aug_iters)
            )
        )
    else:
        lr = min_lr + 0.5 * (lr - min_lr) * (
            1.0 + math.cos(
                math.pi * (
                    normal_iters - warmup_total_iters
                    + (iters - normal_iters) * iters_per_epoch * 1.0
                    / iters_per_epoch_semi
                )
                / (total_iters - warmup_total_iters - no_aug_iters)
            )
        )
    return lr
ac6b1850031a5c36f8de2c7597c374bc401aaee3
705,645
def on_segment(p, r, q, epsilon):
    """Given three colinear points p, q, r and a threshold epsilon,
    determine whether point q lies on line segment pr.
    """
    # Taken from http://stackoverflow.com/questions/328107/how-can-you-determine-a-point-is-between-two-other-points-on-a-line-segment
    crossproduct = (q.y - p.y) * (r.x - p.x) - (q.x - p.x) * (r.y - p.y)
    if abs(crossproduct) > epsilon:
        return False  # (or != 0 if using integers)

    dotproduct = (q.x - p.x) * (r.x - p.x) + (q.y - p.y) * (r.y - p.y)
    if dotproduct < 0:
        return False

    squaredlengthba = (r.x - p.x) * (r.x - p.x) + (r.y - p.y) * (r.y - p.y)
    if dotproduct > squaredlengthba:
        return False

    return True
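A quick check (a hedged example; any object with x and y attributes works — here a namedtuple stands in for the point type assumed by the code):

from collections import namedtuple

Point = namedtuple("Point", "x y")
assert on_segment(Point(0, 0), Point(4, 4), Point(2, 2), epsilon=1e-9) is True
assert on_segment(Point(0, 0), Point(4, 4), Point(5, 5), epsilon=1e-9) is False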
b8517fc9d3c6d916cac698913c35ba4e5d873697
705,646
def ja_nein_vielleicht(*args):
    """Without arguments, this function builds a yes/no/maybe choice.
    With one argument, it returns the value of the corresponding choice.
    """
    values = {
        True: "Vermutlich ja",
        False: "Vermutlich nein",
        None: "Kann ich noch nicht sagen"
    }
    if args:
        return values[args[0]]
    else:
        return [
            {True: values[True]},
            {False: values[False]},
            {None: values[None]}
        ]
a4e58ab3f2dc9662e1c054ddfd32ff1ae988b438
705,647
def get_group(yaml_dict):
    """Return the attributes of the light group.

    :param yaml_dict:
    :return:
    """
    group_name = list(yaml_dict["groups"].keys())[0]
    group_dict = yaml_dict["groups"][group_name]
    # Check group_dict has an id attribute
    if 'id' not in group_dict.keys():
        print("Error, expected to find an 'id' attribute in the group object")
    return group_dict
db9e027594d3a9a9e0a1838da62316cfe6e0c380
705,648
def return_state_dict(network):
    """Save model to state_dict."""
    feat_model = {k: v.cpu() for k, v in network["feat_model"].state_dict().items()}
    classifier = {k: v.cpu() for k, v in network["classifier"].state_dict().items()}
    return {"feat_model": feat_model, "classifier": classifier}
c0bcd9bd84f7c722c7de5f52d12cf6762a86e1e0
705,649
def transform(func, geom):
    """Applies `func` to all coordinates of `geom` and returns a new
    geometry of the same type from the transformed coordinates.

    `func` maps x, y, and optionally z to output xp, yp, zp. The input
    parameters may be iterable types like lists or arrays or single values.
    The output shall be of the same type. Scalars in, scalars out.
    Lists in, lists out.

    For example, here is an identity function applicable to both types
    of input.

      def id_func(x, y, z=None):
          return tuple(filter(None, [x, y, z]))

      g2 = transform(id_func, g1)

    Using pyproj >= 2.1, this example will accurately project Shapely
    geometries:

      import pyproj

      wgs84 = pyproj.CRS('EPSG:4326')
      utm = pyproj.CRS('EPSG:32618')

      project = pyproj.Transformer.from_crs(wgs84, utm, always_xy=True).transform
      g2 = transform(project, g1)

    Note that the always_xy kwarg is required here as Shapely geometries
    only support X,Y coordinate ordering.

    Lambda expressions such as the one in

      g2 = transform(lambda x, y, z=None: (x+1.0, y+1.0), g1)

    also satisfy the requirements for `func`.
    """
    if geom.is_empty:
        return geom
    if geom.type in ('Point', 'LineString', 'LinearRing', 'Polygon'):

        # First we try to apply func to x, y, z sequences. When func is
        # optimized for sequences, this is the fastest, though zipping
        # the results up to go back into the geometry constructors adds
        # extra cost.
        try:
            if geom.type in ('Point', 'LineString', 'LinearRing'):
                return type(geom)(zip(*func(*zip(*geom.coords))))
            elif geom.type == 'Polygon':
                shell = type(geom.exterior)(
                    zip(*func(*zip(*geom.exterior.coords))))
                holes = list(type(ring)(zip(*func(*zip(*ring.coords))))
                             for ring in geom.interiors)
                return type(geom)(shell, holes)

        # A func that assumes x, y, z are single values will likely raise a
        # TypeError, in which case we'll try again.
        except TypeError:
            if geom.type in ('Point', 'LineString', 'LinearRing'):
                return type(geom)([func(*c) for c in geom.coords])
            elif geom.type == 'Polygon':
                shell = type(geom.exterior)(
                    [func(*c) for c in geom.exterior.coords])
                holes = list(type(ring)([func(*c) for c in ring.coords])
                             for ring in geom.interiors)
                return type(geom)(shell, holes)

    elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
        return type(geom)([transform(func, part) for part in geom.geoms])
    else:
        raise ValueError('Type %r not recognized' % geom.type)
71bde1500ec8370a7718542ee26181d2aad6591f
705,650
def dict_zip(*dicts):
    """
    Take a series of dicts that share the same keys and zip them:
    return a dict mapping each key to the list of that key's values
    across the input dicts.
    """
    keyset = set(dicts[0])
    for d in dicts:
        if set(d) != keyset:
            raise KeyError(f"Mismatched keysets in dict_zip: {sorted(keyset)}, {sorted(set(d))}")
    return {
        key: [d[key] for d in dicts]
        for key in keyset
    }
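# Usage sketch (not part of the original source). Note that iteration is
# over a set, so the key order of the result may vary:
a = {"x": 1, "y": 2}
b = {"x": 10, "y": 20}
print(dict_zip(a, b))  # {'x': [1, 10], 'y': [2, 20]}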
47416641a6451828b78ae6dfd81a48676fcea71f
705,651
import argparse


def process_command_line():
    """
    Parse command line arguments from ``sys.argv[1:]``.

    Return a Namespace representing the argument list.
    """
    # Create the parser
    parser = argparse.ArgumentParser(prog='obflow_6_output',
                                     description='Run inpatient OB simulation output processor')

    # Add arguments
    parser.add_argument(
        "output_path", type=str,
        help="Destination Path for output summary files"
    )

    parser.add_argument(
        "suffix", type=str,
        help="String to append to various summary filenames"
    )

    parser.add_argument('--process_logs', dest='process_logs', action='store_true')

    parser.add_argument(
        "--stop_log_path", type=str, default=None,
        help="Path containing stop logs"
    )

    parser.add_argument(
        "--occ_stats_path", type=str, default=None,
        help="Path containing occ stats csvs"
    )

    parser.add_argument(
        "--run_time", type=float, default=None,
        help="Simulation run time"
    )

    parser.add_argument(
        "--warmup_time", type=float, default=None,
        help="Simulation warmup time"
    )

    parser.add_argument('--include_inputs', dest='include_inputs', action='store_true')

    parser.add_argument(
        "--scenario_inputs_path", type=str, default=None,
        help="Filename for scenario inputs"
    )

    # parser.add_argument('--include_qng_approx', dest='include_qng_approx', action='store_true')

    # Do the parsing
    args = parser.parse_args()
    return args
5563be1fa3e122222fccd9ca1edfce25907dcc58
705,652
import os


def path_splitter(path):
    """
    Split a path into its constituent parts.

    Might be better written as a recursive function.

    :param path: The path to split.
    :return: A list of the path's constituent parts.
    """
    res = []
    while True:
        p = os.path.split(path)
        if p[0] == path:
            # We're done; this is an absolute path.
            res.insert(0, p[0])
            break
        elif p[1] == path:
            # We're done; this is a relative path. Insert the remaining
            # component (p[1]), not the empty head, so it is not lost.
            res.insert(0, p[1])
            break
        else:
            path = p[0]
            res.insert(0, p[1])
    return res
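# Usage sketch (not part of the original source); POSIX-style paths assumed:
print(path_splitter("/usr/local/bin"))      # ['/', 'usr', 'local', 'bin']
print(path_splitter("docs/img/logo.png"))   # ['docs', 'img', 'logo.png']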
cf9ec119eb302ff45b7835a00e235215110c8dc5
705,654
def check_flush(hand):
    """Check whether the hand has a flush; returns a boolean."""
    # A flush means every card shares the suit of the first card.
    return len(hand) == len(hand.by_suit(hand[0].suit))
de11f50f11b477e61f284063c7f0da0dda2dd87e
705,655
import torch


def binary_accuracy(preds, y):
    """
    Returns accuracy per batch.

    :param preds: prediction logits
    :param y: target labels
    :return: accuracy = fraction of correct predictions
    """
    # Round predictions to the closest integer (0 or 1).
    rounded_predictions = torch.round(torch.sigmoid(preds))
    correct = (rounded_predictions == y).float()
    acc = correct.sum() / len(correct)
    return acc
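# Usage sketch (not part of the original source):
logits = torch.tensor([2.0, -1.0, 0.5, -3.0])
labels = torch.tensor([1.0, 0.0, 0.0, 0.0])
print(binary_accuracy(logits, labels))  # tensor(0.7500); sigmoid(0.5) rounds to 1, a miss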
2a321bb9e60a937a879619c2fa3baf1cbe968a33
705,656
import csv


def load_taxondump(idpath):
    """Import the Acidobacteria taxon IDs from a two-column CSV file,
    mapping the second column to the first."""
    taxons = {}
    with open(idpath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            taxons[row[1]] = row[0]
    return taxons
b20c973f97d609b646e5c15be7cc320019f21236
705,657
import re


def _to_numeric_range(cell):
    """
    Translate an Excel cell (eg 'A1') into a (col, row) tuple indexed
    from zero, e.g. 'A1' returns (0, 0).
    """
    # Raw string avoids invalid-escape warnings for \$ and \d.
    match = re.match(r"^\$?([A-Z]+)\$?(\d+)$", cell.upper())
    if not match:
        raise RuntimeError("'%s' is not a valid excel cell address" % cell)
    col, row = match.groups()

    # Interpret the column letters as a base-26 number (A=1 ... Z=26).
    col_digits = map(lambda c: ord(c) - ord("A") + 1, col)
    col = 0
    for digit in col_digits:
        col = (col * 26) + digit

    row = int(row) - 1
    col = col - 1

    return col, row
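# Usage sketch (not part of the original source):
print(_to_numeric_range("A1"))    # (0, 0)
print(_to_numeric_range("$B$2"))  # (1, 1)
print(_to_numeric_range("AA10"))  # (26, 9)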
468f452a7e4d4b045ecbb1a1fc261712fb25f3fc
705,658
def iter_children(param, childlist=None):
    """
    | Iterator over all sub children of a given parameter.
    | Returns all children names.

    =============== ================================= ====================================
    **Parameters**  **Type**                          **Description**
    *param*         instance of pyqtgraph parameter   the root node to be traversed
    *childlist*     list                              the list used to accumulate child names
    =============== ================================= ====================================

    Returns
    -------
    childlist : parameter list
        The list of the children from the given node.

    Examples
    --------
    >>> import custom_parameter_tree as cpt
    >>> from pyqtgraph.parametertree import Parameter
    >>> # Creating the example tree
    >>> settings = Parameter(name='settings')
    >>> child1 = Parameter(name='child1', value=10)
    >>> child2 = Parameter(name='child2', value=10, visible=True, type='group')
    >>> child2_1 = Parameter(name='child2_1', value=10)
    >>> child2_2 = Parameter(name='child2_2', value=10)
    >>> child2.addChildren([child2_1, child2_2])
    >>> settings.addChildren([child1, child2])
    >>> # Get the child list from the param argument
    >>> childlist = cpt.iter_children(settings)
    >>> # Verify the integrity of the result
    >>> print(childlist)
    ['child1', 'child2', 'child2_1', 'child2_2']
    """
    # Avoid a mutable default argument: a shared default list would leak
    # names between successive top-level calls.
    if childlist is None:
        childlist = []
    for child in param.children():
        childlist.append(child.name())
        if child.type() == 'group':
            childlist.extend(iter_children(child, []))
    return childlist
2edbdccc5957cbe6131da70d6dfc24ea67a19e69
705,659
import re


def parse_regex(ctx, param, values):
    """Compile a regex if given.

    :param click.Context ctx: click command context.
    :param click.Parameter param: click command parameter (in this case,
        ``ignore_regex`` from ``-r|--ignore-regex``).
    :param list(str) values: list of regular expressions to be compiled.

    :return: a list of compiled regular expressions.

    .. versionchanged:: 1.1.3
        parameter value (``values``) must be a ``list`` of ``str``s.
    """
    if not values:
        return
    return [re.compile(v) for v in values]
b920d5a406ac3b7a8f28bb9125313c90eec5e212
705,661
import os


def FileJustRoot(fileName):
    """Get just the root of the file name (the path without its extension)."""
    try:
        return os.path.splitext(fileName)[0]
    except Exception:
        # Fall back to an empty string for inputs splitext cannot handle.
        return ""
18fed9fbbaa0d5f3f08c89ff36a1f752605c52d2
705,662
def get_query_string(**kwargs):
    """
    Concatenates the non-None keyword arguments to create a query string
    for ElasticSearch.

    :return: concatenated query string, or None if no arguments were given
    """
    q = ['%s:%s' % (key, value)
         for key, value in kwargs.items()
         if value not in (None, '')]
    return ' AND '.join(q) or None
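# Usage sketch (not part of the original source):
print(get_query_string(status="active", owner=None, name="demo"))
# status:active AND name:demo
print(get_query_string())  # None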
cc73c157a8975e5df9c98efcd5b10396e5175486
705,663
def add_quotes(path):
    """Return the path wrapped in double quotes if it contains spaces
    (and no embedded double quotes); otherwise return it unchanged."""
    quotes = '"' if ' ' in path and '"' not in path else ''
    return '{quotes}{path}{quotes}'.format(quotes=quotes, path=path)
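# Usage sketch (not part of the original source):
print(add_quotes(r"C:\Program Files\tool.exe"))  # "C:\Program Files\tool.exe" (quoted)
print(add_quotes("/usr/bin/tool"))               # /usr/bin/tool (unchanged)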
6e65da4512183ef62a0ac22b4c3c74f9e5273fbd
705,664
def find_possible_words(word: str, dictionary: list) -> list:
    """Return all dictionary entries that start and end with the same
    characters as `word` and use only characters occurring in `word`."""
    possible_words = []
    first_character = word[0]
    last_character = word[len(word) - 1]
    for dictionary_entry in dictionary:
        if (dictionary_entry.startswith(first_character)
                and dictionary_entry.endswith(last_character)):
            for character in dictionary_entry:
                if character in word:
                    continue
                else:
                    break
            else:
                # The loop finished without a break: every character of the
                # entry appears in `word`.
                possible_words.append(dictionary_entry)
    return possible_words
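# Usage sketch (not part of the original source):
words = ["tad", "toad", "tread", "dart"]
print(find_possible_words("tread", words))  # ['tad', 'tread']
# 'toad' is rejected ('o' is not in "tread"); 'dart' does not start with 't'.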
a3e63e6b6b9d8de3ca718cfc8e031bbc34630d50
705,665
def bostock_cat_colors(color_sets=["set3"]):
    """
    Get almost as many categorical colors as you please.
    Get more than one of the color brewer sets with ['set1', 'set2'].

    Parameters
    ----------
    color_sets : list
        list of color sets to return
        valid options are (set1, set2, set3, pastel1, pastel2,
        paired, dark, accent, category10)

    Returns
    -------
    categorical_colors : list
        list of strings (e.g. ["#e41a1c",...])

    Examples
    --------
    >>> bostock_cat_colors(['set3'])[:5]
    ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3']

    >>> bostock_cat_colors(['category10'])[:5]
    ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

    Notes
    -----
    The list of hex colors can be found here:
    https://observablehq.com/@d3/color-schemes
    """
    bostock = {
        "set1": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
                 "#ff7f00", "#ffff33", "#a65628", "#f781bf",
                 "#999999"],
        "set2": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3",
                 "#a6d854", "#ffd92f", "#e5c494", "#b3b3b3"],
        "set3": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
                 "#80b1d3", "#fdb462", "#b3de69", "#fccde5",
                 "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"],
        "pastel1": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4",
                    "#fed9a6", "#ffffcc", "#e5d8bd", "#fddaec",
                    "#f2f2f2"],
        "pastel2": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4",
                    "#e6f5c9", "#fff2ae", "#f1e2cc", "#cccccc"],
        "paired": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c",
                   "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00",
                   "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"],
        "dark": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a",
                 "#66a61e", "#e6ab02", "#a6761d", "#666666"],
        "accent": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99",
                   "#386cb0", "#f0027f", "#bf5b17", "#666666"],
        "category10": ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728",
                       "#9467bd", "#8c564b", "#e377c2", "#7f7f7f",
                       "#bcbd22", "#17becf"],
    }
    # Flatten the selected sets into one list, preserving order.
    l = [bostock[k] for k in color_sets]
    categorical_colors = [item for sublist in l for item in sublist]
    return categorical_colors
d01a2c833c3ee4ab1a196184ec4aecdb6cfc97a0
705,666
def _quote_embedded_quotes(text):
    """
    Replace any embedded quotes with two quotes.

    :param text: the text to quote
    :return: the quoted text
    """
    result = text
    if '\'' in text:
        result = result.replace('\'', '\'\'')
    if '"' in text:
        result = result.replace('"', '""')
    return result
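# Usage sketch (not part of the original source):
print(_quote_embedded_quotes("it's"))      # it''s
print(_quote_embedded_quotes('say "hi"'))  # say ""hi""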
71231e590e025c2ceb7b2dd4fde4465a9ff61a4c
705,668
def exp2(x):
    """Calculate 2**x."""
    return 2 ** x
d76d1e344e79ebb05d38a2e7e6ef36b6f367e85b
705,669
def archived_minute(dataSet, year, month, day, hour, minute):
    """
    Input: a dataset and a specific minute
    Output: a list of ride details at that minute, or -1 if there was no
    ride during that minute
    """
    year = str(year)
    month = str(month)
    day = str(day)

    # Convert hour and minute into two-digit strings.
    hour = "%02d" % hour
    minute = "%02d" % minute

    timeStamp = month + '/' + day + '/' + year + ' ' + hour + ':' + minute + ':' + '00'

    if timeStamp in dataSet:
        return dataSet[timeStamp]
    else:
        return -1
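# Usage sketch (not part of the original source); keys follow the
# M/D/YYYY HH:MM:SS pattern the function builds:
rides = {"7/4/2020 09:05:00": ["ride_123"]}
print(archived_minute(rides, 2020, 7, 4, 9, 5))  # ['ride_123']
print(archived_minute(rides, 2020, 7, 4, 9, 6))  # -1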
e550cb8ae5fbcfcc2a0b718dc2e4f3372f100015
705,670
def inputRead(c, inps):
    """
    Reads the tokens in the input channels (Queues) given by the list
    inps, using the token rates defined by the list c. It outputs a list
    where each element is a list of the read tokens.

    Parameters
    ----------
    c : [int]
        List of token consumption rates.
    inps : [Queue]
        List of channels.

    Returns
    -------
    inputs : [list]
        List of token lists.
    """
    if len(c) != len(inps):
        raise Exception("Token consumption list and Queue list have different sizes")
    inputs = []
    for i in range(len(c)):
        aux = []
        for j in range(c[i]):
            aux.append(inps[i].get())
        inputs.append(aux)
    return inputs
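# Usage sketch (not part of the original source):
from queue import Queue

ch = Queue()
for token in (1, 2, 3):
    ch.put(token)
print(inputRead([2], [ch]))  # [[1, 2]] -- consumes two tokens from the single channel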
ea70548f7da4fae66fe5196734bbf39deb255537
705,671
import time


def myfn(n):
    """Print hello world.

    Prints one "hello world" per second, n times in total.
    """
    print("hello world!")
    if n == 1:
        return
    # The docstring promises one print per second; pause between prints.
    time.sleep(1)
    myfn(n - 1)
4405e8b4c591c435d43156283c0d8e2aa9860055
705,672
import requests


def check_internet_connection():
    """Checks if there is a working internet connection."""
    url = 'http://www.google.com/'
    timeout = 5
    try:
        _ = requests.get(url, timeout=timeout)
        return True
    except requests.ConnectionError:
        return False
5f587e6077377196d2c89b39f5be5d6a2747e093
705,673
def wall_filter(points, img):
    """
    Filters away points that are inside walls. Works by checking where
    the refractive index is not 1.
    """
    # A point is inside a wall wherever the image value (refractive index)
    # at its (row, col) coordinates differs from 1.
    deletion_mask = img[points[:, 0], points[:, 1]] != 1
    filtered_points = points[~deletion_mask]
    return filtered_points
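# Usage sketch (not part of the original source); assumes numpy arrays of
# integer (row, col) indices and a 2-D refractive-index grid:
import numpy as np

img = np.ones((3, 3))
img[1, 1] = 1.5  # a wall cell
pts = np.array([[0, 0], [1, 1], [2, 2]])
print(wall_filter(pts, img))  # [[0 0], [2 2]] -- the point inside the wall is dropped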
05a34602e8a555eb1f1739f5de910a71514a92ae
705,674
def roq_transform(pressure, loading):
    """Rouquerol transform function."""
    return loading * (1 - pressure)
b69d83579cdb904cc7e3625a371e1f6c0573e44b
705,675
def get_image_urls(ids):
    """Map ids to image URLs."""
    # `img_id` avoids shadowing the built-in `id`.
    return [f"http://127.0.0.1:8000/{img_id}" for img_id in ids]
a70cd4eea39ea277c82ccffac2e9b7d68dd7c801
705,676
import torch


def caption_image_batch(encoder, decoder, images, word_map, device, max_length):
    """
    Reads a batch of images and captions them with greedy decoding
    (the highest-scoring word is taken at every step).

    :param encoder: encoder model
    :param decoder: decoder model
    :param images: batch of images
    :param word_map: word map
    :param device: device to run the decoder embeddings on
    :param max_length: maximum number of decode steps before giving up
    :return: caption lengths for the batch
    """
    # Encode
    encoder_out = encoder(images)  # (batch_size, enc_image_size, enc_image_size, encoder_dim)
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(3)

    # Flatten encoding
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (batch_size, num_pixels, encoder_dim)

    # Tensor to store the previous word at each step; initially just <start>
    k_prev_words = torch.LongTensor([[word_map['<start>']]] * batch_size)  # (batch_size, 1)

    # Tensor to store the sequences; initially just <start>
    seqs = k_prev_words  # (batch_size, 1)

    # Set to store the indices of completed sequences
    complete_seqs = set()

    # Start decoding
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)

    # Decode until every sequence in the batch has produced <end>
    while len(complete_seqs) < batch_size:
        embeddings = decoder.embedding(k_prev_words.to(device)).squeeze(1)  # (batch_size, embed_dim)
        awe, alpha = decoder.attention(encoder_out, h)  # (batch_size, encoder_dim), (batch_size, num_pixels)
        gate = decoder.sigmoid(decoder.f_beta(h))  # gating scalar, (batch_size, encoder_dim)
        awe = gate * awe
        h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (batch_size, decoder_dim)
        scores = decoder.fc(h)  # (batch_size, vocab_size)
        _, next_word_inds = scores.max(1)
        next_word_inds = next_word_inds.cpu()

        # Add the new words to the sequences
        seqs = torch.cat([seqs, next_word_inds.unsqueeze(1)], dim=1)  # (batch_size, step+1)

        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds)
                           if next_word != word_map['<end>']]
        complete_inds = set(range(batch_size)) - set(incomplete_inds)
        complete_seqs.update(complete_inds)

        k_prev_words = next_word_inds.unsqueeze(1)

        # Break if things have been going on too long
        if step > max_length:
            break
        step += 1

    # Append <end> so every sequence has a terminator, then measure lengths.
    k_end_words = torch.LongTensor([[word_map['<end>']]] * batch_size)  # (batch_size, 1)
    seqs = torch.cat([seqs, k_end_words], dim=1)  # (batch_size, step+1)
    seq_length = [s.tolist().index(word_map['<end>']) for s in seqs]

    return seq_length
192def399d6b05947df7bac06e90836771a22dda
705,677
def descope_queue_name(scoped_name):
    """Descope a queue name scoped with '.'.

    Returns the queue name from the scoped name, which is of the form
    project-id.queue-name.
    """
    return scoped_name.split('.')[1]
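# Usage sketch (not part of the original source):
print(descope_queue_name("my-project.orders"))  # orders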
24de78d12399e0894f495cd5c472b10c2315e4af
705,679
import os


def system_info():
    """Get system info as a list of os.uname() fields."""
    return list(os.uname())
4231e967190daee2ae7c6d9823878bcb0a957bc1
705,680
import torch


def rebalance_binary_class(label, mask=None, base_w=1.0):
    """Binary-class rebalancing: weight positives by the inverse of their
    frequency in `label`. (`mask` and `base_w` are currently unused.)"""
    # Fraction of positive elements, clamped to avoid division by ~0.
    weight_factor = label.float().sum() / torch.prod(torch.tensor(label.size()).float())
    weight_factor = torch.clamp(weight_factor, min=1e-2)
    alpha = 1.0
    weight = alpha * label * (1 - weight_factor) / weight_factor + (1 - label)
    return weight_factor, weight
5adf3a21e4cc4b9e7bf129ecf31cfe37ab7a305a
705,681
def from_base(num_base: int, dec: int) -> float:
    """Returns value in e.g. ETH (taking e.g. wei as input)."""
    return float(num_base / (10 ** dec))
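# Usage sketch (not part of the original source): 1.5 ETH expressed in wei.
print(from_base(1_500_000_000_000_000_000, 18))  # 1.5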
447a07b3e282e5104f8dcd50639c658f3013ec7a
705,682
def make_auth_header(auth_token):
    """Make the authorization headers to communicate with endpoints which
    implement Auth0 authentication API.

    Args:
        auth_token (dict): a dict obtained from the Auth0 domain oauth
            endpoint, containing the signed JWT (JSON Web Token), its expiry,
            the scopes granted, and the token type.

    Returns:
        headers (dict): A dict representing the headers with necessary token
            information to talk to Auth0 authentication required endpoints.
    """
    token_type = auth_token['token_type']
    access_token = auth_token['access_token']
    headers = {
        "Content-type": "application/json",
        "Authorization": "{token_type} {access_token}".format(
            token_type=token_type, access_token=access_token
        ),
    }
    return headers
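# Usage sketch (not part of the original source); the token values are
# placeholders, not real credentials:
token = {"token_type": "Bearer", "access_token": "eyJ...", "expires_in": 86400}
print(make_auth_header(token)["Authorization"])  # Bearer eyJ...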
e7c9b93cfbda876668068fb871d3abaf06157204
705,683