content | sha1 | id |
---|---|---|
import numpy
def calc_fm_3d_by_density(mult_i, den_i, np, volume, moment_2d, phase_3d):
"""
Calculate magnetic structure factor.
[hkl, points, symmetry]
F_M = V_uc / (Ns * Np) mult_i den_i moment_2d[i, s] * phase_3d[hkl, i, s]
V_uc is volume of unit cell
Ns is the number of symmetry elements
Np is the number of points in unit cell
i is the points in assymetric unit cell
s is the elemnt of symmetries
mult_i is the multiplicity of i point
den_i is the density of i point
moment_2d[i, s] is the moment in assymetric point i at element symmetry s
in local coordinate system
phase_3d[hkl, i, s] is the phase for reflection hkl in the point i for
symmetry element s
Output data:
- f_hkl_1d_1, f_hkl_1d_2, f_hkl_1d_3: [hkl]
"""
# number of symmetry elements
ns = phase_3d.shape[2]
# [ind]
m_rho = (volume*1./float(ns*np))*den_i*mult_i
m_2d_1, m_2d_2, m_2d_3 = moment_2d
# [ind, symm]
    t_2d_1 = m_rho[:, numpy.newaxis] * m_2d_1
    t_2d_2 = m_rho[:, numpy.newaxis] * m_2d_2
    t_2d_3 = m_rho[:, numpy.newaxis] * m_2d_3
# [hkl, ind, symm]
f_hkl_3d_1 = t_2d_1[numpy.newaxis, :, :] * phase_3d[:, :, :]
f_hkl_3d_2 = t_2d_2[numpy.newaxis, :, :] * phase_3d[:, :, :]
f_hkl_3d_3 = t_2d_3[numpy.newaxis, :, :] * phase_3d[:, :, :]
return f_hkl_3d_1, f_hkl_3d_2, f_hkl_3d_3 | 40f807f00422af17897ec4fa15c5826e1b73abd8 | 702,642 |
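A quick shape check of the function above, using hypothetical array sizes (5 reflections, 3 points in the asymmetric unit, 2 symmetry elements):

import numpy
mult_i = numpy.ones(3)
den_i = numpy.ones(3)
moment_2d = numpy.ones((3, 3, 2))  # unpacks along axis 0 into three (points, symm) arrays
phase_3d = numpy.ones((5, 3, 2))   # (hkl, points, symm)
f1, f2, f3 = calc_fm_3d_by_density(mult_i, den_i, 3, 100.0, moment_2d, phase_3d)
print(f1.shape)  # (5, 3, 2)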
import functools
def dict_to_function(arg_dict):
"""
We need functions for Tensorflow ops, so we will use this function
to dynamically create functions from dictionaries.
"""
def inner_function(lookup, **inner_dict):
return inner_dict[lookup]
new_function = functools.partial(inner_function, **arg_dict)
return new_function | b6bfb0a11393eeb93733cc41fe7395ce99136713 | 702,643 |
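A short usage sketch of the partial-based lookup function, with hypothetical keys:

lookup = dict_to_function({"relu": 1, "tanh": 2})
print(lookup("tanh"))  # 2 (the positional arg selects a key from the baked-in dict)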
def get_data_type(name):
"""Extract the data type name from an ABC(...) type name."""
return name.split('(', 1)[0] | 7565b30e1e2469de929b377fde1f186d28080f94 | 702,644 |
def moving_average(time_series, window_size=20, fwd_fill_to_end=0):
"""
Computes a Simple Moving Average (SMA) function on a time series
:param time_series: a pandas time series input containing numerical values
:param window_size: a window size used to compute the SMA
:param fwd_fill_to_end: index from which computation must stop and propagate last value
:return: Simple Moving Average time series
"""
    sma = time_series.rolling(window=window_size).mean()
    if fwd_fill_to_end > 0:
        # propagate the value at index -fwd_fill_to_end to the end of the series
        sma[-fwd_fill_to_end:] = sma.iloc[-fwd_fill_to_end]
    # The moving average is NaN for the first `window_size - 1` entries,
    # so backfill to fill those NaN values
    sma.fillna(method='backfill', inplace=True)
    return sma | d71931867c419f306824e8b240a9b1bb3fff2fdd | 702,645 |
from pathlib import Path
from typing import Sequence
def load_input(path: Path) -> Sequence[int]:
"""Loads the input data for the puzzle."""
with open(path, "r") as f:
depths = tuple(int(d) for d in f.readlines())
return depths | 35472eadcd2deefbbae332b3811be7d731cb2478 | 702,646 |
def forgiving_state_copy(target_net, source_net):
"""
Handle partial loading when some tensors don't match up in size.
    Because we want to use models that were trained with a different
    number of classes.
"""
net_state_dict = target_net.state_dict()
loaded_dict = source_net.state_dict()
new_loaded_dict = {}
for k in net_state_dict:
if k in loaded_dict and net_state_dict[k].size() == loaded_dict[k].size():
new_loaded_dict[k] = loaded_dict[k]
print("Matched", k)
else:
print("Skipped loading parameter ", k)
# logging.info("Skipped loading parameter %s", k)
net_state_dict.update(new_loaded_dict)
target_net.load_state_dict(net_state_dict)
return target_net | cea46fdc0fd123517ea2a678968d19e8716ccbdf | 702,647 |
def clean_invite_embed(line):
"""Makes invites not embed"""
return line.replace("discord.gg/", "discord.gg/\u200b") | 05b73197150e892ed2284d9c6ac8b0eebeb492b1 | 702,648 |
import hashlib
import zlib
def calc_hash_crc(filename):
"""Calculate hash and crc32 of selected file"""
    with open(filename, 'rb') as f:
        data = f.read()
fhash = hashlib.sha256(data).hexdigest()
fcrc = zlib.crc32(data)
return {'sha256': fhash, 'crc32' : fcrc} | e36d004b41cd9a9d92cd9f41584fa90fb67b7631 | 702,649 |
def set_accuracy_83(num):
"""Reduce floating point accuracy to 8.3 (xxxxx.xxx).
:param float num: input number
:returns: float with specified accuracy
"""
return float("{:8.3f}".format(num)) | fd1818a81ea7a78c296a85adc3621ab77fbad230 | 702,650 |
def get_hexes_at_radius(centre_col, centre_row, radius):
"""
Function that get a list of all hexes at a certain radius from
a centre hex
"""
if radius == 0:
hex_list = [[centre_col, centre_row]]
return hex_list
if radius == 1:
hex_list = [[centre_col, centre_row - 2],
[centre_col + 1, centre_row - 1],
[centre_col + 1, centre_row + 1],
[centre_col, centre_row + 2],
[centre_col - 1, centre_row + 1],
[centre_col - 1, centre_row - 1]]
return hex_list
if radius == 2:
hex_list = [[centre_col, centre_row - 4],
[centre_col + 1, centre_row - 3],
[centre_col + 2, centre_row - 2],
[centre_col + 2, centre_row],
[centre_col + 2, centre_row + 2],
[centre_col + 1, centre_row + 3],
[centre_col, centre_row + 4],
[centre_col - 1, centre_row + 3],
[centre_col - 2, centre_row + 2],
[centre_col - 2, centre_row],
[centre_col - 2, centre_row - 2],
[centre_col - 1, centre_row - 3]]
return hex_list
if radius == 3:
hex_list = [[centre_col, centre_row - 6],
[centre_col + 1, centre_row - 5],
[centre_col + 2, centre_row - 4],
[centre_col + 3, centre_row - 3],
[centre_col + 3, centre_row - 1],
[centre_col + 3, centre_row + 1],
[centre_col + 3, centre_row + 3],
[centre_col + 2, centre_row + 4],
[centre_col + 1, centre_row + 5],
[centre_col, centre_row + 6],
[centre_col - 1, centre_row + 5],
[centre_col - 2, centre_row + 4],
[centre_col - 3, centre_row + 3],
[centre_col - 3, centre_row + 1],
[centre_col - 3, centre_row - 1],
[centre_col - 3, centre_row - 3],
[centre_col - 2, centre_row - 4],
[centre_col - 1, centre_row - 5]]
return hex_list
return [] | de4b0fd70bcca0978a02ec55f645f600eaca7947 | 702,651 |
def get_staticmethod_func(cm):
"""
Returns the function wrapped by the #staticmethod *cm*.
"""
if hasattr(cm, '__func__'):
return cm.__func__
else:
return cm.__get__(int) | 7f8992db0b90abdb64a82e74c53199b3490792c7 | 702,653 |
import platform
def is_mac():
"""
Checks if we are running on Mac OSX.
:returns: **bool** to indicate if we're on a Mac
"""
return platform.system() == 'Darwin' | 9991bfd017bf9948a75d99d5a1dfeadfd291c803 | 702,655 |
def load_file(path):
"""Loads file and return its content as list.
Args:
path: Path to file.
Returns:
        list: Content split by linebreak.
"""
with open(path, 'r') as arq:
text = arq.read().split('\n')
return text | 348d57ab3050c12181c03c61a4134f2d43cd93cd | 702,656 |
from typing import Type
import enum
def _enum_help(msg: str, e: Type[enum.Enum]) -> str:
"""
Render a `--help`-style string for the given enumeration.
"""
return f"{msg} (choices: {', '.join(str(v) for v in e)})" | e53762798e0ecb324143ee4a05c4152eaf756aad | 702,657 |
from collections import OrderedDict
def array_remove_duplicates(s):
"""removes any duplicated elements in a string array."""
return list(OrderedDict.fromkeys(s)) | ea5a0d620139e691db99f364c38827abe39a16f5 | 702,658 |
def gt_comparison():
""">: Greater than operator."""
class _Comparable:
def __gt__(self, other):
return 'big' in other
    return _Comparable() > 'big' and "masterpiece" | 76009a61f47fac7e5abc838bf2fa427ec7268d03 | 702,659 |
import socket
def get_ipv4_for_hostname(hostname, static_mappings=None):
    """Translate a host name to IPv4 address format.
    The IPv4 address is returned as a string, such as '100.50.200.5'.
    If the host name is an IPv4 address itself it is returned unchanged.
    You can provide a dictionary with static mappings.
    Following mappings are added by default:
    '127.0.0.1' => '127.0.0.1'
    'localhost' => '127.0.0.1'
    'localhost.localdomain' => '127.0.0.1'
    Args:
        hostname (string): hostname.
        static_mappings (dict): dictionary of static mappings
            ((hostname) string: (ip) string).
    Returns:
        (string) IPv4 address for the given hostname (None if any problem)
    """
    hostname = hostname.lower()
    # copy so the caller's dict (or a shared default) is never mutated
    static_mappings = dict(static_mappings or {})
    static_mappings.update({'127.0.0.1': '127.0.0.1', 'localhost': '127.0.0.1',
                            'localhost.localdomain': '127.0.0.1'})
if hostname in static_mappings:
return static_mappings[hostname]
try:
return socket.gethostbyname(hostname)
except Exception:
return None | fd28106380c6a6d2c353c0e8103f15df264117ef | 702,660 |
def season_ts(ds, var, season):
""" calculate timeseries of seasonal averages
Args: ds (xarray.Dataset): dataset
var (str): variable to calculate
season (str): 'DJF', 'MAM', 'JJA', 'SON'
"""
## set months outside of season to nan
ds_season = ds.where(ds['time.season'] == season)
# calculate 3month rolling mean (only middle months of season will have non-nan values)
ds_season = ds_season[var].rolling(min_periods=3, center=True, time=3).mean()
# reduce to one value per year
ds_season = ds_season.groupby('time.year').mean('time')
# FUTURE: remove first year if it has nan?
return ds_season | 6d5b0ddc39762ceca42de6b9228c38f4bf365cd0 | 702,661 |
def getSubString(string, firstChar, secondChar,start=0):
"""
Gives the substring of string between firstChar and secondChar. Starts looking from start. If it is unable to find a substring returns an empty string.
"""
front = string.find(firstChar,start)
back = string.find(secondChar,front+1)
if front > -1 and back > -1:
return (string[front+1:back],back)
else:
return ("",-1) | c845c3c31abce7ed8064cfd16e455c27f1aac806 | 702,662 |
def factorial_iter(num: int) -> int:
"""
Return the factorial of an integer non-negative number.
Parameters
----------
num : int
Raises
------
TypeError
if num is not integer.
ValueError
if num is less than zero.
Returns
-------
int
"""
if not isinstance(num, int):
raise TypeError("an integer number is required")
if num < 0:
raise ValueError("a non-negative integer number is required")
product = 1
for factor in range(2, num + 1):
product *= factor
return product | 36ec433bf02bdef0770f9f9b86feff9afa995eb3 | 702,663 |
def update_Q(Qsa, Qsa_next, reward, alpha, gamma):
""" updates the action-value function estimate using the most recent time step """
return Qsa + (alpha * (reward + (gamma * Qsa_next) - Qsa)) | f5f54d8e8b9f67c1145d967fd731ea37a4aa1e57 | 702,664 |
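A quick numeric check of the temporal-difference update, with hypothetical values:

q = update_Q(Qsa=1.0, Qsa_next=2.0, reward=0.5, alpha=0.1, gamma=0.9)
print(q)  # 1.0 + 0.1 * (0.5 + 0.9*2.0 - 1.0) = 1.13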
import pickle
def load_psnr(loss_file):
"""
load image psnr or optical flow psnr.
:param loss_file: loss file path
:return:
"""
with open(loss_file, 'rb') as reader:
# results {
# 'dataset': the name of dataset
# 'psnr': the psnr of each testing videos,
# }
# psnr_records['psnr'] is np.array, shape(#videos)
# psnr_records[0] is np.array ------> 01.avi
# psnr_records[1] is np.array ------> 02.avi
# ......
# psnr_records[n] is np.array ------> xx.avi
results = pickle.load(reader)
psnrs = results['psnr']
return psnrs | a8c4b6850d3ffe15c566a732eb73a27b6d750e20 | 702,665 |
def in_tcltk_website(text):
"""Receives the tcltk website text string and returns the range where the colors are written."""
start = text.find('<PRE>') + len('<PRE>')
end = text.find('</PRE>')
colors = text[start:end]
return colors | decf15574f4ed00f32aa31427591cb17472ae221 | 702,667 |
def get_diff(l1, l2):
"""
Returns the difference between two lists.
"""
diff = list( list(set(l1) - set(l2)) + list(set(l2) - set(l1)) )
return diff | 16b2083fcd4f61cb86c563ea6773b64e6fbe6006 | 702,668 |
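For reference, this computes the symmetric difference; a short check (element order may vary, since sets are unordered):

print(get_diff([1, 2, 3], [2, 3, 4]))  # [1, 4]
# Equivalent: list(set([1, 2, 3]) ^ set([2, 3, 4]))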
def identical_prediction_lists(prev_prediction_list, curr_prediction_list):
"""
Check if two predictions lists are the same
:param prev_prediction_list: list Last iterations predictions
:param curr_prediction_list: list current iterations predictions
:return: bool false = not identical, true = identical
"""
for x, y in zip(prev_prediction_list, curr_prediction_list):
if x != y:
return False
return True | b74140acb3c5529fb804710d4fd3bfd64bb4f009 | 702,669 |
def preprocess_box(data):
"""Pre-process data to take correct values."""
return [[data[2], data[0]], [data[3], data[1]]] | 9955ece2ae559832a49db32dadc73c3eb3b85c82 | 702,671 |
def _AddressTranslation(rdata, unused_origin):
"""Returns the address of the given rdata.
Args:
rdata: Rdata, The data to be translated.
unused_origin: Name, The origin domain name.
Returns:
str, The address of the given rdata.
"""
return rdata.address | 8be6c393f64dd852af8e8fa0a06e6ddea9d5a25f | 702,672 |
def _get_start_offset(lines) -> int:
"""Get the start offset of the license data."""
i = len(lines) - 1
count = 0
for line in lines[::-1]:
if "-" * 10 in line:
count += 1
if count == 2:
break
i -= 1
return max(i, 0) | 16fb35dad0381276a3cedd1fb5f19b165fb69b03 | 702,674 |
def make_n_grams(seq, n):
""" return iterator """
ngrams = (tuple(seq[i:i+n]) for i in range(len(seq)-n+1))
return ngrams | f026106c2dd548c7390f2dddcdeccc5c40f3ba7b | 702,675 |
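A brief usage example:

print(list(make_n_grams([1, 2, 3, 4], 2)))  # [(1, 2), (2, 3), (3, 4)]
print(list(make_n_grams("abcd", 3)))        # [('a', 'b', 'c'), ('b', 'c', 'd')]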
import re
def get_defectdojo_date(date):
"""
Returns date as required by DefectDojo.
:param date:
    :return: yyyy-mm-dd
"""
regex = r"([0-9]{2})\/([0-9]{2})\/([0-9]{4})"
matches = re.finditer(regex, date, re.MULTILINE)
match = next(enumerate(matches))
date = match[1].groups()
day = date[0]
mon = date[1]
year = date[2]
defectdojo_date = "{year}-{mon}-{day}".format(year=year, mon=mon, day=day)
# print(defectdojo_date)
return defectdojo_date | 983d1ce85cbf5bf8d0f5d6a5416cc2130dd617c1 | 702,678 |
def _FakeService():
""" use if you need a fake service from build """
class e:
execute = lambda : {'sheets':[],}
class v:
def get(range=None):
return []
class g:
def get(spreadsheetId=None, includeGridData=None, range=None):
return e
values = lambda : g
class s:
spreadsheets = lambda : g
return s | 887c14bc0669efc0511efff2ee3301bab15aafcb | 702,679 |
def formatEx(excepInst):
"""
_formatEx_
given a DbdException instance, generate a simple message from it
"""
msg = "%s:%s %s" % (excepInst.__class__.__name__,
excepInst.getErrorMessage(),
excepInst.getErrorCode(),
)
return msg | 6f2a1fda050f4ab9eaf24f5c7f6d1c019e64306e | 702,681 |
def reverse_domain_from_network(ip_network):
"""Create reverse DNS zone name from network address (ipaddress.IPv4Network)"""
prefixlen = ip_network.prefixlen
if prefixlen % 8:
return ip_network.reverse_pointer # classless
else:
        # use integer division: a float index would raise a TypeError when slicing
        return ip_network.network_address.reverse_pointer[(4 - (prefixlen // 8)) * 2:] | 0b0b7bb6bc72cae6625e9bd024c9692253d55c88 | 702,682 |
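A usage sketch (with the integer-division fix above):

import ipaddress
net = ipaddress.IPv4Network("192.168.1.0/24")
print(reverse_domain_from_network(net))  # 1.168.192.in-addr.arpa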
from bs4 import BeautifulSoup
def get_anchor_href(markup):
"""
Given HTML markup, return a list of hrefs for each anchor tag.
"""
soup = BeautifulSoup(markup, 'lxml')
return ['%s' % link.get('href') for link in soup.find_all('a')] | ec75f36e0b14a1d20452a1b6c1233d789c03cd6b | 702,683 |
async def get_asterisk_chan(response_json):
"""Get the Asterisk Channel from the JSON responses"""
if response_json["type"] in ["PlaybackStarted", "PlaybackFinished"]:
return str(response_json.get("playback", {}).get("target_uri")).split(":")[1]
else:
return response_json.get("channel", {}).get("id") | 951ccc3cdea92cfb630eb24bbd9e2f2333a72f1e | 702,684 |
def get_valid_uuids(uuids: list, full_paths: list, valid_full_paths: list) -> list:
"""Returns valid uuids."""
return [uuid for uuid, full_path in zip(uuids, full_paths) if full_path in valid_full_paths] | f93d77060edd9b38d2f322bacb9e01ae69ef8e4b | 702,685 |
import os
def copy_remote_file(web_file, destination):
"""
Check if exist the destination path, and copy the online resource
file to local.
Args:
:web_file: reference to online file resource to take.
:destination: path to store the file.
"""
size = 0
dir_name = os.path.dirname(destination)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(destination, 'wb') as file_:
chunk_size = 8 * 1024
for chunk in web_file.iter_content(chunk_size=chunk_size):
if chunk:
file_.write(chunk)
size += len(chunk)
return size | 931197924fb2bfdbc8a2df99135822df4c186073 | 702,686 |
def massage_error_code(error_code):
"""Massages error codes for cleaner exception handling.
    Args:
        error_code (int): Error code
    Returns:
        int: Error code. If arg is not an integer, will change to 999999
"""
if type(error_code) is not int:
error_code = 999999
return error_code | c9c42e71aa4684e79078673baa9a7ecfe627a169 | 702,687 |
import random
def randomPartition(elems: list, bin_sizes: list) -> list:
"""
Randomly partition list elements into
bins of given sizes
"""
def shuffleList(elems):
random.shuffle(elems)
return elems
elems = shuffleList(elems)
partition = []
start, end = 0, 0
for bin_size in bin_sizes:
end += bin_size
partition.append(elems[start:end])
start += bin_size
return partition | 52d1d16639fa0a4566423255bb29c123879282cb | 702,688 |
def pre_process(line):
"""
    Return the line with comments and surrounding whitespace removed.
"""
if '#' in line:
line = line[:line.index('#')]
stripped_data = line.strip()
return stripped_data | 63640048cb07376fb73b62cb6b6d2049adec5c17 | 702,689 |
def get_canonical_values_not_in_goals(
slot_cmap: dict[str, dict], domain: str
) -> dict[str, set[str]]:
"""Some canonical values do not appear in the goals so they are in a special field in the
canonical map. This is specific to MultiWOZ 2.1.
Parameters
----------
slot_cmap
Canonical map of a given slot. This contains a `special_keys` field.
domain
For some slots, the canonical map is split by domain, so the domain is necessary
in order to correctly retrieve the cannonical values not in goal.
"""
# for some slots (e.g., hotel-stay) canonical vals outside goals are
# split by domain
new_vals = slot_cmap["special_keys"].get("not_in_goal", {}).get(domain, {})
if not new_vals:
new_vals = slot_cmap["special_keys"].get("not_in_goal", {})
return new_vals | 45ce90fbc86b8ba2be71432234a6c08d853b5e0d | 702,690 |
def file_tag_from_task_file(file: str, cut_num_injs: bool = False) -> str:
"""Returns the file tag from a task output filename.
Args:
file: Filename of task file with or without path.
cut_num_injs: Whether to not include the number of injections per redshift bin.
"""
file_tag = file.replace("_TASK_", "results_").split("results_")[1]
if cut_num_injs:
return file_tag.split("_INJS-PER-ZBIN_")[0]
else:
return file_tag | 2ba0180c1a06f4ae6e47659e3598c2207cca5642 | 702,691 |
import torch
def cmc_score_count(
distances: torch.Tensor, conformity_matrix: torch.Tensor, topk: int = 1
) -> float:
"""
Function to count CMC from distance matrix and conformity matrix.
Args:
distances: distance matrix shape of (n_embeddings_x, n_embeddings_y)
conformity_matrix: binary matrix with 1 on same label pos and 0 otherwise
topk: number of top examples for cumulative score counting
Returns:
cmc score
Examples:
.. code-block:: python
import torch
from catalyst import metrics
metrics.cmc_score_count(
distances=torch.tensor([[1, 2], [2, 1]]),
conformity_matrix=torch.tensor([[0, 1], [1, 0]]),
topk=1,
)
# 0.0
.. code-block:: python
import torch
from catalyst import metrics
metrics.cmc_score_count(
distances=torch.tensor([[1, 0.5, 0.2], [2, 3, 4], [0.4, 3, 4]]),
conformity_matrix=torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
topk=2,
)
# 0.33
"""
perm_matrix = torch.argsort(distances)
position_matrix = torch.argsort(perm_matrix)
conformity_matrix = conformity_matrix.type(torch.bool)
position_matrix[~conformity_matrix] = (
topk + 1
) # value large enough not to be counted
closest = position_matrix.min(dim=1)[0]
k_mask = (closest < topk).type(torch.float)
return k_mask.mean().item() | 5c3d31a6455e1d8c7694a2117637e9e51478bb21 | 702,692 |
def regrid_get_coord_order(f, axis_keys, coord_keys):
"""Get the ordering of the axes for each N-d auxiliary coordinate.
:Parameters:
f: `Field`
The source or destination field.
axis_keys: sequence
A sequence of axis keys.
coord_keys: sequence
A sequence of keys for each of the N-d auxiliary
coordinates.
:Returns:
`list`
A list of lists specifying the ordering of the axes for
each N-d auxiliary coordinate.
"""
coord_axes = [f.get_data_axes(coord_key) for coord_key in coord_keys]
coord_order = [
[coord_axis.index(axis_key) for axis_key in axis_keys]
for coord_axis in coord_axes
]
return coord_order | 28ecbb91d29c2638a07bf255d3daf3819f0475fc | 702,693 |
def getprotobyname(name): # real signature unknown; restored from __doc__
"""
getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)
"""
return 0 | 2f682337380ecb3898042cf8a7af2d9ef5290887 | 702,694 |
import threading
def simple_thread(func, daemon=True):
""" Start function in another thread, discarding return value. """
thread = threading.Thread(target=func)
thread.daemon = daemon
thread.start()
return thread | 870102cf07b92b7cdd56960a7c6da5d8521ee233 | 702,695 |
def add_gms_group(
self,
group_name: str,
parent_pk: str = "",
background_image_file: str = "",
) -> bool:
"""Update appliance group in Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - group
- POST
- /gms/group/new
:param group_name: Name of the group. Must be unique.
:type group_name: str
:param parent_pk: The appliance group identifier,
e.g. ``3.Network``, "" will act as to root group, defaults to ""
:type parent_pk: str, optional
:param background_image_file: Image filename for group,
defaults to ""
:type background_image_file: str, optional
:return: Returns True/False based on successful call.
:rtype: bool
"""
data = {
"name": group_name,
"parentId": parent_pk,
"backgroundImage": background_image_file,
}
return self._post(
"/gms/group/new",
data=data,
expected_status=[204],
return_type="bool",
) | c92cd932070913af2b98608822ad03ff4a59506e | 702,697 |
def func_split_token(str_token):
"""
    Splits a VCF info token while guaranteeing 2 tokens
* str_token : String token to split at '='
: String
* return : List of 2 strings
"""
if str_token:
lstr_pieces = str_token.split("=")
i_pieces = len(lstr_pieces)
if i_pieces == 1:
return(lstr_pieces + [""])
if i_pieces == 2:
return lstr_pieces
elif i_pieces > 2:
return [lstr_pieces[0], "=".join(lstr_pieces[1:])]
return ["", ""] | 581b7729ab8dba6f3032b6f1f3bf9bc7dd17c67e | 702,698 |
def format_datetime(obj, constant_hour=False, hour=("00", "00")):
"""Will format a datetime object into a string."""
if not constant_hour:
return "{}{}{}{}{}".format(
obj.year,
"{:02d}".format(obj.month),
"{:02d}".format(obj.day),
"{:02d}".format(obj.hour),
"{:02d}".format(obj.minute)
)
return "{}{}{}{}{}".format(
obj.year,
"{:02d}".format(obj.month),
"{:02d}".format(obj.day),
hour[0], hour[1]
) | 0d8d3d35e9ad3168a0e264f6b4e77248406bf7c6 | 702,699 |
def intersection_pt(triangle_left, triangle_right):
""" get intersection point of two output triangles"""
a, b, c = triangle_left[0], triangle_left[1], triangle_left[2]
d, e, f = triangle_right[0], triangle_right[1], triangle_right[2]
x = (c * (b - c) - d * (e - d)) / (b - c - e + d)
y = (e - d) * (x - d)
return x, y | 051137af7eaa57edae25ff814593deb1a134ea14 | 702,700 |
def populate_sd_dict(herbivore_list):
"""Create and populate the stocking density dictionary, giving the stocking
density of each herbivore type."""
stocking_density_dict = {}
for herb_class in herbivore_list:
stocking_density_dict[herb_class.label] = herb_class.stocking_density
return stocking_density_dict | 87a0b9375a5a419557443506522ab2cf8d34c308 | 702,701 |
from typing import List
import pathlib
def get_files(
catalog: str, recursive: bool, suffix_markup: str, extension: str
) -> List[str]:
"""
Осуществляет поиск в каталоге и выдает список путей найденных файлов.
По-умолчанию, recursive = False, ищет файлы только в каталоге.
extension и suffix_markup - исключают поиск файлов с указанным расширением и суффиксом
"""
if recursive:
files = pathlib.Path(catalog).rglob(f"*{extension}")
else:
files = pathlib.Path(catalog).glob(f"*{extension}")
return [
str(i) for i in list(files) if not str(i).endswith(f"{suffix_markup+extension}")
] | 99f6d591b0548aafb654d05b94271a6d9ae243b6 | 702,703 |
import socket
def ip6_from_bytes(data: bytes) -> str:
    """Converts an IPv6 address from bytes to string representation.
Keyword arguments:
data -- address bytes to convert
"""
return socket.inet_ntop(socket.AF_INET6, data) | 694384d872b6fe27baa3fd97cba9dbfa7c35f8a9 | 702,704 |
def hash_side_effect(value):
"""Side effect value."""
if "mail_none.gif" in value:
return "633d7356947eec543c50b76a1852f92427f4dca9"
else:
return "133d7356947fec542c50b76b1856f92427f5dca9" | cc791ad289b4a198a0e66916467aae6c015f5676 | 702,705 |
def unless(predicate, function, value):
"""Tests the final argument by passing it to the given predicate function. If
the predicate is not satisfied, the function will return the result of
calling the whenFalseFn function with the same argument. If the predicate
is satisfied, the argument is returned as is"""
if predicate(value):
return value
return function(value) | 9a883fb57b99f5dbc4e8b357dfeb9b88682ef621 | 702,706 |
import logging
def getLogger():
"""Helper for retrieve the logger object
"""
return logging.getLogger(__name__) | 470424ea4b3f6b18225db6994b6b43439f2fd425 | 702,707 |
def pnchunk(darray, maxsize_4d=1000**2, sample_var="u", round_func=round, **kwargs):
""" Chunk `darray` in time while keeping each chunk's size roughly
around `maxsize_4d`. The default `maxsize_4d=1000**2` comes from
xarray's rule of thumb for chunking:
http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
"""
chunk_number = darray[sample_var].size / maxsize_4d
chunk_size = int(round_func(len(darray[sample_var].time) / chunk_number))
return darray.chunk(dict(time=chunk_size)) | 770bc7465168d89bf85800e01d1fcbb8fa0af663 | 702,708 |
def get_all_folder_ids(service, parent_id=None, drive_id=False):
"""
    Returns the set of folder ids under the given parent in Google Drive
"""
parent_ids = []
# build query string
if parent_id:
query = 'mimeType = \'application/vnd.google-apps.folder\'' \
f' and \'{parent_id}\' in parents'
else:
query = 'mimeType = \'application/vnd.google-apps.folder\''
try:
if drive_id:
results = service.files().list(q=str(query), supportsAllDrives=True,
includeItemsFromAllDrives=True, corpora="drive",
driveId=drive_id,
fields="files(id, name)").execute()
else:
results = service.files().list(q=str(query),
fields="files(id, name)").execute()
except Exception as e:
print(f'Failed to fetch folder ids for folder {parent_id}')
        raise e
folders = results.get('files', [])
return set(folder['id'] for folder in folders) | 56f1ae373533efcf991572aab9816540620fd949 | 702,710 |
import errno
def _convert_errno_parm(code_should_be):
""" Convert the code_should_be value to an integer
If code_should_be isn't an integer, then try to use
the code_should_be value as a errno "name" and extract
the errno value from the errno module.
"""
try:
code = int(code_should_be)
except ValueError:
try:
code = getattr(errno, code_should_be)
except AttributeError:
raise AssertionError("code_should_be '%s' is not a valid errno name" % code_should_be)
return code | 949c9f17539d885a0fc4a51f3358fc3695c42e22 | 702,711 |
import re
def find_all_starts_regex(seq):
""" Find the starting index of all start codons in a lowercase seq """
regex_start = re.compile('atg')
# Find the indices of all start codons
starts = []
for match in regex_start.finditer(seq):
starts.append(match.start())
return tuple(starts) | 5dd16b36352bbf41005fdf008606e503a050a175 | 702,713 |
def update_blurs(blur_o, blur_d, routing):
"""Signal when all the input fields are full (or at least visited?) and a routing option has been picked."""
if routing == 'slope':
c = 0
elif routing == 'balance':
c = 1
else:
c = 2
if (not blur_o) or (not blur_d):
return 0
else:
return int(blur_o) + int(blur_d) + c | 092ad87e42d682a1369dca41ddc1dab491bd7525 | 702,714 |
def _add(x):
"""
Add all elements of a list.
:param x: List to sum
:return: Sum of all elements in the input list
"""
return sum(x) | 19d42b51dbd07a992f3256b8b0d69c1f4043fa89 | 702,715 |
def _assign_link_resids(res_link, match):
"""
Given a link at residue level (`res_link`) and
a dict (`match`) specifying to which resids
the res_link nodes map, create a correspondence
dict that maps the higher resolution nodes to
the resids specified in` match`. Each `res_link`
node by definition can only map to one resid provided
in `match`. The lower resolution nodes associated
to a particular res_link node are stored in the 'graph'
attribute of res_link.
Note that the nodes in that graph are consecutive
and uniquely specify to a single node in the graph
specifying all residues at higher resolution.
Parameters:
-----------
res_link: :class:`nx.Graph`
must have a 'graph' attribute for each node
that itself is a graph of the atoms represented
match: dict
dict matching a resid to the node_key of res_link
Returns:
--------
:type:dict
correspondence of high resolution nodes to resid
"""
link_node_to_resid = {}
for resid, link_node in match.items():
for node in res_link.nodes[link_node]['graph']:
link_node_to_resid[node] = resid
return link_node_to_resid | cfb315a15ebba6e086f340dc7e6db27eadee1d2b | 702,716 |
def _get_pos_neg_loss(cls_loss, labels):
"""get pos neg loss"""
# cls_loss: [N, num_anchors, num_class]
# labels: [N, num_anchors]
batch_size = cls_loss.shape[0]
if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:
cls_pos_loss = (labels > 0).astype(cls_loss.dtype) * cls_loss.view(batch_size, -1)
cls_neg_loss = (labels == 0).astype(cls_loss.dtype) * cls_loss.view(batch_size, -1)
cls_pos_loss = cls_pos_loss.sum() / batch_size
cls_neg_loss = cls_neg_loss.sum() / batch_size
else:
cls_pos_loss = cls_loss[..., 1:].sum() / batch_size
cls_neg_loss = cls_loss[..., 0].sum() / batch_size
return cls_pos_loss, cls_neg_loss | 1e7c9444c8604e4c0e438f3d80826a2142a5398b | 702,717 |
def test_agegroup_pop(df):
"""
Compare the age group populations with manually computed
sums from the Excel files
"""
    def assert_pop(df, sex, state_name, year, age_group):
        return df[sex][(df.StateName == state_name) &
                       (df.Year == year) &
                       (df.AgeGroup == age_group)].iloc[0]
    assert assert_pop(df, 'Males', 'Chiapas', 2019, '80-84') == 16986
    assert assert_pop(df, 'Males', 'Michoacán', 2030, '15-19') == 204978
    assert assert_pop(df, 'Females', 'Nuevo León', 1998, '30-34') == 157366
    assert assert_pop(df, 'Total', 'Tabasco', 2005, '50-54') == 38958 + 38868 | a9e4d5a30eef670b916ea13cf5ef328234863cf6 | 702,719 |
def checkrows(sudoku):
"""
Checks if each row contains each value only once
"""
size = len(sudoku)
row_num = 0
for row in sudoku:
numbercontained = [False for x in range(size)]
for value in row:
# if placeholder, ignore it
if value in range(size):
if numbercontained[value]:
return False
else:
numbercontained[value] = True
row_num += 1
return True | 3266eb936b0f3f1e22bd16cd40fbf61753876ce1 | 702,720 |
def list_is_unique(ls):
"""Check if every element in a list is unique
Parameters
----------
ls: list
Returns
-------
bool
"""
return len(ls) == len(set(ls)) | a0bc92c2e00b48d80af39020622f31059845fc94 | 702,721 |
import csv
def read_queue(project):
"""Read queue csv file
Args:
project (string): project, i.e. CMIP5/CMIP6
Returns:
        rows (dict): a dictionary presenting each record stored in the file
dids (set): dataset_ids stored in the file
"""
rows={}
dids=set()
# open csv file and read data in dictionary with dataset_id as key
try:
with open("/g/data/hh5/clef/tables/" + project + "_clef_table.csv","r") as csvfile:
table_read = csv.reader(csvfile)
# for each row save did-var (to distinguish CMIP5) and separate set of unique dids
for row in table_read:
if project == 'CMIP5':
rows[(row[1],row[0])] = row[2]
dids.add(row[1])
elif project == 'CMIP6':
rows[(row[0])] = row[1]
dids.add(row[0])
except FileNotFoundError:
# Queue not available
pass
return rows, dids | 6e6fe3a08d22e5984c962eaba3cf446456e3cb59 | 702,723 |
def compute_transpose(x):
""" given a matrix, computes the transpose """
xt=[([0]*len(x)) for k in x[0]]
for i, x_row in enumerate(x):
for j, b in enumerate(x_row):
xt[j][i]=x[i][j]
return xt | 22988bc9802deaf1bc07182a5e85c56d54c94436 | 702,724 |
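A quick check of the transpose:

x = [[1, 2, 3],
     [4, 5, 6]]
print(compute_transpose(x))  # [[1, 4], [2, 5], [3, 6]]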
def sort_articles(articles):
"""Sort articles based on score.
:param articles:
:return:
"""
if len(articles) < 1:
raise ValueError('No news')
sorted_articles = sorted(articles, key=lambda x: x.score, reverse=True)
return sorted_articles | f8cbd9a1437a463e49269e65a440d51fdc7c0dd1 | 702,725 |
def WR(df, n):
"""威廉指标"""
hn = df["high"].rolling(n).max()
ln = df["low"].rolling(n).min()
df["wr"] = (hn - df["close"]) / (hn - ln) * (-100)
return df | 38ed4b05d970b4359d50103b7a54a91780dff3aa | 702,726 |
def extract_mocha_summary(lines):
"""
Example mocha summary lines (both lines can be missing if no tests passed/failed):
✔ 3 tests completed
✖ 1 test failed
"""
passes, fails = 0, 0
for line in lines:
if line and line[0] == '✔':
passes = int(line.split()[1])
elif line and line[0] == '✖':
fails = int(line.split()[1])
return {
'total_tests': passes + fails,
'passed_tests': passes,
'failed_tests': fails
} | 53d671c3d18f0421cbb512835a84f71a9588f947 | 702,727 |
import os
def gen_all_datasets(dir):
"""Looks through all .mat files in a directory, or just returns that file if it's only one."""
if dir.endswith(".mat"):
(r, f) = os.path.split(dir)
(f, e) = os.path.splitext(f)
return [(r, f)]
file_list = []
for r,d,f in os.walk(dir):
for files in f:
if files.endswith(".mat"):
file_list.append((r, files.split('.')[-2]))
file_list.sort()
return file_list | 8f652cf1710d4aff814201575a3927f944a2fdb6 | 702,728 |
def replace_text(text):
"""
Android资源文件英文的双引号或单引号需要加斜杠,否则会报错,中文的双引号和单引号不需要
:param text:
:return:
"""
temp_text = text.replace(r' \ "', r' \"').replace(r' / ', r'/').replace(r'% ', r' %') \
.replace(r' $ ', r'$').replace(r'$ ', r'$').replace(r'¥ ', r'¥ ').replace(r'¥ ', r'¥ ').replace(r"'", r"\'").replace(r'"', r'\"').replace("\\\\", "\\")
return temp_text | cddb558dabb34442fe6e5118229e3ea092c144d2 | 702,729 |
def factorial(n):
""" Returns the factorial of n.
e.g. factorial(7) = 7x6x5x4x3x2x1 = 5040
"""
answer = 1
for i in range(n, 1, -1):
answer = answer * i
return answer | 98c0530e4e0f1de8c1c0f622e7d62a5feb042bcb | 702,730 |
def vfun(self, parr="", func="", par1="", con1="", con2="", con3="",
**kwargs):
"""Performs a function on a single array parameter.
APDL Command: *VFUN
Parameters
----------
parr
The name of the resulting numeric array parameter vector. See *SET
for name restrictions.
func
Function to be performed:
Arccosine: ACOS(Par1). - Arcsine: ASIN(Par1).
Par1 is sorted in ascending order. *VCOL, *VMASK, *VCUM, and *VLEN,,NINC do not apply. *VLEN,NROW does apply. - Arctangent: ATAN(Par1).
Compress: Selectively compresses data set. "True" (*VMASK) values of Par1 (or row positions to be considered according to the NINC value on the *VLEN command) are written in compressed form to ParR, starting at the specified position. - Copy: Par1 copied to ParR.
Cosine: COS(Par1). - Hyperbolic cosine: COSH(Par1).
Direction cosines of the principal stresses (nX9). Par1 contains the nX6 component stresses for the n locations of the calculations. - Par1 is sorted in descending order. *VCOL, *VMASK, *VCUM, and *VLEN,,NINC do
not apply. *VLEN,NROW does apply.
Euler angles of the principal stresses (nX3). Par1 contains the nX6 component stresses for the n locations of the calculations. - Exponential: EXP(Par1).
Expand: Reverse of the COMP function. All elements of Par1 (starting at the position specified) are written in expanded form to corresponding "true" (*VMASK) positions (or row positions to be considered according to the NINC value on the *VLEN command) of ParR. - Natural logarithm: LOG(Par1).
Common logarithm: LOG10(Par1). - Nearest integer: 2.783 becomes 3.0, -1.75 becomes -2.0.
Logical complement: values 0.0 (false) become 1.0 (true). Values > 0.0 (true) become 0.0 (false). - Principal stresses (nX5). Par1 contains the nX6 component stresses for the n
locations of the calculations.
Power function: Par1**CON1. Exponentiation of any negative number in the vector Par1 to a non-integer power is performed by exponentiating the positive number and prepending the minus sign. For example, -4**2.3 is -(4**2.3). - Sine: SIN(Par1).
Hyperbolic sine: SINH(Par1). - Square root: SQRT(Par1).
Tangent: TAN(Par1). - Hyperbolic tangent: TANH(Par1).
Tangent to a path at a point: the slope at a point is determined by linear interpolation half way between the previous and next points. Points are assumed to be in the global Cartesian coordinate system. Path points are specified in array Par1 (having 3 consecutive columns of data, with the columns containing the x, y, and z coordinate locations, respectively, of the points). Only the starting row index and the column index for the x coordinates are specified, such as A(1,1). The y and z coordinates of the vector are assumed to begin in the corresponding next columns, such as A(1,2) and A(1,3). The tangent result, ParR, must also have 3 consecutive columns of data and will contain the tangent direction vector (normalized to 1.0); such as 1,0,0 for an x-direction vector. - Normal to a path and an input vector at a point: determined from the cross-
product of the calculated tangent vector (see
TANG) and the input direction vector (with the i,
j, and k components input as CON1, CON2, and
CON3). Points are assumed to be in the global
Cartesian coordinate system. Path points are
specified in array Par1 (having 3 consecutive
columns of data, with the columns containing the
x, y, and z coordinate locations, respectively,
of the points). Only the starting row index and
the column index for the x coordinates are
specified, such as A(1,1). The y and z
coordinates of the vector are assumed to begin in
the corresponding next columns, such as A(1,2)
and A(1,3). The normal result, ParR, must also
have 3 consecutive columns of data and will
contain the normal direction vector (normalized
to 1.0); such as 1,0,0 for an x-direction vector.
Transforms global Cartesian coordinates of a point to the coordinates of a specified system: points to be transformed are specified in array Par1 (having 3 consecutive columns of data, with the columns containing the x, y, and z global Cartesian coordinate locations, respectively, of the points). Only the starting row index and the column index for the x coordinates are specified, such as A(1,1). The y and z coordinates of the vector are assumed to begin in the corresponding next columns, such as A(1,2) and A(1,3). Results are transformed to coordinate system CON1 (which may be any valid coordinate system number, such as 1,2,11,12, etc.). The transformed result, ParR, must also have 3 consecutive columns of data and will contain the corresponding transformed coordinate locations. - Transforms specified coordinates of a point to global Cartesian coordinates:
points to be transformed are specified in array
Par1 (having 3 consecutive columns of data, with
the columns containing the local coordinate
locations (x, y, z or r, θ, z or etc.) of the
points). Only the starting row index and the
column index for the x coordinates are specified,
such as A(1,1). The y and z coordinates (or θ
and z, or etc.) of the vector are assumed to
begin in the corresponding next columns, such as
A(1,2) and A(1,3). Local coordinate locations
are assumed to be in coordinate system CON1
(which may be any valid coordinate system number,
such as 1,2,11,12, etc.). The transformed
result, ParR, must also have 3 consecutive
columns of data, with the columns containing the
global Cartesian x, y, and z coordinate
locations, respectively.
par1
Array parameter vector in the operation.
con1, con2, con3
Constants (used only with the PWR, NORM, LOCAL, and GLOBAL
functions).
Notes
-----
Operates on one input array parameter vector and produces one output
array parameter vector according to:
ParR = f(Par1)
where the functions (f) are described below. Functions are based on
the standard FORTRAN definitions where possible. Out-of-range function
results (or results with exponents whose magnitudes are approximately
greater than 32 or less than -32) produce a zero value. Input and
output for angular functions may be radians (default) or degrees
[*AFUN]. ParR may be the same as Par1. Starting array element numbers
must be defined for each array parameter vector if it does not start at
the first location. For example, *VFUN,A,SQRT,B(5) takes the square
root of the fifth element of B and stores the result in the first
element of A. Operations continue on successive array elements [*VLEN,
*VMASK] with the default being all successive elements. Absolute
values and scale factors may be applied to all parameters [*VABS,
*VFACT]. Results may be cumulative [*VCUM]. Skipping array elements
via *VMASK or *VLEN for the TANG and NORM functions skips only the
writing of the results (skipped array element data are used in all
    calculations). See the *VOPER command for details.
    This command is valid in any processor.
    Menu Paths
        Utility Menu>Parameters>Array Operations>Vector Functions
"""
command = f"*VFUN,{parr},{func},{par1},{con1},{con2},{con3}"
return self.run(command, **kwargs) | dba87cb721ba79a797c357160a2ebed425b3239f | 702,731 |
def reverse_colors(frame):
"""
Reverse the order of colors in a frame from RGB to BGR and from BGR to
RGB.
"""
return frame[:, :, ::-1] | 210deca283c373d02c0d114daad2e23ba822b3ab | 702,732 |
def extract_geohash_from_path(paths):
"""
:param paths: Sentinel2 paths;Shape = (batch_size, no_of_timestamps=3)
:return: 1d list of geohashes for which imputation is made
"""
return [i[i.find('9q'):i.find('9q')+5] for i in paths] | 4920e611bbb1e3f9c1ee2aef4efb93832a9b0c9b | 702,733 |
from typing import Optional
def algo_parts(name: str) -> tuple[str, Optional[int]]:
"""Return a tuple of an algorithm's name and optional number suffix.
Example::
>>> algo_parts('rot13')
('rot', 13)
>>> algo_parts('whirlpool')
('whirlpool', None)
"""
base_algo = name.rstrip('0123456789')
try:
bits = int(name[len(base_algo):])
except ValueError:
bits = None
return base_algo, bits | c3231da2bc3f96091b6f07d38fdd2caa414e59a1 | 702,734 |
import os
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append(compiler.library_dir_option(dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option(dir)
if isinstance(opt, list):
lib_opts = lib_opts + opt
else:
lib_opts.append(opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split(lib)
if lib_dir:
lib_file = compiler.find_library_file([lib_dir], lib_name)
if lib_file:
lib_opts.append(lib_file)
else:
compiler.warn("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append(compiler.library_option (lib))
return lib_opts | a615afce1d73b2d07652e53f4e15d06310a1e893 | 702,735 |
def calculate_rsi(analysis_df, column, window):
"""
Calculates relative stength index.
Args:
analysis_df: Pandas dataframe with a closing price column
column: String representing the name of the closing price column
window: Integer representing the number of periods used in the RSI calculation
Returns:
Pandas dataframe containing RSI
"""
    # RSI works on period-to-period price changes, not the raw price
    delta = analysis_df[column].diff()
up_periods = delta.copy()
up_periods[delta<=0] = 0.0
down_periods = abs(delta.copy())
down_periods[delta>0] = 0.0
rs_up = up_periods.rolling(window, min_periods=1).mean()
rs_down = down_periods.rolling(window, min_periods=1).mean()
rsi = 100 - 100/(1+rs_up/rs_down)
# Impute nan rows
rsi = rsi.fillna(method="bfill")
return rsi | 7c32751bc4aeb5583caa69397f1c19b88a208039 | 702,736 |
def has_slide_type(cell, slide_type):
"""
Select cells that have a given slide type
:param cell: Cell object to select
:param slide_type: Slide Type(s): '-', 'skip', 'slide', 'subslide', 'fragment', 'notes'
:type slide_type: str / set / list
:return: a bool object (True if cell should be selected)
"""
if isinstance(slide_type, str):
slide_type = {slide_type}
return all(f(cell) for f in [lambda c: 'slideshow' in c.metadata,
lambda c: 'slide_type' in c.metadata['slideshow'],
lambda c: c.metadata['slideshow']['slide_type'] in slide_type]) | edb1323331317d53502179fe357c151a5b59af0b | 702,737 |
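A minimal usage sketch, faking a notebook cell with SimpleNamespace (the real objects come from a parsed notebook):

from types import SimpleNamespace
cell = SimpleNamespace(metadata={'slideshow': {'slide_type': 'slide'}})
print(has_slide_type(cell, {'slide', 'subslide'}))  # True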
def auto_expand(list_or_value):
"""Given a list return it, given a scalar return a corresponding broadcasted 'infinite list'"""
if isinstance(list_or_value, list):
return list_or_value
else:
class Expanded:
def __init__(self, value):
self.value = value
def __getitem__(self, i):
return self.value
return Expanded(list_or_value) | d56288adf0c0275e8539c9ba12026bb23a27fb64 | 702,738 |
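A short demonstration of the broadcasted "infinite list":

xs = auto_expand([10, 20, 30])
ys = auto_expand(5)
print(xs[1], ys[1], ys[99])  # 20 5 5 (the scalar answers any index)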
import sqlite3
def query(x,db,v=True):
""" A function that takes in a query and returns/ prints the result"""
conn = sqlite3.connect(db)
curs = conn.cursor()
my_result = list(curs.execute(x).fetchall())
curs.close()
conn.commit()
if v is True:
print(my_result)
return my_result | 8b914224429bdfd8d167327bf6cc0e3afaf94b3c | 702,739 |
def build_url(selector, child_key, parent_key):
"""Return url string that's conditional to `selector` ("site" or "area")."""
if selector == "site":
return "https://%s.craigslist.org/" % child_key
return "https://%s.craigslist.org/%s/" % (parent_key, child_key) | f8f00d4f9d20c3312f2b36d014365dbcf08242bc | 702,740 |
def get_config(arguments=None, events=None):
"""
Retruns a pre-formatted configuration block for supervisor
"""
if arguments is None:
arguments = ''
if events is None:
events = 'PROCESS_STATE'
configuration_string = '''
[eventlistener:logstash-notifier]
command = ./logstash_notifier/__init__.py --coverage %(arguments)s
events = %(events)s
'''
return configuration_string % {'arguments': arguments, 'events': events} | cf548a31393a05a4f7a474b193e5752c5406233d | 702,742 |
def sieve_of_eratosphenes(n: int) -> list[int]:
"""
    Finds prime numbers <= n using the sieve of Eratosthenes algorithm.
:param n: the upper limit of sorted list of primes starting with 2
:return: sorted list of primes
Integers greater than 2 are considered good input that gives
meaningful results:
>>> sieve_of_eratosphenes(17)
[2, 3, 5, 7, 11, 13, 17]
>>> sieve_of_eratosphenes(16)
[2, 3, 5, 7, 11, 13]
Other integers are considered relatively good input that
results in empty list:
>>> sieve_of_eratosphenes(0)
[]
>>> sieve_of_eratosphenes(-7)
[]
Other types cause TypeError and so are considered bad input:
>>> sieve_of_eratosphenes("m")
Traceback (most recent call last):
...
TypeError: can only concatenate str (not "int") to str
>>> sieve_of_eratosphenes(2.3)
Traceback (most recent call last):
...
TypeError: can't multiply sequence by non-int of type 'float'
"""
sieve = [True] * (n+1)
for i in range(2, n+1):
        if sieve[i] and i*i <= n:
            # mark every multiple of i, starting from i*i, as composite
            for k in range(i*i, n+1, i):
                sieve[k] = False
prime_numbers = []
for i in range(2, n+1):
if sieve[i]:
prime_numbers.append(i)
return prime_numbers | 4dbfdffe0ff6e360361daccdcd39fb7fb3d09a03 | 702,743 |
def xor_decipher(text, key):
"""
Decipher a message using XOR.
text -- a list of integers corresponding to the ASCII value of characters.
key -- a list of characters used as keys.
"""
deciphered = []
key_length = len(key)
key_ascii = [ord(_k) for _k in key]
for i, _ascii in enumerate(text):
deciphered.append(chr(_ascii ^ key_ascii[i % key_length]))
return "".join(deciphered) | 99edcb7a08f9c22772305d70f5d4830828c2bbbf | 702,744 |
import string
import random
def name_generator(size=9, chars=string.ascii_uppercase + string.digits):
"""
    this method is for generating a random name for the downloaded files
@param size: number of random characters
@param chars: type of the random characters
"""
return ''.join(random.choice(chars) for x in range(size)) | afe91c39ed5d985f93e1aa4ab72bf7ff7c1b75e0 | 702,745 |
def recovery_secret_to_ksk(recovery_secret):
"""Turn secret and salt to the URI.
>>> recovery_secret_to_ksk("0123.4567!ABCD")
    'KSK@babcom-recovery-0123.4567!ABCD'
"""
if isinstance(recovery_secret, bytes):
recovery_secret = recovery_secret.decode("utf-8")
# fix possible misspellings (the recovery_secret is robust against them)
for err, fix in ["l1", "I1", "O0", "_-", "*+"]:
recovery_secret = recovery_secret.replace(err, fix)
ksk = "KSK@babcom-recovery-" + recovery_secret
return ksk | a8832a9e970e4728dcb5f779fd4463abf142e69e | 702,746 |
import click
from datetime import datetime
def validate_end_date(ctx, param, value):
"""
Validator for the 'click' command line interface, checking whether the entered date as argument 'start_date' is
later than argument 'end_date' and 'end_date' argument is later than current date.
"""
# option when the user enters the '--end_date' parameter earlier than '--start_date'
if ctx.params.get('start_date') is not None:
if ctx.params.get('start_date') > value:
raise click.BadParameter(f'the date value must be equal or later than: '
f'{(ctx.params.get("start_date")).date()}')
if value > datetime.now():
raise click.BadParameter(f'the date value must be equal or earlier than {(datetime.now().date())}')
return value | b1416fdc614aad53ffab537275b7ae2cb15eddce | 702,747 |
def transform_uppercase(val, mode=None):
"""
Convert to uppercase
<dotted>|uppercase string to uppercase
<dotted>|uppercase:force string to uppercase or raises
"""
try:
return val.upper()
except TypeError:
if mode == 'force':
raise
return val | f043674932bf900c0654b1be64ac80b6f4ec54fa | 702,748 |
def string_pair_list_to_dictionary_no_json(spl):
"""
Covert a mongodb_store_msgs/StringPairList into a dictionary, ignoring content
:Args:
| spl (StringPairList): The list of (key, value) to pairs convert
:Returns:
| dict: resulting dictionary
"""
return dict((pair.first, pair.second) for pair in spl) | fdcaa23eed195d389456a6ceb0fdde2d06d932d6 | 702,749 |
import re
def sanitize(inStr):
"""Hide any sensitive info from the alert"""
# Key-value pairs of patterns with what to replace them with
patterns = {
"https\:\/\/oauth2\:[\d\w]{64}@gitlab\.pavlovia\.org\/.*\.git": "[[OAUTH key hidden]]" # Remove any oauth keys
}
# Replace each pattern
for pattern, repl in patterns.items():
inStr = re.sub(pattern, repl, inStr)
return inStr | 62c55f7d0af7c458d66e426fc9366c8ec84379df | 702,750 |
def get_techniques_of_tactic(tactic, techniques):
"""Given a tactic and a full list of techniques, return techniques that
appear inside of tactic
"""
techniques_list = []
for technique in techniques:
for phase in technique['kill_chain_phases']:
if phase['phase_name'] == tactic['x_mitre_shortname']:
techniques_list.append(technique)
techniques_list = sorted(techniques_list, key=lambda k: k['name'].lower())
return techniques_list | 15efe8788bc4e45170f9d02c452482422ec8cf9f | 702,751 |
import sys
def inNativeByteOrder(im):
""" Put image in native byte order."""
if ((im.dtype.byteorder=='<') & (sys.byteorder=='big')) | ((im.dtype.byteorder=='>') & (sys.byteorder=='little')):
return im.byteswap(inplace=True).newbyteorder()
else:
return im | ccb7c05f068c00de429a99ddc98f5876a0c5de5c | 702,752 |
import torch
def make_positions(tensor, padding_idx, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
mask = tensor.ne(padding_idx).long()
return torch.cumsum(mask, dim=1) * mask + padding_idx | f59fad86a23ff76f184c0dd6a21a92722f4817f5 | 702,755 |
def port_to_ip_mapping(index):
"""
A user defined mapping port_id (kni) to ipv4.
"""
return {"vEth0_{}".format(index): "192.167.10.{}".format(index + 1)} | 81d981bc8742e1295cb279d6b47d0e97f012b679 | 702,756 |
def humanize_list(elements):
""""
splits a list and add punctuations to it elements
"""
humanize_string = ''
if len(elements) > 1:
for element in elements:
if element == elements[len(elements)-2]: # second to last item
humanize_string = humanize_string + element.username+' and '
elif element == elements[len(elements)-1]: # last item
humanize_string = humanize_string + element.username
else:
humanize_string = humanize_string + element.username+', '
return humanize_string
else:
return humanize_string + elements[0].username | 3172ac89f763d7faca4ce31c41611ca6b6731896 | 702,757 |
import typing
def ark(row: typing.Mapping[str, str]) -> str:
"""The item ARK (Archival Resource Key)
Args:
row: An input CSV record.
Returns:
The item ARK.
"""
ark_prefix = "ark:/"
if row["Item ARK"].startswith(ark_prefix, 0):
return row["Item ARK"]
return ark_prefix + row["Item ARK"] | ea428fcddf26a5bfdad3ec1c4906e765b72b1f47 | 702,758 |