| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 class: python) | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
def obtain_verbosity() -> int:
'Returns a verbosity level according to the set log level.'
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
verbosity = 0
if (log_level == 'DEBUG'):
verbosity = 2
if (log_level == 'INFO'):
verbosity = 1
return verbosity | -1,020,806,017,583,918,700 | Returns a verbosity level according to the set log level. | rasa/utils/common.py | obtain_verbosity | karen-white/rasa | python | def obtain_verbosity() -> int:
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
verbosity = 0
if (log_level == 'DEBUG'):
verbosity = 2
if (log_level == 'INFO'):
verbosity = 1
return verbosity |
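A minimal usage sketch for `obtain_verbosity` above. `ENV_LOG_LEVEL` and `DEFAULT_LOG_LEVEL` are module-level constants from `rasa.constants` that do not appear in this row, so the values below are assumptions made only for illustration.

```python
import os

# Assumed stand-ins for the rasa.constants values that obtain_verbosity() reads.
ENV_LOG_LEVEL = "LOG_LEVEL"
DEFAULT_LOG_LEVEL = "INFO"

os.environ[ENV_LOG_LEVEL] = "DEBUG"
# With obtain_verbosity() from the row above in scope:
# "DEBUG" maps to verbosity 2, "INFO" to 1, and any other level to 0.
```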
def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
'Sorts a list of dictionaries by their first key.'
return sorted(dicts, key=(lambda d: list(d.keys())[0])) | 2,533,327,906,987,913,700 | Sorts a list of dictionaries by their first key. | rasa/utils/common.py | sort_list_of_dicts_by_first_key | karen-white/rasa | python | def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
return sorted(dicts, key=(lambda d: list(d.keys())[0])) |
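An illustrative call for `sort_list_of_dicts_by_first_key` above; since sorting uses each dictionary's first insertion-ordered key, single-key dictionaries are the intended input.

```python
dicts = [{"b": 2}, {"a": 1}, {"c": 3}]
# Assuming sort_list_of_dicts_by_first_key() from the row above is in scope:
print(sort_list_of_dicts_by_first_key(dicts))
# [{'a': 1}, {'b': 2}, {'c': 3}]
```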
def write_global_config_value(name: Text, value: Any) -> None:
'Read global Rasa configuration.'
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
try:
os.makedirs(os.path.dirname(config_path), exist_ok=True)
c = read_global_config(config_path)
c[name] = value
rasa.core.utils.dump_obj_as_yaml_to_file(rasa.constants.GLOBAL_USER_CONFIG_PATH, c)
except Exception as e:
logger.warning(f'Failed to write global config. Error: {e}. Skipping.') | 8,603,069,271,412,444,000 | Read global Rasa configuration. | rasa/utils/common.py | write_global_config_value | karen-white/rasa | python | def write_global_config_value(name: Text, value: Any) -> None:
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
try:
os.makedirs(os.path.dirname(config_path), exist_ok=True)
c = read_global_config(config_path)
c[name] = value
rasa.core.utils.dump_obj_as_yaml_to_file(rasa.constants.GLOBAL_USER_CONFIG_PATH, c)
except Exception as e:
logger.warning(f'Failed to write global config. Error: {e}. Skipping.') |
def read_global_config_value(name: Text, unavailable_ok: bool=True) -> Any:
'Read a value from the global Rasa configuration.'
def not_found():
if unavailable_ok:
return None
else:
raise ValueError(f"Configuration '{name}' key not found.")
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
if (not os.path.exists(config_path)):
return not_found()
c = read_global_config(config_path)
if (name in c):
return c[name]
else:
return not_found() | 2,701,605,427,803,925,000 | Read a value from the global Rasa configuration. | rasa/utils/common.py | read_global_config_value | karen-white/rasa | python | def read_global_config_value(name: Text, unavailable_ok: bool=True) -> Any:
def not_found():
if unavailable_ok:
return None
else:
raise ValueError(f"Configuration '{name}' key not found.")
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
if (not os.path.exists(config_path)):
return not_found()
c = read_global_config(config_path)
if (name in c):
return c[name]
else:
return not_found() |
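A hedged round-trip sketch combining `read_global_config_value` with `write_global_config_value` from the earlier row. It assumes an installed Rasa where `rasa.constants.GLOBAL_USER_CONFIG_PATH` points at a writable file; the key names are hypothetical.

```python
# Hypothetical key name, shown only to illustrate the read/write pair.
write_global_config_value("telemetry_enabled", False)

print(read_global_config_value("telemetry_enabled"))  # False
print(read_global_config_value("missing_key"))        # None, because unavailable_ok defaults to True
# read_global_config_value("missing_key", unavailable_ok=False) would raise ValueError
```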
def update_existing_keys(original: Dict[(Any, Any)], updates: Dict[(Any, Any)]) -> Dict[(Any, Any)]:
'Iterate through all the updates and update a value in the original dictionary.\n\n If the updates contain a key that is not present in the original dict, it will\n be ignored.'
updated = original.copy()
for (k, v) in updates.items():
if (k in updated):
updated[k] = v
return updated | 4,146,280,643,972,020,700 | Iterate through all the updates and update a value in the original dictionary.
If the updates contain a key that is not present in the original dict, it will
be ignored. | rasa/utils/common.py | update_existing_keys | karen-white/rasa | python | def update_existing_keys(original: Dict[(Any, Any)], updates: Dict[(Any, Any)]) -> Dict[(Any, Any)]:
'Iterate through all the updates and update a value in the original dictionary.\n\n If the updates contain a key that is not present in the original dict, it will\n be ignored.'
updated = original.copy()
for (k, v) in updates.items():
if (k in updated):
updated[k] = v
return updated |
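A small example of `update_existing_keys` above: only keys already present in the original dictionary are overwritten, everything else in the update is dropped.

```python
original = {"epochs": 100, "batch_size": 64}
updates = {"epochs": 300, "learning_rate": 0.001}
# Assuming update_existing_keys() from the row above is in scope:
print(update_existing_keys(original, updates))
# {'epochs': 300, 'batch_size': 64}  ('learning_rate' is not in original, so it is ignored)
```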
def run_in_loop(f: Coroutine[(Any, Any, T)], loop: Optional[asyncio.AbstractEventLoop]=None) -> T:
"Execute the awaitable in the passed loop.\n\n If no loop is passed, the currently existing one is used or a new one is created\n if no loop has been started in the current context.\n\n After the awaitable is finished, all remaining tasks on the loop will be\n awaited as well (background tasks).\n\n WARNING: don't use this if there are never ending background tasks scheduled.\n in this case, this function will never return.\n\n Args:\n f: function to execute\n loop: loop to use for the execution\n\n Returns:\n return value from the function\n "
if (loop is None):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(f)
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
return result | -6,657,367,820,576,859,000 | Execute the awaitable in the passed loop.
If no loop is passed, the currently existing one is used or a new one is created
if no loop has been started in the current context.
After the awaitable is finished, all remaining tasks on the loop will be
awaited as well (background tasks).
WARNING: don't use this if there are never ending background tasks scheduled.
in this case, this function will never return.
Args:
f: function to execute
loop: loop to use for the execution
Returns:
return value from the function | rasa/utils/common.py | run_in_loop | karen-white/rasa | python | def run_in_loop(f: Coroutine[(Any, Any, T)], loop: Optional[asyncio.AbstractEventLoop]=None) -> T:
"Execute the awaitable in the passed loop.\n\n If no loop is passed, the currently existing one is used or a new one is created\n if no loop has been started in the current context.\n\n After the awaitable is finished, all remaining tasks on the loop will be\n awaited as well (background tasks).\n\n WARNING: don't use this if there are never ending background tasks scheduled.\n in this case, this function will never return.\n\n Args:\n f: function to execute\n loop: loop to use for the execution\n\n Returns:\n return value from the function\n "
if (loop is None):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(f)
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
return result |
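A usage sketch for `run_in_loop` above. The helper calls `asyncio.Task.all_tasks()`, which was removed in Python 3.9, so the example assumes an older interpreter; the coroutine is invented for illustration.

```python
import asyncio

async def add(a: int, b: int) -> int:
    await asyncio.sleep(0)
    return a + b

# With run_in_loop() from the row above in scope, on Python < 3.9 where
# asyncio.Task.all_tasks() still exists:
# print(run_in_loop(add(1, 2)))  # 3
```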
def forward(self, word, sentence_length):
'\n :param word:\n :param sentence_length:\n :param desorted_indices:\n :return:\n '
(word, sentence_length, desorted_indices) = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
x = self.embed(word)
x = self.dropout_embed(x)
packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
(x, _) = self.bilstm(packed_embed)
(x, _) = pad_packed_sequence(x, batch_first=True)
x = x[desorted_indices]
x = self.dropout(x)
x = torch.tanh(x)
logit = self.linear(x)
return logit | 452,789,069,687,181,630 | :param word:
:param sentence_length:
:param desorted_indices:
:return: | models/BiLSTM.py | forward | Ahmed2xD/NER-with-bilstm-CRF-CNN | python | def forward(self, word, sentence_length):
'\n :param word:\n :param sentence_length:\n :param desorted_indices:\n :return:\n '
(word, sentence_length, desorted_indices) = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
x = self.embed(word)
x = self.dropout_embed(x)
packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
(x, _) = self.bilstm(packed_embed)
(x, _) = pad_packed_sequence(x, batch_first=True)
x = x[desorted_indices]
x = self.dropout(x)
x = torch.tanh(x)
logit = self.linear(x)
return logit |
def __init__(self, vocab_size, emb_size, hidden_size, out_size):
':\n vocab_size:\n emb_size:\n hidden_size:\n out_size:\n '
super(BiLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.bilstm = nn.LSTM(emb_size, hidden_size, batch_first=True, bidirectional=True)
self.lin = nn.Linear((2 * hidden_size), out_size) | -7,144,852,210,901,468,000 | :
vocab_size:
emb_size:
hidden_size:
out_size: | models/BiLSTM.py | __init__ | Ahmed2xD/NER-with-bilstm-CRF-CNN | python | def __init__(self, vocab_size, emb_size, hidden_size, out_size):
':\n vocab_size:\n emb_size:\n hidden_size:\n out_size:\n '
super(BiLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.bilstm = nn.LSTM(emb_size, hidden_size, batch_first=True, bidirectional=True)
self.lin = nn.Linear((2 * hidden_size), out_size) |
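A standalone sketch of the same layer stack (not the repository class itself) showing why the final linear layer takes `2 * hidden_size` inputs: a bidirectional LSTM concatenates the forward and backward hidden states. All dimensions are invented for illustration.

```python
import torch
import torch.nn as nn

embedding = nn.Embedding(5000, 100)                               # vocab_size=5000, emb_size=100
bilstm = nn.LSTM(100, 128, batch_first=True, bidirectional=True)  # hidden_size=128
lin = nn.Linear(2 * 128, 10)                                      # out_size=10

tokens = torch.randint(0, 5000, (4, 20))   # (batch, seq_len) of token ids
out, _ = bilstm(embedding(tokens))         # (4, 20, 256): both directions concatenated
logits = lin(out)                          # (4, 20, 10)
print(logits.shape)
```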
def all_variations(word: str) -> list:
'\n Produce all single-character leet variations of a string\n '
ans = ['']
for leet_letter in [ASCII_SUBS[i] for i in word]:
ans = [(x + y) for x in ans for y in leet_letter]
return ans | -6,274,948,480,852,052,000 | Produce all single-character leet variations of a string | encode.py | all_variations | deut-erium/BASEic-steganography | python | def all_variations(word: str) -> list:
'\n \n '
ans = ['']
for leet_letter in [ASCII_SUBS[i] for i in word]:
ans = [(x + y) for x in ans for y in leet_letter]
return ans |
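A self-contained sketch of the loop in `all_variations` above. `ASCII_SUBS` in `encode.py` is a large table mapping each character to its leet substitutes; the tiny table below is an assumed stand-in.

```python
# Assumed miniature substitution table (the real ASCII_SUBS is much larger).
ASCII_SUBS = {"l": "l1", "e": "e3", "t": "t7"}

word = "let"
ans = ['']
for leet_letter in [ASCII_SUBS[i] for i in word]:
    ans = [(x + y) for x in ans for y in leet_letter]
print(ans)
# ['let', 'le7', 'l3t', 'l37', '1et', '1e7', '13t', '137']
```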
def variation_gen(word: str):
'\n Produces all single-character leet variations of a string\n\n Args:\n word: a 3 character string to generate all variations\n\n Returns:\n generator: generator for all possible leet variations\n '
return product(*(ASCII_SUBS[i] for i in word)) | -5,077,353,261,636,185,000 | Produces all single-character leet variations of a string
Args:
word: a 3 character string to generate all variations
Returns:
generator: generator for all possible leet variations | encode.py | variation_gen | deut-erium/BASEic-steganography | python | def variation_gen(word: str):
'\n Produces all single-character leet variations of a string\n\n Args:\n word: a 3 character string to generate all variations\n\n Returns:\n generator: generator for all possible leet variations\n '
return product(*(ASCII_SUBS[i] for i in word)) |
def all_valid_variations(word: str) -> list:
'\n Returns all leet variations of a triplet which result in a\n Base32 only charset words on base64 encoding\n\n Args:\n word: An english triplet\n Returns:\n list: of all valid variations\n '
result = []
for variation in variation_gen(word):
if all(((i in B32_CHARSET) for i in b64encode(''.join(variation).encode()))):
result.append(''.join(variation))
return result | -6,583,012,057,907,718,000 | Returns all leet variations of a triplet which result in a
Base32 only charset words on base64 encoding
Args:
word: An english triplet
Returns:
list: of all valid variations | encode.py | all_valid_variations | deut-erium/BASEic-steganography | python | def all_valid_variations(word: str) -> list:
'\n Returns all leet variations of a triplet which result in a\n Base32 only charset words on base64 encoding\n\n Args:\n word: An english triplet\n Returns:\n list: of all valid variations\n '
result = []
for variation in variation_gen(word):
if all(((i in B32_CHARSET) for i in b64encode(''.join(variation).encode()))):
result.append(''.join(variation))
return result |
def valid_variation(word: str) -> str:
'\n Generates a single valid variation\n\n Args:\n word: the triplet to generate a variation from\n Returns:\n str: A valid variation of `word` or None otherwise\n '
for variation in variation_gen(word):
if all(((i in B32_CHARSET) for i in b64encode(''.join(variation).encode()))):
return ''.join(variation)
return None | -5,859,122,286,654,836,000 | Generates a single valid variation
Args:
word: the triplet to generate a variation from
Returns:
str: A valid variation of `word` or None otherwise | encode.py | valid_variation | deut-erium/BASEic-steganography | python | def valid_variation(word: str) -> str:
'\n Generates a single valid variation\n\n Args:\n word: the triplet to generate a variation from\n Returns:\n str: A valid variation of `word` or None otherwise\n '
for variation in variation_gen(word):
if all(((i in B32_CHARSET) for i in b64encode(''.join(variation).encode()))):
return ''.join(variation)
return None |
def transform(strng: str) -> str:
'\n Transform the string to only lower alpha and numerics and spaces\n Converts uppercase to lower case and strips all other characters except\n space\n '
for char in (string.punctuation + string.whitespace[1:]):
strng = strng.replace(char, '')
return (strng.lower() + (' ' * (8 - (len(strng) % 8)))) | 2,339,621,541,417,914,400 | Transform the string to only lower alpha and numerics and spaces
Converts uppercase to lower case and strips all other characters except
space | encode.py | transform | deut-erium/BASEic-steganography | python | def transform(strng: str) -> str:
'\n Transform the string to only lower alpha and numerics and spaces\n Converts uppercase to lower case and strips all other characters except\n space\n '
for char in (string.punctuation + string.whitespace[1:]):
strng = strng.replace(char, '')
return (strng.lower() + (' ' * (8 - (len(strng) % 8)))) |
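An illustrative call for `transform` above: punctuation and non-space whitespace are stripped, the text is lower-cased, and one to eight spaces are appended so the length ends on a multiple of eight.

```python
# Assuming transform() from the row above is in scope:
padded = transform("Hello, World!")
print(repr(padded))  # 'hello world     '  (11 characters padded out to 16)
```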
def master_encode(strng: str) -> bytes:
'\n Encodes a string to its leet equivalent (sans punctuation) which when\n base64 encoded contains only base32 characters\n '
if isinstance(strng, (bytes, bytearray)):
strng = strng.decode()
strng = transform(strng)
result = ''
i = 0
while (i < len(strng)):
try:
current = strng[i:(i + 3)]
if (current in NON_LEET):
if ((current[:2] + ' ') not in NON_LEET):
result += valid_variation((current[:2] + ' '))
i += 2
elif ((current[0] + ' ') not in NON_LEET):
result += valid_variation((current[0] + ' '))
i += 1
elif (' {} '.format(current[0]) not in NON_LEET):
result += valid_variation(' {} '.format(current[0]))
i += 1
elif (' {}'.format(current[0]) not in NON_LEET):
result += valid_variation(' {}'.format(current[0]))
i += 1
else:
i += 1
else:
result += valid_variation(current)
i += 3
except TypeError:
i += 1
return b64encode(result.encode()) | 3,318,641,059,723,770,400 | Encodes a string to its leet equivalent (sans punctuation) which when
base64 encoded contains only base32 characters | encode.py | master_encode | deut-erium/BASEic-steganography | python | def master_encode(strng: str) -> bytes:
'\n Encodes a string to its leet equivalent (sans punctuation) which when\n base64 encoded contains only base32 characters\n '
if isinstance(strng, (bytes, bytearray)):
strng = strng.decode()
strng = transform(strng)
result = ''
i = 0
while (i < len(strng)):
try:
current = strng[i:(i + 3)]
if (current in NON_LEET):
if ((current[:2] + ' ') not in NON_LEET):
result += valid_variation((current[:2] + ' '))
i += 2
elif ((current[0] + ' ') not in NON_LEET):
result += valid_variation((current[0] + ' '))
i += 1
elif (' {} '.format(current[0]) not in NON_LEET):
result += valid_variation(' {} '.format(current[0]))
i += 1
elif (' {}'.format(current[0]) not in NON_LEET):
result += valid_variation(' {}'.format(current[0]))
i += 1
else:
i += 1
else:
result += valid_variation(current)
i += 3
except TypeError:
i += 1
return b64encode(result.encode()) |
def write_slurm_sh(id, command_line, queue_name='learnfair', nodes=1, gpu_per_node=8, wall_time=((3 * 24) * 60), username='wang3702', CPU_PER_GPU=10):
'\n Args:\n id: running id\n command_line: command line\n outlog_path: saving path\n Returns:\n\n '
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime('%H:%M:%S')
dependency_handler_path = os.path.join(os.getcwd(), 'ops')
dependency_handler_path = os.path.join(dependency_handler_path, 'handler.txt')
run_path = os.path.join(os.getcwd(), 'log')
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path, (('slurm_job_' + str(id)) + '.sh'))
output_path = os.path.join(run_path, (((('output_' + str(id)) + '_') + str((formatted_today + now))) + '.log'))
error_path = os.path.join(run_path, (((('error_' + str(id)) + '_') + str((formatted_today + now))) + '.log'))
with open(batch_file, 'w') as file:
file.write('#!/bin/sh\n')
file.write(('#SBATCH --job-name=%s\n' % id))
file.write(('#SBATCH --output=%s\n' % output_path))
file.write(('#SBATCH --error=%s\n' % error_path))
file.write(('#SBATCH --partition=%s\n' % queue_name))
file.write('#SBATCH --signal=USR1@600\n')
file.write(('#SBATCH --nodes=%d\n' % nodes))
file.write('#SBATCH --ntasks-per-node=1\n')
file.write('#SBATCH --mem=350G\n')
file.write(('#SBATCH --gpus=%d\n' % (nodes * gpu_per_node)))
file.write(('#SBATCH --gpus-per-node=%d\n' % gpu_per_node))
file.write(('#SBATCH --cpus-per-task=%d\n' % (CPU_PER_GPU * gpu_per_node)))
file.write(('#SBATCH --time=%d\n' % wall_time))
file.write(('#SBATCH --mail-user=%[email protected]\n' % username))
file.write('#SBATCH --mail-type=FAIL\n')
file.write('#SBATCH --mail-type=end \n')
file.write('#SBATCH --constraint="volta"\n')
report_info = ('%s job failed; \t' % id)
report_info += ('log path: %s; \t' % output_path)
report_info += ('error record path: %s\t' % error_path)
report_info += ('command line path: %s\t' % batch_file)
file.write(('#SBATCH --comment="%s"\n' % report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write('module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n')
file.write('/private/home/wang3702/anaconda3/bin/conda init\n')
file.write('CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n')
file.write('conda activate pytorch2\n')
file.write((command_line + ' &\n'))
file.write('wait $!\n')
file.write('set +x \n')
file.write('echo ..::Job Finished, but No, AGI is to BE Solved::.. \n')
os.system(('sbatch ' + batch_file)) | -4,796,757,946,245,007,000 | Args:
id: running id
command_line: command line
outlog_path: saving path
Returns: | run_slurm.py | write_slurm_sh | wang3702/barlowtwins | python | def write_slurm_sh(id, command_line, queue_name='learnfair', nodes=1, gpu_per_node=8, wall_time=((3 * 24) * 60), username='wang3702', CPU_PER_GPU=10):
'\n Args:\n id: running id\n command_line: command line\n outlog_path: saving path\n Returns:\n\n '
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime('%H:%M:%S')
dependency_handler_path = os.path.join(os.getcwd(), 'ops')
dependency_handler_path = os.path.join(dependency_handler_path, 'handler.txt')
run_path = os.path.join(os.getcwd(), 'log')
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path, (('slurm_job_' + str(id)) + '.sh'))
output_path = os.path.join(run_path, (((('output_' + str(id)) + '_') + str((formatted_today + now))) + '.log'))
error_path = os.path.join(run_path, (((('error_' + str(id)) + '_') + str((formatted_today + now))) + '.log'))
with open(batch_file, 'w') as file:
file.write('#!/bin/sh\n')
file.write(('#SBATCH --job-name=%s\n' % id))
file.write(('#SBATCH --output=%s\n' % output_path))
file.write(('#SBATCH --error=%s\n' % error_path))
file.write(('#SBATCH --partition=%s\n' % queue_name))
file.write('#SBATCH --signal=USR1@600\n')
file.write(('#SBATCH --nodes=%d\n' % nodes))
file.write('#SBATCH --ntasks-per-node=1\n')
file.write('#SBATCH --mem=350G\n')
file.write(('#SBATCH --gpus=%d\n' % (nodes * gpu_per_node)))
file.write(('#SBATCH --gpus-per-node=%d\n' % gpu_per_node))
file.write(('#SBATCH --cpus-per-task=%d\n' % (CPU_PER_GPU * gpu_per_node)))
file.write(('#SBATCH --time=%d\n' % wall_time))
file.write(('#SBATCH --mail-user=%[email protected]\n' % username))
file.write('#SBATCH --mail-type=FAIL\n')
file.write('#SBATCH --mail-type=end \n')
file.write('#SBATCH --constraint="volta"\n')
report_info = ('%s job failed; \t' % id)
report_info += ('log path: %s; \t' % output_path)
report_info += ('error record path: %s\t' % error_path)
report_info += ('command line path: %s\t' % batch_file)
file.write(('#SBATCH --comment="%s"\n' % report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write('module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n')
file.write('/private/home/wang3702/anaconda3/bin/conda init\n')
file.write('CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n')
file.write('conda activate pytorch2\n')
file.write((command_line + ' &\n'))
file.write('wait $!\n')
file.write('set +x \n')
file.write('echo ..::Job Finished, but No, AGI is to BE Solved::.. \n')
os.system(('sbatch ' + batch_file)) |
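A hypothetical invocation of `write_slurm_sh` above; the job id, command line, and queue name are placeholders, and the helper assumes the cluster-specific paths hard-coded in its body (the `ops/handler.txt` template and the conda environment it activates).

```python
# Placeholder arguments; this would write log/slurm_job_<id>.sh and submit it via sbatch.
# write_slurm_sh(
#     id="moco_baseline",
#     command_line="python main_moco.py --config configs/baseline.yaml",
#     queue_name="learnfair",
#     nodes=2,
#     gpu_per_node=8,
# )
```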
@staticmethod
def generate_private_key():
'\n Static function to generate a 16 byte random key.\n\n :return: the key as an integer\n '
return int.from_bytes(os.urandom(16), byteorder='big') | -7,436,298,459,082,489,000 | Static function to generate a 16 byte random key.
:return: the key as an integer | homekit/crypto/srp.py | generate_private_key | jlusiardi/homekit_client | python | @staticmethod
def generate_private_key():
'\n Static function to generate a 16 byte random key.\n\n :return: the key as an integer\n '
return int.from_bytes(os.urandom(16), byteorder='big') |
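The static method above is a single line of the standard library; this sketch shows the same operation and checks that the result fits in 128 bits.

```python
import os

key = int.from_bytes(os.urandom(16), byteorder="big")  # 16 random bytes as a big-endian integer
print(key.bit_length() <= 128)                          # True
```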
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool=True) -> bool:
'Check whether the `operand` is valid for arithmetic operations against numerics.'
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return ((not isinstance(operand, bool)) or allow_bool)
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return (isinstance(operand.spark.data_type, NumericType) or (allow_bool and isinstance(operand.spark.data_type, BooleanType)))
else:
return False | -1,508,399,129,130,615,800 | Check whether the `operand` is valid for arithmetic operations against numerics. | python/pyspark/pandas/data_type_ops/base.py | is_valid_operand_for_numeric_arithmetic | Chinazhanhuli/spark | python | def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool=True) -> bool:
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return ((not isinstance(operand, bool)) or allow_bool)
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return (isinstance(operand.spark.data_type, NumericType) or (allow_bool and isinstance(operand.spark.data_type, BooleanType)))
else:
return False |
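A few plain-Python cases for `is_valid_operand_for_numeric_arithmetic` above; they exercise only the non-Spark branches and assume an environment where `pyspark.pandas` is importable so the function can run at all.

```python
# Assuming is_valid_operand_for_numeric_arithmetic() from the row above is in scope:
assert is_valid_operand_for_numeric_arithmetic(3.5)
assert is_valid_operand_for_numeric_arithmetic(True)                        # bools allowed by default
assert not is_valid_operand_for_numeric_arithmetic(True, allow_bool=False)
assert not is_valid_operand_for_numeric_arithmetic("abc")                   # neither numeric nor a Spark column
```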
def transform_boolean_operand_to_numeric(operand: Any, *, spark_type: Optional[DataType]=None) -> Any:
'Transform boolean operand to numeric.\n\n If the `operand` is:\n - a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.\n - a boolean literal, transform to the int value.\n Otherwise, return the operand as it is.\n '
from pyspark.pandas.base import IndexOpsMixin
if (isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType)):
assert spark_type, 'spark_type must be provided if the operand is a boolean IndexOpsMixin'
assert isinstance(spark_type, NumericType), 'spark_type must be NumericType'
dtype = spark_type_to_pandas_dtype(spark_type, use_extension_dtypes=operand._internal.data_fields[0].is_extension_dtype)
return operand._with_new_scol(operand.spark.column.cast(spark_type), field=operand._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type))
elif isinstance(operand, bool):
return int(operand)
else:
return operand | 2,346,997,419,219,878,000 | Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is. | python/pyspark/pandas/data_type_ops/base.py | transform_boolean_operand_to_numeric | Chinazhanhuli/spark | python | def transform_boolean_operand_to_numeric(operand: Any, *, spark_type: Optional[DataType]=None) -> Any:
'Transform boolean operand to numeric.\n\n If the `operand` is:\n - a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.\n - a boolean literal, transform to the int value.\n Otherwise, return the operand as it is.\n '
from pyspark.pandas.base import IndexOpsMixin
if (isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType)):
assert spark_type, 'spark_type must be provided if the operand is a boolean IndexOpsMixin'
assert isinstance(spark_type, NumericType), 'spark_type must be NumericType'
dtype = spark_type_to_pandas_dtype(spark_type, use_extension_dtypes=operand._internal.data_fields[0].is_extension_dtype)
return operand._with_new_scol(operand.spark.column.cast(spark_type), field=operand._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type))
elif isinstance(operand, bool):
return int(operand)
else:
return operand |
def _as_categorical_type(index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType) -> IndexOpsLike:
'Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`.'
assert isinstance(dtype, CategoricalDtype)
if (dtype.categories is None):
(codes, uniques) = index_ops.factorize()
return codes._with_new_scol(codes.spark.column, field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)))
else:
categories = dtype.categories
if (len(categories) == 0):
scol = SF.lit((- 1))
else:
kvs = chain(*[(SF.lit(category), SF.lit(code)) for (code, category) in enumerate(categories)])
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit((- 1)))
return index_ops._with_new_scol(scol.cast(spark_type), field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type, nullable=False)) | 7,511,054,177,139,476,000 | Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`. | python/pyspark/pandas/data_type_ops/base.py | _as_categorical_type | Chinazhanhuli/spark | python | def _as_categorical_type(index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType) -> IndexOpsLike:
assert isinstance(dtype, CategoricalDtype)
if (dtype.categories is None):
(codes, uniques) = index_ops.factorize()
return codes._with_new_scol(codes.spark.column, field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)))
else:
categories = dtype.categories
if (len(categories) == 0):
scol = SF.lit((- 1))
else:
kvs = chain(*[(SF.lit(category), SF.lit(code)) for (code, category) in enumerate(categories)])
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit((- 1)))
return index_ops._with_new_scol(scol.cast(spark_type), field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type, nullable=False)) |
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[(str, type, Dtype)]) -> IndexOpsLike:
'Cast `index_ops` to BooleanType Spark type, given `dtype`.'
spark_type = BooleanType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(index_ops.spark.column.cast(spark_type))
return index_ops._with_new_scol(scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)) | -11,685,405,522,449,440 | Cast `index_ops` to BooleanType Spark type, given `dtype`. | python/pyspark/pandas/data_type_ops/base.py | _as_bool_type | Chinazhanhuli/spark | python | def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[(str, type, Dtype)]) -> IndexOpsLike:
spark_type = BooleanType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(index_ops.spark.column.cast(spark_type))
return index_ops._with_new_scol(scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)) |
def _as_string_type(index_ops: IndexOpsLike, dtype: Union[(str, type, Dtype)], *, null_str: str=str(None)) -> IndexOpsLike:
'Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,\n representing null Spark column. Note that `null_str` is for non-extension dtypes only.\n '
spark_type = StringType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
casted = index_ops.spark.column.cast(spark_type)
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)) | -4,034,449,229,953,681,000 | Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
representing null Spark column. Note that `null_str` is for non-extension dtypes only. | python/pyspark/pandas/data_type_ops/base.py | _as_string_type | Chinazhanhuli/spark | python | def _as_string_type(index_ops: IndexOpsLike, dtype: Union[(str, type, Dtype)], *, null_str: str=str(None)) -> IndexOpsLike:
'Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,\n representing null Spark column. Note that `null_str` is for non-extension dtypes only.\n '
spark_type = StringType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
casted = index_ops.spark.column.cast(spark_type)
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)) |
def _as_other_type(index_ops: IndexOpsLike, dtype: Union[(str, type, Dtype)], spark_type: DataType) -> IndexOpsLike:
'Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.\n\n Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.\n '
from pyspark.pandas.internal import InternalField
need_pre_process = (isinstance(dtype, CategoricalDtype) or isinstance(spark_type, BooleanType) or isinstance(spark_type, StringType))
assert (not need_pre_process), 'Pre-processing is needed before the type casting.'
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(scol, field=InternalField(dtype=dtype)) | -1,062,975,493,042,347,900 | Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.
Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType. | python/pyspark/pandas/data_type_ops/base.py | _as_other_type | Chinazhanhuli/spark | python | def _as_other_type(index_ops: IndexOpsLike, dtype: Union[(str, type, Dtype)], spark_type: DataType) -> IndexOpsLike:
'Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.\n\n Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.\n '
from pyspark.pandas.internal import InternalField
need_pre_process = (isinstance(dtype, CategoricalDtype) or isinstance(spark_type, BooleanType) or isinstance(spark_type, StringType))
assert (not need_pre_process), 'Pre-processing is needed before the type casting.'
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(scol, field=InternalField(dtype=dtype)) |
def _sanitize_list_like(operand: Any) -> None:
'Raise TypeError if operand is list-like.'
if isinstance(operand, (list, tuple, dict, set)):
raise TypeError(('The operation can not be applied to %s.' % type(operand).__name__)) | 6,998,762,152,895,740,000 | Raise TypeError if operand is list-like. | python/pyspark/pandas/data_type_ops/base.py | _sanitize_list_like | Chinazhanhuli/spark | python | def _sanitize_list_like(operand: Any) -> None:
if isinstance(operand, (list, tuple, dict, set)):
raise TypeError(('The operation can not be applied to %s.' % type(operand).__name__)) |
def restore(self, col: pd.Series) -> pd.Series:
'Restore column when to_pandas.'
return col | 2,203,781,437,692,319,500 | Restore column when to_pandas. | python/pyspark/pandas/data_type_ops/base.py | restore | Chinazhanhuli/spark | python | def restore(self, col: pd.Series) -> pd.Series:
return col |
def prepare(self, col: pd.Series) -> pd.Series:
'Prepare column when from_pandas.'
return col.replace({np.nan: None}) | -38,392,911,196,063,610 | Prepare column when from_pandas. | python/pyspark/pandas/data_type_ops/base.py | prepare | Chinazhanhuli/spark | python | def prepare(self, col: pd.Series) -> pd.Series:
return col.replace({np.nan: None}) |
def __init__(self, parent):
'\n :param parent: The model parent.\n :type parent: :class:`~PySide.QtGui.QObject`\n '
super(WorkAreaButton, self).__init__(parent)
self._normal_icon = QtGui.QIcon()
self._normal_icon.addPixmap(QtGui.QPixmap(':/tk_multi_infopanel/pin.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self._current_work_area_icon = QtGui.QIcon()
self._current_work_area_icon.addPixmap(QtGui.QPixmap(':/tk_multi_infopanel/pin_blue.png'), QtGui.QIcon.Disabled, QtGui.QIcon.Off)
self.setIcon(self._normal_icon)
self.setIconSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self._bundle = sgtk.platform.current_bundle()
self._entity_type = None
self._entity_id = None
self._is_static = False
self._caption = 'Set Work Area'
self._width = 120
self.clicked.connect(self._on_click)
self.setVisible(False) | -4,389,756,420,258,198,000 | :param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject` | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | __init__ | JoanAzpeitia/lp_sg | python | def __init__(self, parent):
'\n :param parent: The model parent.\n :type parent: :class:`~PySide.QtGui.QObject`\n '
super(WorkAreaButton, self).__init__(parent)
self._normal_icon = QtGui.QIcon()
self._normal_icon.addPixmap(QtGui.QPixmap(':/tk_multi_infopanel/pin.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self._current_work_area_icon = QtGui.QIcon()
self._current_work_area_icon.addPixmap(QtGui.QPixmap(':/tk_multi_infopanel/pin_blue.png'), QtGui.QIcon.Disabled, QtGui.QIcon.Off)
self.setIcon(self._normal_icon)
self.setIconSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self._bundle = sgtk.platform.current_bundle()
self._entity_type = None
self._entity_id = None
self._is_static = False
self._caption = 'Set Work Area'
self._width = 120
self.clicked.connect(self._on_click)
self.setVisible(False) |
def set_up(self, entity_type, entity_id):
'\n Sets up the button for a given entity.\n\n :param entity_type: Entity type to set up button for\n :param entity_id: Entity id to set up button for\n '
self._entity_id = entity_id
self._entity_type = entity_type
if (not self._bundle.get_setting('enable_context_switch')):
return
context = self._bundle.context
context_entity = (context.task or context.entity or context.project or None)
self.setVisible(True)
self.setEnabled(True)
self.setIcon(self._normal_icon)
self._is_static = False
if (context_entity and (context_entity['type'] == entity_type) and (context_entity['id'] == entity_id)):
self.setPopupMode(QtGui.QToolButton.DelayedPopup)
self.setToolTip('This is your current work area.\nThe work you do will be associated with this item in Shotgun.')
self.setIcon(self._current_work_area_icon)
self.setEnabled(False)
self._is_static = True
elif (entity_type in self.NON_WORK_AREA_TYPES):
self.setToolTip('This cannot be a work area.')
self.setEnabled(False)
self._is_static = True
elif (entity_type == 'Task'):
self._caption = 'Set Work Area'
self.setToolTip('Click to set your work area to the current task.')
else:
self._caption = 'Pick Work Area'
self.setToolTip('Click to select a task.')
self._init_default_state() | -7,748,087,610,528,656,000 | Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | set_up | JoanAzpeitia/lp_sg | python | def set_up(self, entity_type, entity_id):
'\n Sets up the button for a given entity.\n\n :param entity_type: Entity type to set up button for\n :param entity_id: Entity id to set up button for\n '
self._entity_id = entity_id
self._entity_type = entity_type
if (not self._bundle.get_setting('enable_context_switch')):
return
context = self._bundle.context
context_entity = (context.task or context.entity or context.project or None)
self.setVisible(True)
self.setEnabled(True)
self.setIcon(self._normal_icon)
self._is_static = False
if (context_entity and (context_entity['type'] == entity_type) and (context_entity['id'] == entity_id)):
self.setPopupMode(QtGui.QToolButton.DelayedPopup)
self.setToolTip('This is your current work area.\nThe work you do will be associated with this item in Shotgun.')
self.setIcon(self._current_work_area_icon)
self.setEnabled(False)
self._is_static = True
elif (entity_type in self.NON_WORK_AREA_TYPES):
self.setToolTip('This cannot be a work area.')
self.setEnabled(False)
self._is_static = True
elif (entity_type == 'Task'):
self._caption = 'Set Work Area'
self.setToolTip('Click to set your work area to the current task.')
else:
self._caption = 'Pick Work Area'
self.setToolTip('Click to select a task.')
self._init_default_state() |
def _init_default_state(self):
'\n Sets up the default collapsed state of the button\n '
self.setText('')
self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.setMinimumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self.setProperty('is_expanded', False)
self.style().unpolish(self)
self.style().polish(self) | -1,946,905,218,715,459,300 | Sets up the default collapsed state of the button | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | _init_default_state | JoanAzpeitia/lp_sg | python | def _init_default_state(self):
'\n \n '
self.setText('')
self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.setMinimumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self.setProperty('is_expanded', False)
self.style().unpolish(self)
self.style().polish(self) |
def _on_click(self):
'\n Executed when the button is clicked\n '
self.change_work_area.emit(self._entity_type, self._entity_id) | -6,190,946,438,780,453,000 | Executed when the button is clicked | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | _on_click | JoanAzpeitia/lp_sg | python | def _on_click(self):
'\n \n '
self.change_work_area.emit(self._entity_type, self._entity_id) |
def enterEvent(self, evt):
'\n QT Mouse enter event\n '
if (not self._is_static):
self.setText(self._caption)
self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.setMinimumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
self.setProperty('is_expanded', True)
self.style().unpolish(self)
self.style().polish(self)
return super(WorkAreaButton, self).enterEvent(evt) | -9,044,123,478,574,124,000 | QT Mouse enter event | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | enterEvent | JoanAzpeitia/lp_sg | python | def enterEvent(self, evt):
'\n \n '
if (not self._is_static):
self.setText(self._caption)
self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.setMinimumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
self.setProperty('is_expanded', True)
self.style().unpolish(self)
self.style().polish(self)
return super(WorkAreaButton, self).enterEvent(evt) |
def leaveEvent(self, evt):
'\n QT Mouse leave event\n '
if (not self._is_static):
QtCore.QTimer.singleShot(300, self._init_default_state)
return super(WorkAreaButton, self).leaveEvent(evt) | -7,516,456,122,290,220,000 | QT Mouse leave event | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | leaveEvent | JoanAzpeitia/lp_sg | python | def leaveEvent(self, evt):
'\n \n '
if (not self._is_static):
QtCore.QTimer.singleShot(300, self._init_default_state)
return super(WorkAreaButton, self).leaveEvent(evt) |
def __init__(self, parent):
'\n :param right_side_offset: Right hand side offset in pixels\n :param bottom_offset: Bottom offset in pixels\n :param parent: The model parent.\n :type parent: :class:`~PySide.QtGui.QObject`\n '
super(FloatingWorkAreaButton, self).__init__(parent)
filter = ResizeEventFilter(parent)
filter.resized.connect(self._on_parent_resized)
parent.installEventFilter(filter) | -1,418,046,261,842,745,900 | :param right_side_offset: Right hand side offset in pixels
:param bottom_offset: Bottom offset in pixels
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject` | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | __init__ | JoanAzpeitia/lp_sg | python | def __init__(self, parent):
'\n :param right_side_offset: Right hand side offset in pixels\n :param bottom_offset: Bottom offset in pixels\n :param parent: The model parent.\n :type parent: :class:`~PySide.QtGui.QObject`\n '
super(FloatingWorkAreaButton, self).__init__(parent)
filter = ResizeEventFilter(parent)
filter.resized.connect(self._on_parent_resized)
parent.installEventFilter(filter) |
def set_up(self, entity_type, entity_id):
'\n Sets up the button for a given entity.\n\n :param entity_type: Entity type to set up button for\n :param entity_id: Entity id to set up button for\n '
if (entity_type in self.NON_WORK_AREA_TYPES):
self.setVisible(False)
else:
super(FloatingWorkAreaButton, self).set_up(entity_type, entity_id) | -9,065,387,404,164,934,000 | Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | set_up | JoanAzpeitia/lp_sg | python | def set_up(self, entity_type, entity_id):
'\n Sets up the button for a given entity.\n\n :param entity_type: Entity type to set up button for\n :param entity_id: Entity id to set up button for\n '
if (entity_type in self.NON_WORK_AREA_TYPES):
self.setVisible(False)
else:
super(FloatingWorkAreaButton, self).set_up(entity_type, entity_id) |
def __position_widget(self):
'\n Moves the widget to the bottom-right corner of the parent widget.\n '
self.move(((self.parentWidget().width() - self.width()) - self.RIGHT_OFFSET), ((self.parentWidget().height() - self.height()) - self.BOTTOM_OFFSET)) | -5,805,314,831,962,238,000 | Moves the widget to the bottom-right corner of the parent widget. | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | __position_widget | JoanAzpeitia/lp_sg | python | def __position_widget(self):
'\n \n '
self.move(((self.parentWidget().width() - self.width()) - self.RIGHT_OFFSET), ((self.parentWidget().height() - self.height()) - self.BOTTOM_OFFSET)) |
def _init_default_state(self):
'\n Sets up the default collapsed state of the button\n '
super(FloatingWorkAreaButton, self)._init_default_state()
self.__position_widget() | 3,282,286,380,657,026,000 | Sets up the default collapsed state of the button | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | _init_default_state | JoanAzpeitia/lp_sg | python | def _init_default_state(self):
'\n \n '
super(FloatingWorkAreaButton, self)._init_default_state()
self.__position_widget() |
def enterEvent(self, evt):
'\n QT Mouse enter event\n '
status = super(FloatingWorkAreaButton, self).enterEvent(evt)
if (not self._is_static):
self.__position_widget()
return status | 4,044,323,142,160,784,000 | QT Mouse enter event | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | enterEvent | JoanAzpeitia/lp_sg | python | def enterEvent(self, evt):
'\n \n '
status = super(FloatingWorkAreaButton, self).enterEvent(evt)
if (not self._is_static):
self.__position_widget()
return status |
def _on_parent_resized(self):
'\n Special slot hooked up to the event filter.\n When associated widget is resized this slot is being called.\n '
self.__position_widget() | -7,982,805,845,304,782,000 | Special slot hooked up to the event filter.
When associated widget is resized this slot is being called. | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | _on_parent_resized | JoanAzpeitia/lp_sg | python | def _on_parent_resized(self):
'\n Special slot hooked up to the event filter.\n When associated widget is resized this slot is being called.\n '
self.__position_widget() |
def eventFilter(self, obj, event):
'\n Event filter implementation.\n For information, see the QT docs:\n http://doc.qt.io/qt-4.8/qobject.html#eventFilter\n\n This will emit the resized signal (in this class)\n whenever the linked up object is being resized.\n\n :param obj: The object that is being watched for events\n :param event: Event object that the object has emitted\n :returns: Always returns False to indicate that no events\n should ever be discarded by the filter.\n '
if (event.type() == QtCore.QEvent.Resize):
self.resized.emit()
return False | -8,797,665,117,994,636,000 | Event filter implementation.
For information, see the QT docs:
http://doc.qt.io/qt-4.8/qobject.html#eventFilter
This will emit the resized signal (in this class)
whenever the linked up object is being resized.
:param obj: The object that is being watched for events
:param event: Event object that the object has emitted
:returns: Always returns False to indicate that no events
should ever be discarded by the filter. | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | eventFilter | JoanAzpeitia/lp_sg | python | def eventFilter(self, obj, event):
'\n Event filter implementation.\n For information, see the QT docs:\n http://doc.qt.io/qt-4.8/qobject.html#eventFilter\n\n This will emit the resized signal (in this class)\n whenever the linked up object is being resized.\n\n :param obj: The object that is being watched for events\n :param event: Event object that the object has emitted\n :returns: Always returns False to indicate that no events\n should ever be discarded by the filter.\n '
if (event.type() == QtCore.QEvent.Resize):
self.resized.emit()
return False |
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
'\n Transform np creation function into blocked version\n '
if ('shape' not in kwargs):
(shape, args) = (args[0], args[1:])
else:
shape = kwargs.pop('shape')
if isinstance(shape, Array):
raise TypeError('Dask array input not supported. Please use tuple, list, or a 1D numpy array instead.')
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed['shape']
dtype = parsed['dtype']
chunks = parsed['chunks']
name = parsed['name']
kwargs = parsed['kwargs']
func = partial(func, dtype=dtype, **kwargs)
graph = BlockwiseCreateArray(name, func, shape, chunks)
return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get('meta', None)) | 8,226,832,800,374,302,000 | Transform np creation function into blocked version | dask/array/wrap.py | wrap_func_shape_as_first_arg | BlueOwlDev/dask | python | def wrap_func_shape_as_first_arg(func, *args, **kwargs):
'\n \n '
if ('shape' not in kwargs):
(shape, args) = (args[0], args[1:])
else:
shape = kwargs.pop('shape')
if isinstance(shape, Array):
raise TypeError('Dask array input not supported. Please use tuple, list, or a 1D numpy array instead.')
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed['shape']
dtype = parsed['dtype']
chunks = parsed['chunks']
name = parsed['name']
kwargs = parsed['kwargs']
func = partial(func, dtype=dtype, **kwargs)
graph = BlockwiseCreateArray(name, func, shape, chunks)
return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get('meta', None)) |
def wrap_func_like(func, *args, **kwargs):
'\n Transform np creation function into blocked version\n '
x = args[0]
meta = meta_from_array(x)
shape = kwargs.get('shape', x.shape)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed['shape']
dtype = parsed['dtype']
chunks = parsed['chunks']
name = parsed['name']
kwargs = parsed['kwargs']
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
shapes = list(shapes)
kw = [kwargs for _ in shapes]
for (i, s) in enumerate(list(shapes)):
kw[i]['shape'] = s
vals = (((partial(func, dtype=dtype, **k),) + args) for (k, s) in zip(kw, shapes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, meta=meta.astype(dtype)) | -5,930,016,839,543,346,000 | Transform np creation function into blocked version | dask/array/wrap.py | wrap_func_like | BlueOwlDev/dask | python | def wrap_func_like(func, *args, **kwargs):
'\n \n '
x = args[0]
meta = meta_from_array(x)
shape = kwargs.get('shape', x.shape)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed['shape']
dtype = parsed['dtype']
chunks = parsed['chunks']
name = parsed['name']
kwargs = parsed['kwargs']
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
shapes = list(shapes)
kw = [kwargs for _ in shapes]
for (i, s) in enumerate(list(shapes)):
kw[i]['shape'] = s
vals = (((partial(func, dtype=dtype, **k),) + args) for (k, s) in zip(kw, shapes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, meta=meta.astype(dtype)) |
def wrap_func_like_safe(func, func_like, *args, **kwargs):
'\n Safe implementation for wrap_func_like(), attempts to use func_like(),\n if the shape keyword argument, falls back to func().\n '
try:
return func_like(*args, **kwargs)
except TypeError:
return func(*args, **kwargs) | 5,567,341,765,180,916,000 | Safe implementation for wrap_func_like(), attempts to use func_like(),
if the shape keyword argument, falls back to func(). | dask/array/wrap.py | wrap_func_like_safe | BlueOwlDev/dask | python | def wrap_func_like_safe(func, func_like, *args, **kwargs):
'\n Safe implementation for wrap_func_like(), attempts to use func_like(),\n if the shape keyword argument, falls back to func().\n '
try:
return func_like(*args, **kwargs)
except TypeError:
return func(*args, **kwargs) |
def broadcast_trick(func):
'\n Provide a decorator to wrap common numpy function with a broadcast trick.\n\n Dask arrays are currently immutable; thus when we know an array is uniform,\n we can replace the actual data by a single value and have all elements point\n to it, thus reducing the size.\n\n >>> x = np.broadcast_to(1, (100,100,100))\n >>> x.base.nbytes\n 8\n\n Those array are not only more efficient locally, but dask serialisation is\n aware of the _real_ size of those array and thus can send them around\n efficiently and schedule accordingly.\n\n Note that those array are read-only and numpy will refuse to assign to them,\n so should be safe.\n '
inner = _broadcast_trick_inner(func)
if (func.__doc__ is not None):
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
if inner.__name__.endswith('_like_safe'):
inner.__name__ = inner.__name__[:(- 10)]
return inner | 1,003,119,542,952,994,200 | Provide a decorator to wrap common numpy function with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, thus reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
Those array are not only more efficient locally, but dask serialisation is
aware of the _real_ size of those array and thus can send them around
efficiently and schedule accordingly.
Note that those array are read-only and numpy will refuse to assign to them,
so should be safe. | dask/array/wrap.py | broadcast_trick | BlueOwlDev/dask | python | def broadcast_trick(func):
'\n Provide a decorator to wrap common numpy function with a broadcast trick.\n\n Dask arrays are currently immutable; thus when we know an array is uniform,\n we can replace the actual data by a single value and have all elements point\n to it, thus reducing the size.\n\n >>> x = np.broadcast_to(1, (100,100,100))\n >>> x.base.nbytes\n 8\n\n Those array are not only more efficient locally, but dask serialisation is\n aware of the _real_ size of those array and thus can send them around\n efficiently and schedule accordingly.\n\n Note that those array are read-only and numpy will refuse to assign to them,\n so should be safe.\n '
inner = _broadcast_trick_inner(func)
if (func.__doc__ is not None):
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
if inner.__name__.endswith('_like_safe'):
inner.__name__ = inner.__name__[:(- 10)]
return inner |
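The memory effect described in the `broadcast_trick` docstring can be reproduced directly with NumPy; this shows the underlying trick rather than the dask wrapper itself.

```python
import numpy as np

x = np.broadcast_to(1, (100, 100, 100))  # a million elements, all views of one value
print(x.nbytes)                          # 8000000 (logical size)
print(x.base.nbytes)                     # 8 (actual backing storage)
```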
def test_get_tags_multi(self):
'Test get_tags with multi-tag file'
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, ['Artist'], ['Album'], ['Title'], 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], 'Album')
self.assertEqual(tags['artist'], 'Artist')
self.assertEqual(tags['title'], 'Title')
self.assertEqual(tags['lyrics'], 'Lyrics') | 798,313,862,882,296,400 | Test get_tags with multi-tag file | test/test_misc.py | test_get_tags_multi | abulimov/lyricstagger | python | def test_get_tags_multi(self):
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, ['Artist'], ['Album'], ['Title'], 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], 'Album')
self.assertEqual(tags['artist'], 'Artist')
self.assertEqual(tags['title'], 'Title')
self.assertEqual(tags['lyrics'], 'Lyrics') |
def test_get_tags_single(self):
'Test get_tags with single-tag file'
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, 'Artist', 'Album', 'Title', 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], 'Album')
self.assertEqual(tags['artist'], 'Artist')
self.assertEqual(tags['title'], 'Title')
self.assertEqual(tags['lyrics'], 'Lyrics') | 3,508,435,473,668,544,500 | Test get_tags with single-tag file | test/test_misc.py | test_get_tags_single | abulimov/lyricstagger | python | def test_get_tags_single(self):
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, 'Artist', 'Album', 'Title', 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], 'Album')
self.assertEqual(tags['artist'], 'Artist')
self.assertEqual(tags['title'], 'Title')
self.assertEqual(tags['lyrics'], 'Lyrics') |
def test_get_tags_broken(self):
'Test get_tags with broken tags'
audio = fakers.BrokenFile('audio/ogg', {'test': 'Test', 'album': 'Album', 'title': 'Title'})
tags = misc.get_tags(audio)
self.assertEqual(tags, None) | -8,760,199,685,019,230,000 | Test get_tags with broken tags | test/test_misc.py | test_get_tags_broken | abulimov/lyricstagger | python | def test_get_tags_broken(self):
audio = fakers.BrokenFile('audio/ogg', {'test': 'Test', 'album': 'Album', 'title': 'Title'})
tags = misc.get_tags(audio)
self.assertEqual(tags, None) |
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_empty_ok(self):
'Test edit_lyrics with empty lyrics and correct edit'
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, '') | 1,851,476,499,266,383,000 | Test edit_lyrics with empty lyrics and correct edit | test/test_misc.py | test_edit_lyrics_empty_ok | abulimov/lyricstagger | python | @mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_empty_ok(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, '')
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_empty_fail(self):
'Test edit_lyrics with empty lyrics and errored edit'
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None) | -676,919,345,495,653,200 | Test edit_lyrics with empty lyrics and errored edit | test/test_misc.py | test_edit_lyrics_empty_fail | abulimov/lyricstagger | python | @mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_empty_fail(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None) |
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_nonempty_ok(self):
'Test edit_lyrics with non-empty lyrics and correct edit'
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, 'Lyrics') | 114,619,421,927,214,100 | Test edit_lyrics with non-empty lyrics and correct edit | test/test_misc.py | test_edit_lyrics_nonempty_ok | abulimov/lyricstagger | python | @mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_nonempty_ok(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, 'Lyrics') |
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_nonempty_fail(self):
'Test edit_lyrics with non-empty lyrics and errored edit'
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None) | -6,727,033,119,380,178,000 | Test edit_lyrics with non-empty lyrics and errored edit | test/test_misc.py | test_edit_lyrics_nonempty_fail | abulimov/lyricstagger | python | @mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_nonempty_fail(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None) |
def compute_hash(features, hash_matrix, hash_vector):
'Compute hash values for features using the hash function (A * x + c) mod 2.\n\n Args:\n features: NumPy float array of shape (n, d), the features to hash.\n hash_matrix: NumPy float array of shape (num_feature_bits, num_hash_bits),\n a random matrix A to construct the hash function.\n hash_vector: NumPy float array of shape (1, num_hash_bits),\n a random vector c to construct the hash function.\n\n Returns:\n NumPy float array of shape (n, 1) containing the hashed values in [0, 1].\n '
def convert_int_to_bin(x, dimension):
return '{:b}'.format(x).zfill(dimension)[(- dimension):]
convert_int_to_bin = np.vectorize(convert_int_to_bin)
convert_bin_to_int = np.vectorize((lambda x: int(x, 2)))
num_features = features.shape[0]
(num_feature_bits, num_hash_bits) = hash_matrix.shape
feature_sum_str = [''.join(x) for x in features.astype('str')]
feature_sum_hex = [hashlib.md5(s).hexdigest() for s in feature_sum_str]
feature_sum_int = [int(h, 16) for h in feature_sum_hex]
feature_sum_bin = convert_int_to_bin(feature_sum_int, dimension=num_feature_bits)
feature_sum_bin_matrix = np.array([[int(c) for c in s] for s in feature_sum_bin])
feature_hashed = (np.dot(feature_sum_bin_matrix, hash_matrix) + np.repeat(hash_vector, repeats=num_features, axis=0))
feature_hashed_bits = np.mod(feature_hashed, 2)
feature_hashed_bit_char = convert_int_to_bin(feature_hashed_bits, 1)
feature_hashed_bit_str = [''.join(s) for s in feature_hashed_bit_char]
feature_hashed_int = convert_bin_to_int(feature_hashed_bit_str)
hashed_val = ((feature_hashed_int * 1.0) / (2 ** num_hash_bits))
return hashed_val.reshape((- 1), 1) | 4,618,662,458,481,842,000 | Compute hash values for features using the hash function (A * x + c) mod 2.
Args:
features: NumPy float array of shape (n, d), the features to hash.
hash_matrix: NumPy float array of shape (num_feature_bits, num_hash_bits),
a random matrix A to construct the hash function.
hash_vector: NumPy float array of shape (1, num_hash_bits),
a random vector c to construct the hash function.
Returns:
NumPy float array of shape (n, 1) containing the hashed values in [0, 1]. | stochastic_to_deterministic/hashing.py | compute_hash | 3rd/google-research | python | def compute_hash(features, hash_matrix, hash_vector):
'Compute hash values for features using the hash function (A * x + c) mod 2.\n\n Args:\n features: NumPy float array of shape (n, d), the features to hash.\n hash_matrix: NumPy float array of shape (num_feature_bits, num_hash_bits),\n a random matrix A to construct the hash function.\n hash_vector: NumPy float array of shape (1, num_hash_bits),\n a random vector c to construct the hash function.\n\n Returns:\n NumPy float array of shape (n, 1) containing the hashed values in [0, 1].\n '
def convert_int_to_bin(x, dimension):
return '{:b}'.format(x).zfill(dimension)[(- dimension):]
convert_int_to_bin = np.vectorize(convert_int_to_bin)
convert_bin_to_int = np.vectorize((lambda x: int(x, 2)))
num_features = features.shape[0]
(num_feature_bits, num_hash_bits) = hash_matrix.shape
feature_sum_str = [''.join(x) for x in features.astype('str')]
feature_sum_hex = [hashlib.md5(s).hexdigest() for s in feature_sum_str]
feature_sum_int = [int(h, 16) for h in feature_sum_hex]
feature_sum_bin = convert_int_to_bin(feature_sum_int, dimension=num_feature_bits)
feature_sum_bin_matrix = np.array([[int(c) for c in s] for s in feature_sum_bin])
feature_hashed = (np.dot(feature_sum_bin_matrix, hash_matrix) + np.repeat(hash_vector, repeats=num_features, axis=0))
feature_hashed_bits = np.mod(feature_hashed, 2)
feature_hashed_bit_char = convert_int_to_bin(feature_hashed_bits, 1)
feature_hashed_bit_str = [''.join(s) for s in feature_hashed_bit_char]
feature_hashed_int = convert_bin_to_int(feature_hashed_bit_str)
hashed_val = ((feature_hashed_int * 1.0) / (2 ** num_hash_bits))
return hashed_val.reshape((- 1), 1) |
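A minimal sketch of the hashing idea described in this record's docstring, separate from the implementation above; the sizes and the bit encoding of x are made-up illustrations, not values from the source:

import numpy as np

num_feature_bits, num_hash_bits = 16, 8                          # illustrative sizes only
rng = np.random.RandomState(0)
A = rng.randint(0, 2, size=(num_feature_bits, num_hash_bits))    # random binary matrix A
c = rng.randint(0, 2, size=(1, num_hash_bits))                   # random binary vector c

x = rng.randint(0, 2, size=(1, num_feature_bits))    # one example, already encoded as bits
bits = np.mod(x @ A + c, 2).ravel()                  # (A * x + c) mod 2, as in the docstring
value = int("".join(str(b) for b in bits), 2) / 2 ** num_hash_bits
assert 0.0 <= value < 1.0                            # deterministic for fixed A, c and x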
def main(argv):
'Example usage of hash function.'
del argv
num_feature_bits = 128
num_hash_bits = 32
hash_matrix = (np.random.rand(num_feature_bits, num_hash_bits) > 0.5).astype('int')
hash_vector = (np.random.rand(1, num_hash_bits) > 0.5).astype('int')
num_examples = 10
dimension = 4
features = np.random.normal(size=(num_examples, dimension)).astype(np.float32)
hash_val = compute_hash(features, hash_matrix, hash_vector)
print('Feature matrix:')
print(features)
print('\nHashed values:')
print(hash_val) | 1,849,269,074,562,170,600 | Example usage of hash function. | stochastic_to_deterministic/hashing.py | main | 3rd/google-research | python | def main(argv):
del argv
num_feature_bits = 128
num_hash_bits = 32
hash_matrix = (np.random.rand(num_feature_bits, num_hash_bits) > 0.5).astype('int')
hash_vector = (np.random.rand(1, num_hash_bits) > 0.5).astype('int')
num_examples = 10
dimension = 4
features = np.random.normal(size=(num_examples, dimension)).astype(np.float32)
hash_val = compute_hash(features, hash_matrix, hash_vector)
print('Feature matrix:')
print(features)
print('\nHashed values:')
print(hash_val) |
def test_bad_cert():
'Make sure that the client detects that the test cert is self signed.'
with mocks.Server() as server:
try:
assemblyline_client.get_client(server.address)
assert False
except assemblyline_client.ClientError as ce:
assert (('CERTIFICATE_VERIFY_FAILED' in str(ce)) or ('certificate verify failed' in str(ce))) | 1,326,728,576,807,671,800 | Make sure that the client detects that the test cert is self signed. | test/test_v3_client.py | test_bad_cert | IanLee1521/assemblyline_client | python | def test_bad_cert():
with mocks.Server() as server:
try:
assemblyline_client.get_client(server.address)
assert False
except assemblyline_client.ClientError as ce:
assert (('CERTIFICATE_VERIFY_FAILED' in str(ce)) or ('certificate verify failed' in str(ce))) |
def test_noauth():
'The test server should let us login with no authentication.'
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False)
assert (len(server.logins) == 1) | 3,750,868,549,800,948,700 | The test server should let us login with no authentication. | test/test_v3_client.py | test_noauth | IanLee1521/assemblyline_client | python | def test_noauth():
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False)
assert (len(server.logins) == 1) |
def test_noauth_submit(mocker):
'Submit a file and ensure that the same file is unpacked.'
with mocks.Server() as server:
client = assemblyline_client.get_client(server.address, verify=False)
submits = server.submits
client.submit(path='readme.txt', contents=b'abc123')
assert (len(submits) == 1)
assert (b64decode(submits[0]['binary']) == b'abc123')
assert (submits[0]['name'] == 'readme.txt')
submits.pop()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('assemblyline_client.v3_client.open', mock.mock_open(read_data=b'abc123'), create=True)
client.submit(path='readme.txt')
assert (len(submits) == 1)
assert (b64decode(submits[0]['binary']) == b'abc123')
assert (submits[0]['name'] == 'readme.txt')
submits.pop() | 5,355,378,751,244,939,000 | Submit a file and ensure that the same file is unpacked. | test/test_v3_client.py | test_noauth_submit | IanLee1521/assemblyline_client | python | def test_noauth_submit(mocker):
with mocks.Server() as server:
client = assemblyline_client.get_client(server.address, verify=False)
submits = server.submits
client.submit(path='readme.txt', contents=b'abc123')
assert (len(submits) == 1)
assert (b64decode(submits[0]['binary']) == b'abc123')
assert (submits[0]['name'] == 'readme.txt')
submits.pop()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('assemblyline_client.v3_client.open', mock.mock_open(read_data=b'abc123'), create=True)
client.submit(path='readme.txt')
assert (len(submits) == 1)
assert (b64decode(submits[0]['binary']) == b'abc123')
assert (submits[0]['name'] == 'readme.txt')
submits.pop() |
def test_encrypt_password_auth():
'Send an encrypted password and decrypt it.'
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, auth=('username', 'password'))
assert (len(server.logins) == 1)
assert (server.logins[0]['user'] == 'username')
assert (server.logins[0]['password'] != 'password')
assert (server.private_key.decrypt(b64decode(server.logins[0]['password']), 'ERROR') == b'password') | -5,742,509,680,655,393,000 | Send an encrypted password and decrypt it. | test/test_v3_client.py | test_encrypt_password_auth | IanLee1521/assemblyline_client | python | def test_encrypt_password_auth():
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, auth=('username', 'password'))
assert (len(server.logins) == 1)
assert (server.logins[0]['user'] == 'username')
assert (server.logins[0]['password'] != 'password')
assert (server.private_key.decrypt(b64decode(server.logins[0]['password']), 'ERROR') == b'password') |
def test_encrypt_apikey_auth():
'Send an encrypted apikey and decrypt it.'
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, apikey=('username', 'ANAPIKEY'))
assert (len(server.logins) == 1)
assert (server.logins[0]['user'] == 'username')
assert (server.logins[0]['apikey'] != 'ANAPIKEY')
assert (server.private_key.decrypt(b64decode(server.logins[0]['apikey']), 'ERROR') == b'ANAPIKEY') | 3,811,134,333,721,106,000 | Send an encrypted apikey and decrypt it. | test/test_v3_client.py | test_encrypt_apikey_auth | IanLee1521/assemblyline_client | python | def test_encrypt_apikey_auth():
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, apikey=('username', 'ANAPIKEY'))
assert (len(server.logins) == 1)
assert (server.logins[0]['user'] == 'username')
assert (server.logins[0]['apikey'] != 'ANAPIKEY')
assert (server.private_key.decrypt(b64decode(server.logins[0]['apikey']), 'ERROR') == b'ANAPIKEY') |
def get(*, db_session, task_id: int) -> Optional[Task]:
'Get a single task by ID.'
return db_session.query(Task).filter((Task.id == task_id)).first() | -183,606,507,448,265,760 | Get a single task by ID. | src/dispatch/task/service.py | get | WouldYouKindly/dispatch | python | def get(*, db_session, task_id: int) -> Optional[Task]:
return db_session.query(Task).filter((Task.id == task_id)).first() |
def get_by_resource_id(*, db_session, resource_id: str) -> Optional[Task]:
'Get a single task by resource id.'
return db_session.query(Task).filter((Task.resource_id == resource_id)).first() | 6,038,717,619,318,557,000 | Get a single task by resource id. | src/dispatch/task/service.py | get_by_resource_id | WouldYouKindly/dispatch | python | def get_by_resource_id(*, db_session, resource_id: str) -> Optional[Task]:
return db_session.query(Task).filter((Task.resource_id == resource_id)).first() |
def get_all(*, db_session) -> List[Optional[Task]]:
'Return all tasks.'
return db_session.query(Task) | -7,304,799,750,105,965,000 | Return all tasks. | src/dispatch/task/service.py | get_all | WouldYouKindly/dispatch | python | def get_all(*, db_session) -> List[Optional[Task]]:
return db_session.query(Task) |
def get_all_by_incident_id(*, db_session, incident_id: int) -> List[Optional[Task]]:
'Get all tasks by incident id.'
return db_session.query(Task).filter((Task.incident_id == incident_id)) | -6,121,668,309,695,642,000 | Get all tasks by incident id. | src/dispatch/task/service.py | get_all_by_incident_id | WouldYouKindly/dispatch | python | def get_all_by_incident_id(*, db_session, incident_id: int) -> List[Optional[Task]]:
return db_session.query(Task).filter((Task.incident_id == incident_id)) |
def get_all_by_incident_id_and_status(*, db_session, incident_id: int, status: str) -> List[Optional[Task]]:
'Get all tasks by incident id and status.'
return db_session.query(Task).filter((Task.incident_id == incident_id)).filter((Task.status == status)) | 6,681,191,692,443,516,000 | Get all tasks by incident id and status. | src/dispatch/task/service.py | get_all_by_incident_id_and_status | WouldYouKindly/dispatch | python | def get_all_by_incident_id_and_status(*, db_session, incident_id: int, status: str) -> List[Optional[Task]]:
return db_session.query(Task).filter((Task.incident_id == incident_id)).filter((Task.status == status)) |
def get_overdue_tasks(*, db_session) -> List[Optional[Task]]:
'Returns all tasks that have not been resolved and are past due date.'
return db_session.query(Task).filter((Task.status == TaskStatus.open)).filter((Task.reminders == True)).filter((Task.resolve_by < datetime.utcnow())).filter(or_(((Task.last_reminder_at + timedelta(days=1)) < datetime.utcnow()), (Task.last_reminder_at == None))).all() | -7,599,305,192,850,656,000 | Returns all tasks that have not been resolved and are past due date. | src/dispatch/task/service.py | get_overdue_tasks | WouldYouKindly/dispatch | python | def get_overdue_tasks(*, db_session) -> List[Optional[Task]]:
return db_session.query(Task).filter((Task.status == TaskStatus.open)).filter((Task.reminders == True)).filter((Task.resolve_by < datetime.utcnow())).filter(or_(((Task.last_reminder_at + timedelta(days=1)) < datetime.utcnow()), (Task.last_reminder_at == None))).all() |
def create(*, db_session, task_in: TaskCreate) -> Task:
'Create a new task.'
incident = incident_service.get(db_session=db_session, incident_id=task_in.incident.id)
tickets = [ticket_service.get_or_create_by_weblink(db_session=db_session, weblink=t.weblink, resource_type='task-ticket') for t in task_in.tickets]
assignees = []
for i in task_in.assignees:
assignee = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=incident.id, user_email=i.individual.email)
if assignee:
assignees.append(assignee)
creator_email = None
if (not task_in.creator):
creator_email = task_in.owner.individual.email
else:
creator_email = task_in.creator.individual.email
creator = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=incident.id, user_email=creator_email)
if (not assignees):
assignees.append(creator)
if task_in.owner:
owner = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=incident.id, user_email=task_in.owner.individual.email)
else:
owner = incident.commander
task = Task(**task_in.dict(exclude={'assignees', 'owner', 'incident', 'creator', 'tickets'}), creator=creator, owner=owner, assignees=assignees, incident=incident, tickets=tickets)
event_service.log(db_session=db_session, source='Dispatch Core App', description='New incident task created', details={'weblink': task.weblink}, incident_id=incident.id)
db_session.add(task)
db_session.commit()
return task | -480,220,439,217,027,840 | Create a new task. | src/dispatch/task/service.py | create | WouldYouKindly/dispatch | python | def create(*, db_session, task_in: TaskCreate) -> Task:
incident = incident_service.get(db_session=db_session, incident_id=task_in.incident.id)
tickets = [ticket_service.get_or_create_by_weblink(db_session=db_session, weblink=t.weblink, resource_type='task-ticket') for t in task_in.tickets]
assignees = []
for i in task_in.assignees:
assignee = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=incident.id, user_email=i.individual.email)
if assignee:
assignees.append(assignee)
creator_email = None
if (not task_in.creator):
creator_email = task_in.owner.individual.email
else:
creator_email = task_in.creator.individual.email
creator = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=incident.id, user_email=creator_email)
if (not assignees):
assignees.append(creator)
if task_in.owner:
owner = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=incident.id, user_email=task_in.owner.individual.email)
else:
owner = incident.commander
task = Task(**task_in.dict(exclude={'assignees', 'owner', 'incident', 'creator', 'tickets'}), creator=creator, owner=owner, assignees=assignees, incident=incident, tickets=tickets)
event_service.log(db_session=db_session, source='Dispatch Core App', description='New incident task created', details={'weblink': task.weblink}, incident_id=incident.id)
db_session.add(task)
db_session.commit()
return task |
def update(*, db_session, task: Task, task_in: TaskUpdate, sync_external: bool=True) -> Task:
'Update an existing task.'
assignees = []
for i in task_in.assignees:
assignees.append(incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=task.incident.id, user_email=i.individual.email))
task.assignees = assignees
if task_in.owner:
task.owner = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=task.incident.id, user_email=task_in.owner.individual.email)
update_data = task_in.dict(skip_defaults=True, exclude={'assignees', 'owner', 'creator', 'incident', 'tickets'})
for field in update_data.keys():
setattr(task, field, update_data[field])
drive_task_plugin = plugin_service.get_active(db_session=db_session, plugin_type='task')
if drive_task_plugin:
if sync_external:
try:
if task.incident.incident_document:
file_id = task.incident.incident_document.resource_id
drive_task_plugin.instance.update(file_id, task.resource_id, resolved=task.status)
except Exception:
if task.incident.incident_review_document:
file_id = task.incident.incident_review_document.resource_id
drive_task_plugin.instance.update(file_id, task.resource_id, resolved=task.status)
db_session.add(task)
db_session.commit()
return task | -9,175,065,129,514,167,000 | Update an existing task. | src/dispatch/task/service.py | update | WouldYouKindly/dispatch | python | def update(*, db_session, task: Task, task_in: TaskUpdate, sync_external: bool=True) -> Task:
assignees = []
for i in task_in.assignees:
assignees.append(incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=task.incident.id, user_email=i.individual.email))
task.assignees = assignees
if task_in.owner:
task.owner = incident_flows.incident_add_or_reactivate_participant_flow(db_session=db_session, incident_id=task.incident.id, user_email=task_in.owner.individual.email)
update_data = task_in.dict(skip_defaults=True, exclude={'assignees', 'owner', 'creator', 'incident', 'tickets'})
for field in update_data.keys():
setattr(task, field, update_data[field])
drive_task_plugin = plugin_service.get_active(db_session=db_session, plugin_type='task')
if drive_task_plugin:
if sync_external:
try:
if task.incident.incident_document:
file_id = task.incident.incident_document.resource_id
drive_task_plugin.instance.update(file_id, task.resource_id, resolved=task.status)
except Exception:
if task.incident.incident_review_document:
file_id = task.incident.incident_review_document.resource_id
drive_task_plugin.instance.update(file_id, task.resource_id, resolved=task.status)
db_session.add(task)
db_session.commit()
return task |
def delete(*, db_session, task_id: int):
'Delete an existing task.'
task = db_session.query(Task).filter((Task.id == task_id)).first()
db_session.delete(task)
db_session.commit() | 5,685,970,886,979,586,000 | Delete an existing task. | src/dispatch/task/service.py | delete | WouldYouKindly/dispatch | python | def delete(*, db_session, task_id: int):
task = db_session.query(Task).filter((Task.id == task_id)).first()
db_session.delete(task)
db_session.commit() |
def handle(self, *args, **options):
'\n Exits with code 1 when CommandError is raised; otherwise exits with code 0.\n '
_retcode = 1
_dbname = 'default'
try:
print(('settings.ENV_MODE = %s' % settings.ENV_MODE))
print(('settings.DATABASES = %s' % settings.DATABASES))
_id = int(args[0])
_name = args[1]
print(('id: %s, name:%s' % (_id, _name)))
qs = Thing.objects.filter(id=_id)
_nowdt = timezone.now()
if (0 < len(qs)):
print('do update.')
_r = qs[0]
_r.name = _name
_r.update_at = _nowdt
_r.save(using=_dbname)
else:
print('do insert.')
if (_id < 1):
_id = None
_t = Thing(id=_id, name=_name, create_at=_nowdt, update_at=_nowdt)
_t.save(using=_dbname)
except:
print(('EXCEPT: %s(%s)' % (sys.exc_info()[0], sys.exc_info()[1])))
print('finished(ng)')
raise CommandError('ng')
print('finished(ok)')
sys.exit(0) | -1,360,971,241,329,978,000 | Exits with code 1 when CommandError is raised; otherwise exits with code 0. | python-django/djmultidb/app1/management/commands/set_thing.py | handle | dictoss/proto | python | def handle(self, *args, **options):
'\n Exits with code 1 when CommandError is raised; otherwise exits with code 0.\n '
_retcode = 1
_dbname = 'default'
try:
print(('settings.ENV_MODE = %s' % settings.ENV_MODE))
print(('settings.DATABASES = %s' % settings.DATABASES))
_id = int(args[0])
_name = args[1]
print(('id: %s, name:%s' % (_id, _name)))
qs = Thing.objects.filter(id=_id)
_nowdt = timezone.now()
if (0 < len(qs)):
print('do update.')
_r = qs[0]
_r.name = _name
_r.update_at = _nowdt
_r.save(using=_dbname)
else:
print('do insert.')
if (_id < 1):
_id = None
_t = Thing(id=_id, name=_name, create_at=_nowdt, update_at=_nowdt)
_t.save(using=_dbname)
except:
print(('EXCEPT: %s(%s)' % (sys.exc_info()[0], sys.exc_info()[1])))
print('finished(ng)')
raise CommandError('ng')
print('finished(ok)')
sys.exit(0) |
def __init__(self, iqn=None, nqn=None, portal=None, wwn=None):
'\n Keyword args:\n iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).\n nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).\n portal (str): IP and port number (or `null` if target is not iSCSI).\n wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel).\n '
if (iqn is not None):
self.iqn = iqn
if (nqn is not None):
self.nqn = nqn
if (portal is not None):
self.portal = portal
if (wwn is not None):
self.wwn = wwn | -6,009,425,147,918,108,000 | Keyword args:
iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).
nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).
portal (str): IP and port number (or `null` if target is not iSCSI).
wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel). | pypureclient/flasharray/FA_2_11/models/port_common.py | __init__ | Flav-STOR-WL/py-pure-client | python | def __init__(self, iqn=None, nqn=None, portal=None, wwn=None):
'\n Keyword args:\n iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).\n nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).\n portal (str): IP and port number (or `null` if target is not iSCSI).\n wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel).\n '
if (iqn is not None):
self.iqn = iqn
if (nqn is not None):
self.nqn = nqn
if (portal is not None):
self.portal = portal
if (wwn is not None):
self.wwn = wwn |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(PortCommon, dict):
for (key, value) in self.items():
result[key] = value
return result | -6,012,878,246,140,936,000 | Returns the model properties as a dict | pypureclient/flasharray/FA_2_11/models/port_common.py | to_dict | Flav-STOR-WL/py-pure-client | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(PortCommon, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | pypureclient/flasharray/FA_2_11/models/port_common.py | to_str | Flav-STOR-WL/py-pure-client | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | pypureclient/flasharray/FA_2_11/models/port_common.py | __repr__ | Flav-STOR-WL/py-pure-client | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, PortCommon)):
return False
return (self.__dict__ == other.__dict__) | 3,073,561,753,814,808,600 | Returns true if both objects are equal | pypureclient/flasharray/FA_2_11/models/port_common.py | __eq__ | Flav-STOR-WL/py-pure-client | python | def __eq__(self, other):
if (not isinstance(other, PortCommon)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | pypureclient/flasharray/FA_2_11/models/port_common.py | __ne__ | Flav-STOR-WL/py-pure-client | python | def __ne__(self, other):
return (not (self == other)) |
def _terminate_processes() -> None:
"Kill all spawned processes.\n\n Processes to be killed must be appended to `utils.processes_to_kill`\n as they are spawned.\n\n An important caveat: since there's no supported way to kill another\n thread in Python, this function cannot stop other threads from\n continuing to execute while it kills the processes that they've\n spawned. This may occasionally lead to unexpected behaviour.\n "
while processes_to_kill:
process = processes_to_kill.popleft()
if isinstance(process.args, MutableSequence):
args = process.args
else:
args = [process.args]
cidfile = [str(arg).split('=')[1] for arg in args if ('--cidfile' in str(arg))]
if cidfile:
try:
with open(cidfile[0]) as inp_stream:
p = subprocess.Popen(['docker', 'kill', inp_stream.read()], shell=False)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except FileNotFoundError:
pass
if process.stdin:
process.stdin.close()
try:
process.wait(10)
except subprocess.TimeoutExpired:
pass
process.kill() | -4,192,651,991,820,420,600 | Kill all spawned processes.
Processes to be killed must be appended to `utils.processes_to_kill`
as they are spawned.
An important caveat: since there's no supported way to kill another
thread in Python, this function cannot stop other threads from
continuing to execute while it kills the processes that they've
spawned. This may occasionally lead to unexpected behaviour. | cwltool/main.py | _terminate_processes | suecharo/cwltool | python | def _terminate_processes() -> None:
"Kill all spawned processes.\n\n Processes to be killed must be appended to `utils.processes_to_kill`\n as they are spawned.\n\n An important caveat: since there's no supported way to kill another\n thread in Python, this function cannot stop other threads from\n continuing to execute while it kills the processes that they've\n spawned. This may occasionally lead to unexpected behaviour.\n "
while processes_to_kill:
process = processes_to_kill.popleft()
if isinstance(process.args, MutableSequence):
args = process.args
else:
args = [process.args]
cidfile = [str(arg).split('=')[1] for arg in args if ('--cidfile' in str(arg))]
if cidfile:
try:
with open(cidfile[0]) as inp_stream:
p = subprocess.Popen(['docker', 'kill', inp_stream.read()], shell=False)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except FileNotFoundError:
pass
if process.stdin:
process.stdin.close()
try:
process.wait(10)
except subprocess.TimeoutExpired:
pass
process.kill() |
def _signal_handler(signum: int, _: Any) -> None:
"Kill all spawned processes and exit.\n\n Note that it's possible for another thread to spawn a process after\n all processes have been killed, but before Python exits.\n\n Refer to the docstring for _terminate_processes() for other caveats.\n "
_terminate_processes()
sys.exit(signum) | -3,318,794,166,413,561,000 | Kill all spawned processes and exit.
Note that it's possible for another thread to spawn a process after
all processes have been killed, but before Python exits.
Refer to the docstring for _terminate_processes() for other caveats. | cwltool/main.py | _signal_handler | suecharo/cwltool | python | def _signal_handler(signum: int, _: Any) -> None:
"Kill all spawned processes and exit.\n\n Note that it's possible for another thread to spawn a process after\n all processes have been killed, but before Python exits.\n\n Refer to the docstring for _terminate_processes() for other caveats.\n "
_terminate_processes()
sys.exit(signum) |
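One plausible way such a handler is installed; a sketch only, since the registration code is not part of this record and the chosen signals are an assumption:

import signal

# Assumes _signal_handler from the record above is in scope.
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)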
def generate_example_input(inptype: Optional[CWLOutputType], default: Optional[CWLOutputType]) -> Tuple[(Any, str)]:
'Convert a single input schema into an example.'
example = None
comment = ''
defaults = {'null': 'null', 'Any': 'null', 'boolean': False, 'int': 0, 'long': 0, 'float': 0.1, 'double': 0.1, 'string': 'a_string', 'File': ruamel.yaml.comments.CommentedMap([('class', 'File'), ('path', 'a/file/path')]), 'Directory': ruamel.yaml.comments.CommentedMap([('class', 'Directory'), ('path', 'a/directory/path')])}
if isinstance(inptype, MutableSequence):
optional = False
if ('null' in inptype):
inptype.remove('null')
optional = True
if (len(inptype) == 1):
(example, comment) = generate_example_input(inptype[0], default)
if optional:
if comment:
comment = f'{comment} (optional)'
else:
comment = 'optional'
else:
example = CommentedSeq()
for (index, entry) in enumerate(inptype):
(value, e_comment) = generate_example_input(entry, default)
example.append(value)
example.yaml_add_eol_comment(e_comment, index)
if optional:
comment = 'optional'
elif (isinstance(inptype, Mapping) and ('type' in inptype)):
if (inptype['type'] == 'array'):
first_item = cast(MutableSequence[CWLObjectType], inptype['items'])[0]
items_len = len(cast(Sized, inptype['items']))
if ((items_len == 1) and ('type' in first_item) and (first_item['type'] == 'enum')):
example = first_item['symbols']
if ('name' in first_item):
comment = 'array of type "{}".'.format(first_item['name'])
else:
(value, comment) = generate_example_input(inptype['items'], None)
comment = ('array of ' + comment)
if (items_len == 1):
example = [value]
else:
example = value
if (default is not None):
example = default
elif (inptype['type'] == 'enum'):
symbols = cast(List[str], inptype['symbols'])
if (default is not None):
example = default
elif ('default' in inptype):
example = inptype['default']
elif (len(cast(Sized, inptype['symbols'])) == 1):
example = symbols[0]
else:
example = '{}_enum_value'.format(inptype.get('name', 'valid'))
comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif (inptype['type'] == 'record'):
example = ruamel.yaml.comments.CommentedMap()
if ('name' in inptype):
comment = '"{}" record type.'.format(inptype['name'])
else:
comment = 'Anonymous record type.'
for field in cast(List[CWLObjectType], inptype['fields']):
(value, f_comment) = generate_example_input(field['type'], None)
example.insert(0, shortname(cast(str, field['name'])), value, f_comment)
elif ('default' in inptype):
example = inptype['default']
comment = 'default value of type "{}".'.format(inptype['type'])
else:
example = defaults.get(cast(str, inptype['type']), str(inptype))
comment = 'type "{}".'.format(inptype['type'])
elif (not default):
example = defaults.get(str(inptype), str(inptype))
comment = f'type "{inptype}"'
else:
example = default
comment = f'default value of type "{inptype}".'
return (example, comment) | 1,580,915,058,386,904,000 | Convert a single input schema into an example. | cwltool/main.py | generate_example_input | suecharo/cwltool | python | def generate_example_input(inptype: Optional[CWLOutputType], default: Optional[CWLOutputType]) -> Tuple[(Any, str)]:
example = None
comment = ''
defaults = {'null': 'null', 'Any': 'null', 'boolean': False, 'int': 0, 'long': 0, 'float': 0.1, 'double': 0.1, 'string': 'a_string', 'File': ruamel.yaml.comments.CommentedMap([('class', 'File'), ('path', 'a/file/path')]), 'Directory': ruamel.yaml.comments.CommentedMap([('class', 'Directory'), ('path', 'a/directory/path')])}
if isinstance(inptype, MutableSequence):
optional = False
if ('null' in inptype):
inptype.remove('null')
optional = True
if (len(inptype) == 1):
(example, comment) = generate_example_input(inptype[0], default)
if optional:
if comment:
comment = f'{comment} (optional)'
else:
comment = 'optional'
else:
example = CommentedSeq()
for (index, entry) in enumerate(inptype):
(value, e_comment) = generate_example_input(entry, default)
example.append(value)
example.yaml_add_eol_comment(e_comment, index)
if optional:
comment = 'optional'
elif (isinstance(inptype, Mapping) and ('type' in inptype)):
if (inptype['type'] == 'array'):
first_item = cast(MutableSequence[CWLObjectType], inptype['items'])[0]
items_len = len(cast(Sized, inptype['items']))
if ((items_len == 1) and ('type' in first_item) and (first_item['type'] == 'enum')):
example = first_item['symbols']
if ('name' in first_item):
comment = 'array of type "{}".'.format(first_item['name'])
else:
(value, comment) = generate_example_input(inptype['items'], None)
comment = ('array of ' + comment)
if (items_len == 1):
example = [value]
else:
example = value
if (default is not None):
example = default
elif (inptype['type'] == 'enum'):
symbols = cast(List[str], inptype['symbols'])
if (default is not None):
example = default
elif ('default' in inptype):
example = inptype['default']
elif (len(cast(Sized, inptype['symbols'])) == 1):
example = symbols[0]
else:
example = '{}_enum_value'.format(inptype.get('name', 'valid'))
comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif (inptype['type'] == 'record'):
example = ruamel.yaml.comments.CommentedMap()
if ('name' in inptype):
comment = '"{}" record type.'.format(inptype['name'])
else:
comment = 'Anonymous record type.'
for field in cast(List[CWLObjectType], inptype['fields']):
(value, f_comment) = generate_example_input(field['type'], None)
example.insert(0, shortname(cast(str, field['name'])), value, f_comment)
elif ('default' in inptype):
example = inptype['default']
comment = 'default value of type "{}".'.format(inptype['type'])
else:
example = defaults.get(cast(str, inptype['type']), str(inptype))
comment = 'type "{}".'.format(inptype['type'])
elif (not default):
example = defaults.get(str(inptype), str(inptype))
comment = f'type "{inptype}"'
else:
example = default
comment = f'default value of type "{inptype}".'
return (example, comment) |
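Two worked examples of the behaviour, derived from the defaults table and the optional-type branch above; a sketch only, with the import path taken from the record's path column (it may differ between cwltool versions):

from cwltool.main import generate_example_input  # path per the record above

value, note = generate_example_input("string", None)
# value == "a_string", note == 'type "string"'

value, note = generate_example_input(["null", "int"], None)
# value == 0, note == 'type "int" (optional)'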
def realize_input_schema(input_types: MutableSequence[Union[(str, CWLObjectType)]], schema_defs: MutableMapping[(str, CWLObjectType)]) -> MutableSequence[Union[(str, CWLObjectType)]]:
'Replace references to named typed with the actual types.'
for (index, entry) in enumerate(input_types):
if isinstance(entry, str):
if ('#' in entry):
(_, input_type_name) = entry.split('#')
else:
input_type_name = entry
if (input_type_name in schema_defs):
entry = input_types[index] = schema_defs[input_type_name]
if isinstance(entry, MutableMapping):
if (isinstance(entry['type'], str) and ('#' in entry['type'])):
(_, input_type_name) = entry['type'].split('#')
if (input_type_name in schema_defs):
entry['type'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], schema_defs[input_type_name]), schema_defs))
if isinstance(entry['type'], MutableSequence):
entry['type'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], entry['type']), schema_defs))
if isinstance(entry['type'], Mapping):
entry['type'] = cast(CWLOutputAtomType, realize_input_schema([cast(CWLObjectType, entry['type'])], schema_defs))
if (entry['type'] == 'array'):
items = (entry['items'] if (not isinstance(entry['items'], str)) else [entry['items']])
entry['items'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], items), schema_defs))
if (entry['type'] == 'record'):
entry['fields'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], entry['fields']), schema_defs))
return input_types | 7,038,634,373,549,778,000 | Replace references to named typed with the actual types. | cwltool/main.py | realize_input_schema | suecharo/cwltool | python | def realize_input_schema(input_types: MutableSequence[Union[(str, CWLObjectType)]], schema_defs: MutableMapping[(str, CWLObjectType)]) -> MutableSequence[Union[(str, CWLObjectType)]]:
for (index, entry) in enumerate(input_types):
if isinstance(entry, str):
if ('#' in entry):
(_, input_type_name) = entry.split('#')
else:
input_type_name = entry
if (input_type_name in schema_defs):
entry = input_types[index] = schema_defs[input_type_name]
if isinstance(entry, MutableMapping):
if (isinstance(entry['type'], str) and ('#' in entry['type'])):
(_, input_type_name) = entry['type'].split('#')
if (input_type_name in schema_defs):
entry['type'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], schema_defs[input_type_name]), schema_defs))
if isinstance(entry['type'], MutableSequence):
entry['type'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], entry['type']), schema_defs))
if isinstance(entry['type'], Mapping):
entry['type'] = cast(CWLOutputAtomType, realize_input_schema([cast(CWLObjectType, entry['type'])], schema_defs))
if (entry['type'] == 'array'):
items = (entry['items'] if (not isinstance(entry['items'], str)) else [entry['items']])
entry['items'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], items), schema_defs))
if (entry['type'] == 'record'):
entry['fields'] = cast(CWLOutputAtomType, realize_input_schema(cast(MutableSequence[Union[(str, CWLObjectType)]], entry['fields']), schema_defs))
return input_types |
def generate_input_template(tool: Process) -> CWLObjectType:
'Generate an example input object for the given CWL process.'
template = ruamel.yaml.comments.CommentedMap()
for inp in cast(List[MutableMapping[(str, str)]], realize_input_schema(tool.tool['inputs'], tool.schemaDefs)):
name = shortname(inp['id'])
(value, comment) = generate_example_input(inp['type'], inp.get('default', None))
template.insert(0, name, value, comment)
return template | 4,096,496,993,969,059,300 | Generate an example input object for the given CWL process. | cwltool/main.py | generate_input_template | suecharo/cwltool | python | def generate_input_template(tool: Process) -> CWLObjectType:
template = ruamel.yaml.comments.CommentedMap()
for inp in cast(List[MutableMapping[(str, str)]], realize_input_schema(tool.tool['inputs'], tool.schemaDefs)):
name = shortname(inp['id'])
(value, comment) = generate_example_input(inp['type'], inp.get('default', None))
template.insert(0, name, value, comment)
return template |
def make_relative(base: str, obj: CWLObjectType) -> None:
'Relativize the location URI of a File or Directory object.'
uri = cast(str, obj.get('location', obj.get('path')))
if ((':' in uri.split('/')[0]) and (not uri.startswith('file://'))):
pass
elif uri.startswith('file://'):
uri = uri_file_path(uri)
obj['location'] = os.path.relpath(uri, base) | 1,790,002,679,242,000,600 | Relativize the location URI of a File or Directory object. | cwltool/main.py | make_relative | suecharo/cwltool | python | def make_relative(base: str, obj: CWLObjectType) -> None:
uri = cast(str, obj.get('location', obj.get('path')))
if ((':' in uri.split('/')[0]) and (not uri.startswith('file://'))):
pass
elif uri.startswith('file://'):
uri = uri_file_path(uri)
obj['location'] = os.path.relpath(uri, base) |
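For a file:// location the rewrite reduces to os.path.relpath on the decoded path; a tiny sketch with a made-up base directory:

import os

base = "/data/run1"
# make_relative(base, obj) would rewrite obj["location"] to this relative form:
os.path.relpath("/data/run1/inputs/reads.fastq", base)   # -> 'inputs/reads.fastq'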
def printdeps(obj: CWLObjectType, document_loader: Loader, stdout: Union[(TextIO, StreamWriter)], relative_deps: str, uri: str, basedir: Optional[str]=None, nestdirs: bool=True) -> None:
'Print a JSON representation of the dependencies of the CWL document.'
deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
if (relative_deps == 'primary'):
base = (basedir if basedir else os.path.dirname(uri_file_path(str(uri))))
elif (relative_deps == 'cwd'):
base = os.getcwd()
visit_class(deps, ('File', 'Directory'), functools.partial(make_relative, base))
print(json_dumps(deps, indent=4, default=str), file=stdout) | 5,417,058,225,552,555,000 | Print a JSON representation of the dependencies of the CWL document. | cwltool/main.py | printdeps | suecharo/cwltool | python | def printdeps(obj: CWLObjectType, document_loader: Loader, stdout: Union[(TextIO, StreamWriter)], relative_deps: str, uri: str, basedir: Optional[str]=None, nestdirs: bool=True) -> None:
deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
if (relative_deps == 'primary'):
base = (basedir if basedir else os.path.dirname(uri_file_path(str(uri))))
elif (relative_deps == 'cwd'):
base = os.getcwd()
visit_class(deps, ('File', 'Directory'), functools.partial(make_relative, base))
print(json_dumps(deps, indent=4, default=str), file=stdout) |
def find_deps(obj: CWLObjectType, document_loader: Loader, uri: str, basedir: Optional[str]=None, nestdirs: bool=True) -> CWLObjectType:
'Find the dependencies of the CWL document.'
deps = {'class': 'File', 'location': uri, 'format': CWL_IANA}
def loadref(base: str, uri: str) -> Union[(CommentedMap, CommentedSeq, str, None)]:
return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))
sfs = scandeps((basedir if basedir else uri), obj, {'$import', 'run'}, {'$include', '$schemas', 'location'}, loadref, nestdirs=nestdirs)
if (sfs is not None):
deps['secondaryFiles'] = cast(MutableSequence[CWLOutputAtomType], mergedirs(sfs))
return deps | -6,490,368,193,751,929,000 | Find the dependencies of the CWL document. | cwltool/main.py | find_deps | suecharo/cwltool | python | def find_deps(obj: CWLObjectType, document_loader: Loader, uri: str, basedir: Optional[str]=None, nestdirs: bool=True) -> CWLObjectType:
deps = {'class': 'File', 'location': uri, 'format': CWL_IANA}
def loadref(base: str, uri: str) -> Union[(CommentedMap, CommentedSeq, str, None)]:
return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))
sfs = scandeps((basedir if basedir else uri), obj, {'$import', 'run'}, {'$include', '$schemas', 'location'}, loadref, nestdirs=nestdirs)
if (sfs is not None):
deps['secondaryFiles'] = cast(MutableSequence[CWLOutputAtomType], mergedirs(sfs))
return deps |
def print_pack(loadingContext: LoadingContext, uri: str) -> str:
'Return a CWL serialization of the CWL document in JSON.'
packed = pack(loadingContext, uri)
if (len(cast(Sized, packed['$graph'])) > 1):
return json_dumps(packed, indent=4, default=str)
return json_dumps(cast(MutableSequence[CWLObjectType], packed['$graph'])[0], indent=4, default=str) | -4,740,007,930,551,627,000 | Return a CWL serialization of the CWL document in JSON. | cwltool/main.py | print_pack | suecharo/cwltool | python | def print_pack(loadingContext: LoadingContext, uri: str) -> str:
packed = pack(loadingContext, uri)
if (len(cast(Sized, packed['$graph'])) > 1):
return json_dumps(packed, indent=4, default=str)
return json_dumps(cast(MutableSequence[CWLObjectType], packed['$graph'])[0], indent=4, default=str) |
def setup_loadingContext(loadingContext: Optional[LoadingContext], runtimeContext: RuntimeContext, args: argparse.Namespace) -> LoadingContext:
'Prepare a LoadingContext from the given arguments.'
if (loadingContext is None):
loadingContext = LoadingContext(vars(args))
loadingContext.singularity = runtimeContext.singularity
loadingContext.podman = runtimeContext.podman
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(loadingContext.fetcher_constructor, enable_dev=args.enable_dev, doc_cache=args.doc_cache)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = (args.disable_js_validation or (not args.do_validate))
loadingContext.construct_tool_object = getdefault(loadingContext.construct_tool_object, workflow.default_make_tool)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if (loadingContext.do_update is None):
loadingContext.do_update = (not (args.pack or args.print_subgraph))
return loadingContext | -5,290,655,372,574,574,000 | Prepare a LoadingContext from the given arguments. | cwltool/main.py | setup_loadingContext | suecharo/cwltool | python | def setup_loadingContext(loadingContext: Optional[LoadingContext], runtimeContext: RuntimeContext, args: argparse.Namespace) -> LoadingContext:
if (loadingContext is None):
loadingContext = LoadingContext(vars(args))
loadingContext.singularity = runtimeContext.singularity
loadingContext.podman = runtimeContext.podman
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(loadingContext.fetcher_constructor, enable_dev=args.enable_dev, doc_cache=args.doc_cache)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = (args.disable_js_validation or (not args.do_validate))
loadingContext.construct_tool_object = getdefault(loadingContext.construct_tool_object, workflow.default_make_tool)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if (loadingContext.do_update is None):
loadingContext.do_update = (not (args.pack or args.print_subgraph))
return loadingContext |
def make_template(tool: Process) -> None:
'Make a template CWL input object for the given Process.'
def my_represent_none(self: Any, data: Any) -> Any:
"Force clean representation of 'null'."
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
ruamel.yaml.representer.RoundTripRepresenter.add_representer(type(None), my_represent_none)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.dump(generate_input_template(tool), sys.stdout) | -4,128,920,573,643,353,600 | Make a template CWL input object for the given Process. | cwltool/main.py | make_template | suecharo/cwltool | python | def make_template(tool: Process) -> None:
def my_represent_none(self: Any, data: Any) -> Any:
"Force clean representation of 'null'."
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
ruamel.yaml.representer.RoundTripRepresenter.add_representer(type(None), my_represent_none)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.dump(generate_input_template(tool), sys.stdout) |
def inherit_reqshints(tool: Process, parent: Process) -> None:
'Copy down requirements and hints from ancestors of a given process.'
for parent_req in parent.requirements:
found = False
for tool_req in tool.requirements:
if (parent_req['class'] == tool_req['class']):
found = True
break
if (not found):
tool.requirements.append(parent_req)
for parent_hint in parent.hints:
found = False
for tool_req in tool.requirements:
if (parent_hint['class'] == tool_req['class']):
found = True
break
if (not found):
for tool_hint in tool.hints:
if (parent_hint['class'] == tool_hint['class']):
found = True
break
if (not found):
tool.hints.append(parent_hint) | 6,301,341,007,447,737,000 | Copy down requirements and hints from ancestors of a given process. | cwltool/main.py | inherit_reqshints | suecharo/cwltool | python | def inherit_reqshints(tool: Process, parent: Process) -> None:
for parent_req in parent.requirements:
found = False
for tool_req in tool.requirements:
if (parent_req['class'] == tool_req['class']):
found = True
break
if (not found):
tool.requirements.append(parent_req)
for parent_hint in parent.hints:
found = False
for tool_req in tool.requirements:
if (parent_hint['class'] == tool_req['class']):
found = True
break
if (not found):
for tool_hint in tool.hints:
if (parent_hint['class'] == tool_hint['class']):
found = True
break
if (not found):
tool.hints.append(parent_hint) |
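A rough illustration of the merge rule using SimpleNamespace stand-ins for Process objects (test doubles only; real callers pass cwltool Process instances, and the requirement/hint dicts below are invented):

from types import SimpleNamespace

child = SimpleNamespace(
    requirements=[{"class": "DockerRequirement", "dockerPull": "python:3"}],
    hints=[],
)
parent = SimpleNamespace(
    requirements=[
        {"class": "DockerRequirement", "dockerPull": "debian"},
        {"class": "ResourceRequirement", "coresMin": 2},
    ],
    hints=[{"class": "NetworkAccess", "networkAccess": True}],
)

inherit_reqshints(child, parent)  # assumes the function from the record above is in scope
# The child keeps its own DockerRequirement, gains ResourceRequirement and the NetworkAccess hint.
assert child.requirements[0]["dockerPull"] == "python:3"
assert {r["class"] for r in child.requirements} == {"DockerRequirement", "ResourceRequirement"}
assert child.hints == [{"class": "NetworkAccess", "networkAccess": True}]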
def choose_target(args: argparse.Namespace, tool: Process, loading_context: LoadingContext) -> Optional[Process]:
'Walk the Workflow, extract the subset that matches all the args.targets.'
if (loading_context.loader is None):
raise Exception('loading_context.loader cannot be None')
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool['id'])
if url.fragment:
extracted = get_subgraph([((tool.tool['id'] + '/') + r) for r in args.target], tool, loading_context)
else:
extracted = get_subgraph([loading_context.loader.fetcher.urljoin(tool.tool['id'], ('#' + r)) for r in args.target], tool, loading_context)
else:
_logger.error('Can only use --target on Workflows')
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted['id']] = extracted
tool = make_tool(extracted['id'], loading_context)
else:
raise Exception('Missing loading_context.loader.idx!')
return tool | -661,831,237,632,761,300 | Walk the Workflow, extract the subset that matches all the args.targets. | cwltool/main.py | choose_target | suecharo/cwltool | python | def choose_target(args: argparse.Namespace, tool: Process, loading_context: LoadingContext) -> Optional[Process]:
if (loading_context.loader is None):
raise Exception('loading_context.loader cannot be None')
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool['id'])
if url.fragment:
extracted = get_subgraph([((tool.tool['id'] + '/') + r) for r in args.target], tool, loading_context)
else:
extracted = get_subgraph([loading_context.loader.fetcher.urljoin(tool.tool['id'], ('#' + r)) for r in args.target], tool, loading_context)
else:
_logger.error('Can only use --target on Workflows')
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted['id']] = extracted
tool = make_tool(extracted['id'], loading_context)
else:
raise Exception('Missing loading_context.loader.idx!')
return tool |
def choose_step(args: argparse.Namespace, tool: Process, loading_context: LoadingContext) -> Optional[Process]:
'Walk the given Workflow and extract just args.single_step.'
if (loading_context.loader is None):
raise Exception('loading_context.loader cannot be None')
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool['id'])
if url.fragment:
step_id = ((tool.tool['id'] + '/') + args.single_step)
else:
step_id = loading_context.loader.fetcher.urljoin(tool.tool['id'], ('#' + args.single_step))
extracted = get_step(tool, step_id, loading_context)
else:
_logger.error('Can only use --single-step on Workflows')
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted['id']] = cast(Union[(CommentedMap, CommentedSeq, str, None)], cmap(extracted))
tool = make_tool(extracted['id'], loading_context)
else:
raise Exception('Missing loading_context.loader.idx!')
return tool | 7,015,021,952,710,861,000 | Walk the given Workflow and extract just args.single_step. | cwltool/main.py | choose_step | suecharo/cwltool | python | def choose_step(args: argparse.Namespace, tool: Process, loading_context: LoadingContext) -> Optional[Process]:
if (loading_context.loader is None):
raise Exception('loading_context.loader cannot be None')
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool['id'])
if url.fragment:
step_id = ((tool.tool['id'] + '/') + args.single_step)
else:
step_id = loading_context.loader.fetcher.urljoin(tool.tool['id'], ('#' + args.single_step))
extracted = get_step(tool, step_id, loading_context)
else:
_logger.error('Can only use --single-step on Workflows')
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted['id']] = cast(Union[(CommentedMap, CommentedSeq, str, None)], cmap(extracted))
tool = make_tool(extracted['id'], loading_context)
else:
raise Exception('Missing loading_context.loader.idx!')
return tool |
def choose_process(args: argparse.Namespace, tool: Process, loadingContext: LoadingContext) -> Optional[Process]:
'Walk the given Workflow and extract just args.single_process.'
if (loadingContext.loader is None):
raise Exception('loadingContext.loader cannot be None')
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool['id'])
if url.fragment:
step_id = ((tool.tool['id'] + '/') + args.single_process)
else:
step_id = loadingContext.loader.fetcher.urljoin(tool.tool['id'], ('#' + args.single_process))
(extracted, workflow_step) = get_process(tool, step_id, loadingContext)
else:
_logger.error('Can only use --single-process on Workflows')
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted['id']] = extracted
new_tool = make_tool(extracted['id'], loadingContext)
else:
raise Exception('Missing loadingContext.loader.idx!')
inherit_reqshints(new_tool, workflow_step)
return new_tool | -9,181,339,676,168,697,000 | Walk the given Workflow and extract just args.single_process. | cwltool/main.py | choose_process | suecharo/cwltool | python | def choose_process(args: argparse.Namespace, tool: Process, loadingContext: LoadingContext) -> Optional[Process]:
if (loadingContext.loader is None):
raise Exception('loadingContext.loader cannot be None')
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool['id'])
if url.fragment:
step_id = ((tool.tool['id'] + '/') + args.single_process)
else:
step_id = loadingContext.loader.fetcher.urljoin(tool.tool['id'], ('#' + args.single_process))
(extracted, workflow_step) = get_process(tool, step_id, loadingContext)
else:
_logger.error('Can only use --single-process on Workflows')
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted['id']] = extracted
new_tool = make_tool(extracted['id'], loadingContext)
else:
raise Exception('Missing loadingContext.loader.idx!')
inherit_reqshints(new_tool, workflow_step)
return new_tool |
def check_working_directories(runtimeContext: RuntimeContext) -> Optional[int]:
'Make any needed working directories.'
for dirprefix in ('tmpdir_prefix', 'tmp_outdir_prefix', 'cachedir'):
if (getattr(runtimeContext, dirprefix) and (getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX)):
sl = ('/' if (getattr(runtimeContext, dirprefix).endswith('/') or (dirprefix == 'cachedir')) else '')
setattr(runtimeContext, dirprefix, (os.path.abspath(getattr(runtimeContext, dirprefix)) + sl))
if (not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix)))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception:
_logger.exception('Failed to create directory.')
return 1
return None | 829,236,959,816,447,000 | Make any needed working directories. | cwltool/main.py | check_working_directories | suecharo/cwltool | python | def check_working_directories(runtimeContext: RuntimeContext) -> Optional[int]:
for dirprefix in ('tmpdir_prefix', 'tmp_outdir_prefix', 'cachedir'):
if (getattr(runtimeContext, dirprefix) and (getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX)):
sl = ('/' if (getattr(runtimeContext, dirprefix).endswith('/') or (dirprefix == 'cachedir')) else '')
setattr(runtimeContext, dirprefix, (os.path.abspath(getattr(runtimeContext, dirprefix)) + sl))
if (not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix)))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception:
_logger.exception('Failed to create directory.')
return 1
return None |
def print_targets(tool: Process, stdout: Union[(TextIO, StreamWriter)], loading_context: LoadingContext, prefix: str='') -> None:
'Recursively find targets for --subgraph and friends.'
for f in ('outputs', 'inputs'):
if tool.tool[f]:
_logger.info('%s %s%s targets:', prefix[:(- 1)], f[0].upper(), f[1:(- 1)])
print((' ' + '\n '.join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]])), file=stdout)
if ('steps' in tool.tool):
loading_context = copy.copy(loading_context)
loading_context.requirements = tool.requirements
loading_context.hints = tool.hints
_logger.info('%s steps targets:', prefix[:(- 1)])
for t in tool.tool['steps']:
print(f" {prefix}{shortname(t['id'])}", file=stdout)
run: Union[(str, Process, Dict[(str, Any)])] = t['run']
if isinstance(run, str):
process = make_tool(run, loading_context)
elif isinstance(run, dict):
process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
else:
process = run
print_targets(process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/") | -1,148,331,140,121,797,000 | Recursively find targets for --subgraph and friends. | cwltool/main.py | print_targets | suecharo/cwltool | python | def print_targets(tool: Process, stdout: Union[(TextIO, StreamWriter)], loading_context: LoadingContext, prefix: str='') -> None:
for f in ('outputs', 'inputs'):
if tool.tool[f]:
_logger.info('%s %s%s targets:', prefix[:(- 1)], f[0].upper(), f[1:(- 1)])
print((' ' + '\n '.join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]])), file=stdout)
if ('steps' in tool.tool):
loading_context = copy.copy(loading_context)
loading_context.requirements = tool.requirements
loading_context.hints = tool.hints
_logger.info('%s steps targets:', prefix[:(- 1)])
for t in tool.tool['steps']:
print(f" {prefix}{shortname(t['id'])}", file=stdout)
run: Union[(str, Process, Dict[(str, Any)])] = t['run']
if isinstance(run, str):
process = make_tool(run, loading_context)
elif isinstance(run, dict):
process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
else:
process = run
print_targets(process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/") |
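A framework-free sketch of the same recursive target listing, with plain dicts standing in for cwltool Process objects (the workflow below is invented):

def list_targets(wf: dict, prefix: str = '') -> None:
    for field in ('outputs', 'inputs'):
        for name in wf.get(field, []):
            print(f'{prefix}{name}')
    for step in wf.get('steps', []):
        print(f"{prefix}{step['id']}")
        # Recurse into the embedded process, extending the prefix with the step id.
        list_targets(step.get('run', {}), prefix=f"{prefix}{step['id']}/")

wf = {'inputs': ['message'], 'outputs': ['report'],
      'steps': [{'id': 'echo', 'run': {'inputs': ['message'], 'outputs': ['stdout']}}]}
list_targets(wf)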
def find_default_container(builder: HasReqsHints, default_container: Optional[str]=None, use_biocontainers: Optional[bool]=None) -> Optional[str]:
'Find a container.'
if ((not default_container) and use_biocontainers):
default_container = get_container_from_software_requirements(use_biocontainers, builder)
return default_container | -5,100,366,564,022,011,000 | Find a container. | cwltool/main.py | find_default_container | suecharo/cwltool | python | def find_default_container(builder: HasReqsHints, default_container: Optional[str]=None, use_biocontainers: Optional[bool]=None) -> Optional[str]:
if ((not default_container) and use_biocontainers):
default_container = get_container_from_software_requirements(use_biocontainers, builder)
return default_container |
def windows_check() -> None:
'See if we are running on MS Windows and warn about the lack of support.'
if (os.name == 'nt'):
warnings.warn("The CWL reference runner (cwltool) no longer supports running CWL workflows natively on MS Windows as its previous MS Windows support was incomplete and untested. Instead, please see https://pypi.org/project/cwltool/#ms-windows-users for instructions on running cwltool via Windows Subsystem for Linux 2 (WSL2). If don't need to execute CWL documents, then you can ignore this warning, but please consider migrating to https://pypi.org/project/cwl-utils/ for your CWL document processing needs.") | -825,251,966,511,962,600 | See if we are running on MS Windows and warn about the lack of support. | cwltool/main.py | windows_check | suecharo/cwltool | python | def windows_check() -> None:
if (os.name == 'nt'):
warnings.warn("The CWL reference runner (cwltool) no longer supports running CWL workflows natively on MS Windows as its previous MS Windows support was incomplete and untested. Instead, please see https://pypi.org/project/cwltool/#ms-windows-users for instructions on running cwltool via Windows Subsystem for Linux 2 (WSL2). If don't need to execute CWL documents, then you can ignore this warning, but please consider migrating to https://pypi.org/project/cwl-utils/ for your CWL document processing needs.") |
def run(*args: Any, **kwargs: Any) -> None:
'Run cwltool.'
windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes() | -4,568,453,382,566,762,500 | Run cwltool. | cwltool/main.py | run | suecharo/cwltool | python | def run(*args: Any, **kwargs: Any) -> None:
windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes() |
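The entry-point pattern in run() above — install a SIGTERM handler, exit with main()'s return code, and always clean up — reads like this in isolation; _signal_handler, main and _terminate_processes below are stand-ins, not cwltool's real implementations:

import signal
import sys

def _signal_handler(signum, frame):
    raise SystemExit(1)

def main(argv=None) -> int:
    print('doing the actual work')
    return 0

def _terminate_processes() -> None:
    print('cleaning up child processes')

def run() -> None:
    signal.signal(signal.SIGTERM, _signal_handler)
    try:
        sys.exit(main())          # SystemExit propagates out of the try block ...
    finally:
        _terminate_processes()    # ... but the finally clause still runs first.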
def __init__(self) -> None:
'Use the default formatter with our custom formatstring.'
super().__init__('[%(asctime)sZ] %(message)s') | -2,602,375,492,646,969,000 | Use the default formatter with our custom formatstring. | cwltool/main.py | __init__ | suecharo/cwltool | python | def __init__(self) -> None:
super().__init__('[%(asctime)sZ] %(message)s') |
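Hooking that formatter into a handler shows what the format string produces; the logger name here is arbitrary:

import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('[%(asctime)sZ] %(message)s'))
logger = logging.getLogger('cwltool-demo')
logger.addHandler(handler)
logger.warning('workflow started')   # e.g. [2024-01-01 12:00:00,000Z] workflow started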
def my_represent_none(self: Any, data: Any) -> Any:
"Force clean representation of 'null'."
return self.represent_scalar('tag:yaml.org,2002:null', 'null') | 3,276,453,461,130,759,700 | Force clean representation of 'null'. | cwltool/main.py | my_represent_none | suecharo/cwltool | python | def my_represent_none(self: Any, data: Any) -> Any:
return self.represent_scalar('tag:yaml.org,2002:null', 'null') |
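Registering the representer makes None round-trip as an explicit null; this sketch assumes ruamel.yaml's round-trip representer, which may differ from how cwltool wires it up internally:

import sys
from ruamel.yaml import YAML

def my_represent_none(self, data):
    # Emit an explicit 'null' scalar instead of leaving the value empty.
    return self.represent_scalar('tag:yaml.org,2002:null', 'null')

yaml = YAML()
yaml.representer.add_representer(type(None), my_represent_none)
yaml.dump({'value': None}, sys.stdout)   # value: null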
@property
@abc.abstractmethod
def annotation_urls(self):
'Dictionary passed to the DownloadManager to download annotations.\n\n An example:\n {"test_annotations": "https://somewebpage.com/data/openimages/test.txt"}\n\n Returns:\n A dictionary whose values are the URLs to download the annotations of the\n dataset, and the keys are some short string identifying the URL.\n This dictionary is passed to the DownloadManager.\n ' | -8,632,046,831,819,798,000 | Dictionary passed to the DownloadManager to download annotations.
An example:
{"test_annotations": "https://somewebpage.com/data/openimages/test.txt"}
Returns:
A dictionary whose values are the URLs to download the annotations of the
dataset, and the keys are some short string identifying the URL.
This dictionary is passed to the DownloadManager. | tensorflow_datasets/object_detection/open_images_challenge2019.py | annotation_urls | 8bitmp3/datasets | python | @property
@abc.abstractmethod
def annotation_urls(self):
'Dictionary passed to the DownloadManager to download annotations.\n\n An example:\n {"test_annotations": "https://somewebpage.com/data/openimages/test.txt"}\n\n Returns:\n A dictionary whose values are the URLs to download the annotations of the\n dataset, and the keys are some short string identifying the URL.\n This dictionary is passed to the DownloadManager.\n ' |
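A concrete config only has to override the abstract property; the class names below are invented, and the URL is taken from the docstring's own example:

import abc

class ChallengeConfig(abc.ABC):

    @property
    @abc.abstractmethod
    def annotation_urls(self) -> dict:
        """Mapping of short keys to annotation download URLs."""

class TestSplitConfig(ChallengeConfig):

    @property
    def annotation_urls(self) -> dict:
        return {'test_annotations': 'https://somewebpage.com/data/openimages/test.txt'}

print(TestSplitConfig().annotation_urls)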
def __init__(self, component_config: Dict[(Text, Any)]=None) -> None:
'Construct a new tokenizer using the WhitespaceTokenizer framework.'
super().__init__(component_config)
self.intent_tokenization_flag = self.component_config.get('intent_tokenization_flag', False)
self.intent_split_symbol = self.component_config.get('intent_split_symbol', '_') | -2,541,679,691,764,993,500 | Construct a new tokenizer using the WhitespaceTokenizer framework. | rasa/nlu/tokenizers/tokenizer.py | __init__ | Ali-vohra/final_project | python | def __init__(self, component_config: Dict[(Text, Any)]=None) -> None:
super().__init__(component_config)
self.intent_tokenization_flag = self.component_config.get('intent_tokenization_flag', False)
self.intent_split_symbol = self.component_config.get('intent_split_symbol', '_') |
def tokenize(self, message: Message, attribute: Text) -> List[Token]:
'Tokenizes the text of the provided attribute of the incoming message.'
raise NotImplementedError | -1,965,813,166,410,415,400 | Tokenizes the text of the provided attribute of the incoming message. | rasa/nlu/tokenizers/tokenizer.py | tokenize | Ali-vohra/final_project | python | def tokenize(self, message: Message, attribute: Text) -> List[Token]:
raise NotImplementedError |
def train(self, training_data: TrainingData, config: Optional[RasaNLUModelConfig]=None, **kwargs: Any) -> None:
'Tokenize all training data.'
for example in training_data.training_examples:
for attribute in MESSAGE_ATTRIBUTES:
if (example.get(attribute) is not None):
if (attribute == INTENT_ATTRIBUTE):
tokens = self._split_intent(example)
else:
tokens = self.tokenize(example, attribute)
tokens = self.add_cls_token(tokens, attribute)
example.set(TOKENS_NAMES[attribute], tokens) | -8,850,162,252,427,049,000 | Tokenize all training data. | rasa/nlu/tokenizers/tokenizer.py | train | Ali-vohra/final_project | python | def train(self, training_data: TrainingData, config: Optional[RasaNLUModelConfig]=None, **kwargs: Any) -> None:
for example in training_data.training_examples:
for attribute in MESSAGE_ATTRIBUTES:
if (example.get(attribute) is not None):
if (attribute == INTENT_ATTRIBUTE):
tokens = self._split_intent(example)
else:
tokens = self.tokenize(example, attribute)
tokens = self.add_cls_token(tokens, attribute)
example.set(TOKENS_NAMES[attribute], tokens) |
def process(self, message: Message, **kwargs: Any) -> None:
'Tokenize the incoming message.'
tokens = self.tokenize(message, TEXT_ATTRIBUTE)
tokens = self.add_cls_token(tokens, TEXT_ATTRIBUTE)
message.set(TOKENS_NAMES[TEXT_ATTRIBUTE], tokens) | -1,282,235,664,095,653,600 | Tokenize the incoming message. | rasa/nlu/tokenizers/tokenizer.py | process | Ali-vohra/final_project | python | def process(self, message: Message, **kwargs: Any) -> None:
tokens = self.tokenize(message, TEXT_ATTRIBUTE)
tokens = self.add_cls_token(tokens, TEXT_ATTRIBUTE)
message.set(TOKENS_NAMES[TEXT_ATTRIBUTE], tokens) |
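The tokenizer contract above boils down to "subclasses implement tokenize(); the base class handles training and processing". The dependency-free sketch below keeps only that idea, with a stand-in Token type rather than rasa's own classes:

from typing import List, NamedTuple

class Token(NamedTuple):
    text: str
    start: int

class SimpleWhitespaceTokenizer:
    def tokenize(self, text: str) -> List[Token]:
        tokens, offset = [], 0
        for word in text.split():
            start = text.index(word, offset)   # track character offsets, not just words
            tokens.append(Token(word, start))
            offset = start + len(word)
        return tokens

print(SimpleWhitespaceTokenizer().tokenize('book a flight'))
# [Token(text='book', start=0), Token(text='a', start=5), Token(text='flight', start=7)]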
def _setup_outputs(root_output_dir, experiment_name, rounds_per_profile=0):
'Set up directories for experiment loops, write hyperparameters to disk.'
if (not experiment_name):
raise ValueError('experiment_name must be specified.')
create_if_not_exists(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
create_if_not_exists(checkpoint_dir)
checkpoint_mngr = tff.simulation.FileCheckpointManager(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
create_if_not_exists(results_dir)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.simulation.CSVMetricsManager(csv_file)
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tb_mngr = tff.simulation.TensorBoardManager(summary_dir=summary_logdir)
logging.info('Writing...')
logging.info(' checkpoints to: %s', checkpoint_dir)
logging.info(' metrics csv to: %s', metrics_mngr.metrics_filename)
logging.info(' summaries to: %s', summary_logdir)
@contextlib.contextmanager
def profiler(round_num):
if ((rounds_per_profile > 0) and ((round_num % rounds_per_profile) == 0)):
with tf.profiler.experimental.Profile(summary_logdir):
(yield)
else:
(yield)
return (checkpoint_mngr, metrics_mngr, tb_mngr, profiler) | 6,876,771,930,840,165,000 | Set up directories for experiment loops, write hyperparameters to disk. | utils/training_loop.py | _setup_outputs | houcharlie/federated | python | def _setup_outputs(root_output_dir, experiment_name, rounds_per_profile=0):
if (not experiment_name):
raise ValueError('experiment_name must be specified.')
create_if_not_exists(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
create_if_not_exists(checkpoint_dir)
checkpoint_mngr = tff.simulation.FileCheckpointManager(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
create_if_not_exists(results_dir)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.simulation.CSVMetricsManager(csv_file)
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tb_mngr = tff.simulation.TensorBoardManager(summary_dir=summary_logdir)
logging.info('Writing...')
logging.info(' checkpoints to: %s', checkpoint_dir)
logging.info(' metrics csv to: %s', metrics_mngr.metrics_filename)
logging.info(' summaries to: %s', summary_logdir)
@contextlib.contextmanager
def profiler(round_num):
if ((rounds_per_profile > 0) and ((round_num % rounds_per_profile) == 0)):
with tf.profiler.experimental.Profile(summary_logdir):
(yield)
else:
(yield)
return (checkpoint_mngr, metrics_mngr, tb_mngr, profiler) |
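The per-round profiler above is just a conditional context manager; the sketch below reproduces the pattern with a plain timer instead of tf.profiler so it runs without TensorFlow:

import contextlib
import time

def make_profiler(rounds_per_profile: int = 0):
    @contextlib.contextmanager
    def profiler(round_num: int):
        if rounds_per_profile > 0 and round_num % rounds_per_profile == 0:
            start = time.perf_counter()
            yield
            print(f'round {round_num}: {time.perf_counter() - start:.3f}s')
        else:
            yield
    return profiler

profiler = make_profiler(rounds_per_profile=2)
for round_num in range(4):
    with profiler(round_num):
        time.sleep(0.01)   # stand-in for one round of training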