body (stringlengths 26–98.2k) | body_hash (int64) | docstring (stringlengths 1–16.8k) | path (stringlengths 5–230) | name (stringlengths 1–96) | repository_name (stringlengths 7–89) | lang (stringclasses 1) | body_without_docstring (stringlengths 20–98.2k)
---|---|---|---|---|---|---|---|
@property
def rollout_lengths(self):
' Lengths of sub-rollouts. '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
return (bounds[1:] - bounds[:(- 1)]) | 4,705,739,400,180,986,000 | Lengths of sub-rollouts. | mushroom_rl/core/parallelization_tools/step_sequence.py | rollout_lengths | nifunk/GNNMushroomRL | python | @property
def rollout_lengths(self):
' '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
return (bounds[1:] - bounds[:(- 1)]) |
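For reference, the length computation above is just the difference of consecutive rollout boundaries. A minimal standalone sketch of that arithmetic with NumPy (the `bounds` values here are made up for illustration):

```python
import numpy as np

# Hypothetical rollout boundaries: three sub-rollouts of length 5, 3 and 7,
# stored as cumulative start indices plus the final end index.
bounds = np.array([0, 5, 8, 15])

# Same expression as in rollout_lengths: difference of consecutive bounds.
lengths = bounds[1:] - bounds[:-1]
print(lengths)  # -> [5 3 7]
```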
def __len__(self):
" Get the step sequence's length. "
return self.length | -953,459,290,759,964,000 | Get the step sequence's length. | mushroom_rl/core/parallelization_tools/step_sequence.py | __len__ | nifunk/GNNMushroomRL | python | def __len__(self):
" "
return self.length |
def add_data(self, name: str, value=None, item_shape: tuple=None, with_after_last: Optional[bool]=False):
'\n Add a new data field to the step sequence.\n\n :param name: string for the name\n :param value: the data\n :param item_shape: shape to store the data in\n :param with_after_last: `True` if there is one more element than the length (e.g. last observation)\n '
if (name in self._data_names):
raise pyrado.KeyErr(msg=f'Trying to add a duplicate data field for {name}!')
if (value is None):
ro_length = self.length
if with_after_last:
ro_length += 1
if (self._data_format == 'torch'):
value = to.zeros((to.Size([ro_length]) + to.Size(item_shape)))
else:
value = np.zeros(((ro_length,) + item_shape))
else:
self._validate_data_size(name, value)
if (not isinstance(value, (np.ndarray, to.Tensor))):
value = stack_to_format(value, self._data_format)
else:
value = to_format(value, self._data_format)
self._data_names.append(name)
self.__dict__[name] = value | -2,198,449,852,763,759,900 | Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation) | mushroom_rl/core/parallelization_tools/step_sequence.py | add_data | nifunk/GNNMushroomRL | python | def add_data(self, name: str, value=None, item_shape: tuple=None, with_after_last: Optional[bool]=False):
'\n Add a new data field to the step sequence.\n\n :param name: string for the name\n :param value: the data\n :param item_shape: shape to store the data in\n :param with_after_last: `True` if there is one more element than the length (e.g. last observation)\n '
if (name in self._data_names):
raise pyrado.KeyErr(msg=f'Trying to add a duplicate data field for {name}!')
if (value is None):
ro_length = self.length
if with_after_last:
ro_length += 1
if (self._data_format == 'torch'):
value = to.zeros((to.Size([ro_length]) + to.Size(item_shape)))
else:
value = np.zeros(((ro_length,) + item_shape))
else:
self._validate_data_size(name, value)
if (not isinstance(value, (np.ndarray, to.Tensor))):
value = stack_to_format(value, self._data_format)
else:
value = to_format(value, self._data_format)
self._data_names.append(name)
self.__dict__[name] = value |
def get_data_values(self, name: str, truncate_last: Optional[bool]=False):
'\n Return the data tensor stored under the given name.\n\n :param name: data name\n :param truncate_last: True to truncate the length+1 entry if present\n '
assert (name in self._data_names)
entry = self.__dict__[name]
if truncate_last:
entry = self._truncate_after_last(entry)
return entry | 1,501,171,783,061,499,400 | Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present | mushroom_rl/core/parallelization_tools/step_sequence.py | get_data_values | nifunk/GNNMushroomRL | python | def get_data_values(self, name: str, truncate_last: Optional[bool]=False):
'\n Return the data tensor stored under the given name.\n\n :param name: data name\n :param truncate_last: True to truncate the length+1 entry if present\n '
assert (name in self._data_names)
entry = self.__dict__[name]
if truncate_last:
entry = self._truncate_after_last(entry)
return entry |
def numpy(self, data_type=None):
'\n Convert data to numpy ndarrays.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('numpy', data_type) | 7,891,443,739,255,916,000 | Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged. | mushroom_rl/core/parallelization_tools/step_sequence.py | numpy | nifunk/GNNMushroomRL | python | def numpy(self, data_type=None):
'\n Convert data to numpy ndarrays.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('numpy', data_type) |
def torch(self, data_type=None):
'\n Convert data to PyTorch Tensors.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('torch', data_type) | 9,121,887,788,393,913,000 | Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged. | mushroom_rl/core/parallelization_tools/step_sequence.py | torch | nifunk/GNNMushroomRL | python | def torch(self, data_type=None):
'\n Convert data to PyTorch Tensors.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('torch', data_type) |
def convert(self, data_format: str, data_type=None):
'\n Convert data to specified format.\n\n :param data_format: torch to use Tensors, numpy to use ndarrays\n :param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.\n '
if (data_format not in {'torch', 'numpy'}):
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if (self._data_format == data_format):
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors((lambda t: to_format(t, data_format, data_type)), self.__dict__[dn]) | 5,424,327,337,248,844,000 | Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged. | mushroom_rl/core/parallelization_tools/step_sequence.py | convert | nifunk/GNNMushroomRL | python | def convert(self, data_format: str, data_type=None):
'\n Convert data to specified format.\n\n :param data_format: torch to use Tensors, numpy to use ndarrays\n :param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.\n '
if (data_format not in {'torch', 'numpy'}):
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if (self._data_format == data_format):
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors((lambda t: to_format(t, data_format, data_type)), self.__dict__[dn]) |
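`to_format` and `stack_to_format` are pyrado helpers whose implementations are not shown in this row; the sketch below is a hypothetical stand-in (`to_format_sketch` is not the library's function) showing only the numpy/torch round trip such a conversion implies:

```python
import numpy as np
import torch as to

def to_format_sketch(t, data_format, data_type=None):
    """Rough stand-in for a to_format()-style helper: convert one array/tensor."""
    if data_format == 'torch':
        out = t if isinstance(t, to.Tensor) else to.from_numpy(np.asarray(t))
        return out.to(dtype=data_type) if data_type is not None else out
    elif data_format == 'numpy':
        out = t.detach().cpu().numpy() if isinstance(t, to.Tensor) else np.asarray(t)
        return out.astype(data_type) if data_type is not None else out
    raise ValueError("data_format must be 'torch' or 'numpy'")

rewards = np.arange(4, dtype=np.float64)
print(to_format_sketch(rewards, 'torch', to.float32))  # tensor([0., 1., 2., 3.])
print(to_format_sketch(to.ones(3), 'numpy'))           # [1. 1. 1.]
```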
def get_rollout(self, index):
'\n Get an indexed sub-rollout.\n\n :param index: generic index of sub-rollout, negative values, slices and iterables are allowed\n :return: selected subset.\n '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
if isinstance(index, slice):
(start, end, step) = index.indices(self.rollout_count)
if (step == 1):
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
index = range(start, end, step)
if isinstance(index, Iterable):
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[(index + 1)]
return self[start_step:end_step] | 1,579,322,541,815,827,200 | Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset. | mushroom_rl/core/parallelization_tools/step_sequence.py | get_rollout | nifunk/GNNMushroomRL | python | def get_rollout(self, index):
'\n Get an indexed sub-rollout.\n\n :param index: generic index of sub-rollout, negative values, slices and iterables are allowed\n :return: selected subset.\n '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
if isinstance(index, slice):
(start, end, step) = index.indices(self.rollout_count)
if (step == 1):
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
index = range(start, end, step)
if isinstance(index, Iterable):
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[(index + 1)]
return self[start_step:end_step] |
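The index arithmetic in `get_rollout` maps a sub-rollout index to a slice of steps via the boundary array. A standalone sketch with made-up bounds; the negative-index normalization mirrors what `_index_to_int` presumably does (its implementation is not shown here):

```python
import numpy as np

bounds = np.array([0, 5, 8, 15])      # hypothetical boundaries of 3 sub-rollouts
rollout_count = len(bounds) - 1

def rollout_slice(index):
    """Mimic get_rollout() for a single integer index."""
    if index < 0:                      # assumed behavior of _index_to_int
        index += rollout_count
    if not 0 <= index < rollout_count:
        raise IndexError(index)
    return slice(int(bounds[index]), int(bounds[index + 1]))

print(rollout_slice(1))    # slice(5, 8)  -> steps 5..7
print(rollout_slice(-1))   # slice(8, 15) -> last sub-rollout
```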
def iterate_rollouts(self):
' Iterate over all sub-rollouts of a concatenated rollout. '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
count = (len(bounds) - 1)
if (count == 1):
(yield self)
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[(i + 1)]
(yield self[start_step:end_step]) | 3,715,250,394,928,838,000 | Iterate over all sub-rollouts of a concatenated rollout. | mushroom_rl/core/parallelization_tools/step_sequence.py | iterate_rollouts | nifunk/GNNMushroomRL | python | def iterate_rollouts(self):
' '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
count = (len(bounds) - 1)
if (count == 1):
(yield self)
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[(i + 1)]
(yield self[start_step:end_step]) |
def sample_w_next(self, batch_size: int) -> tuple:
'\n Sample a random batch of steps from a rollout together with the associated next steps.\n Similar to `split_shuffled_batches` with `complete_rollouts=False`\n\n :param batch_size: number of steps to sample\n :return: randomly sampled batch of steps\n '
if (not (self.length >= 2)):
raise pyrado.ValueErr(given=self.length, ge_constraint='2')
shuffled_idcs = random.sample(range((self.length - 2)), batch_size)
shuffled_next_idcs = [(i + 1) for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return (steps, next_steps) | -3,061,683,762,915,780,600 | Sample a random batch of steps from a rollout together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps | mushroom_rl/core/parallelization_tools/step_sequence.py | sample_w_next | nifunk/GNNMushroomRL | python | def sample_w_next(self, batch_size: int) -> tuple:
'\n Sample a random batch of steps from a rollout together with the associated next steps.\n Similar to `split_shuffled_batches` with `complete_rollouts=False`\n\n :param batch_size: number of steps to sample\n :return: randomly sampled batch of steps\n '
if (not (self.length >= 2)):
raise pyrado.ValueErr(given=self.length, ge_constraint='2')
shuffled_idcs = random.sample(range((self.length - 2)), batch_size)
shuffled_next_idcs = [(i + 1) for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return (steps, next_steps) |
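The sampling scheme in `sample_w_next` simply pairs each drawn step index with its successor. A tiny standalone illustration with made-up sizes:

```python
import random

length, batch_size = 10, 4

# Draw current-step indices such that index + 1 is always a valid step,
# then pair each one with its successor (same scheme as sample_w_next).
idcs = random.sample(range(length - 2), batch_size)
next_idcs = [i + 1 for i in idcs]
print(list(zip(idcs, next_idcs)))   # e.g. [(3, 4), (0, 1), (6, 7), (5, 6)]
```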
def split_ordered_batches(self, batch_size: int=None, num_batches: int=None):
'\n Batch generation. Split the step collection into ordered mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch, i.e. variable number of batches\n :param num_batches: number of batches to split the rollout in, i.e. variable batch size\n\n .. note::\n Left out the option to return complete rollouts like for `split_shuffled_batches`.\n '
if (((batch_size is None) and (num_batches is None)) or ((batch_size is not None) and (num_batches is not None))):
raise pyrado.ValueErr(msg='Either batch_size or num_batches must not be None, but not both or none!')
elif ((batch_size is not None) and (batch_size < 1)):
raise pyrado.ValueErr(given=batch_size, ge_constraint='1 (int)')
elif ((num_batches is not None) and (num_batches < 1)):
raise pyrado.ValueErr(given=num_batches, ge_constraint='1 (int)')
if (num_batches is not None):
batch_size = ceil((self.length / num_batches))
if (batch_size >= self.length):
(yield self)
else:
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
(yield self[b]) | -1,020,326,957,354,149,100 | Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`. | mushroom_rl/core/parallelization_tools/step_sequence.py | split_ordered_batches | nifunk/GNNMushroomRL | python | def split_ordered_batches(self, batch_size: int=None, num_batches: int=None):
'\n Batch generation. Split the step collection into ordered mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch, i.e. variable number of batches\n :param num_batches: number of batches to split the rollout in, i.e. variable batch size\n\n .. note::\n Left out the option to return complete rollouts like for `split_shuffled_batches`.\n '
if (((batch_size is None) and (num_batches is None)) or ((batch_size is not None) and (num_batches is not None))):
raise pyrado.ValueErr(msg='Either batch_size or num_batches must not be None, but not both or none!')
elif ((batch_size is not None) and (batch_size < 1)):
raise pyrado.ValueErr(given=batch_size, ge_constraint='1 (int)')
elif ((num_batches is not None) and (num_batches < 1)):
raise pyrado.ValueErr(given=num_batches, ge_constraint='1 (int)')
if (num_batches is not None):
batch_size = ceil((self.length / num_batches))
if (batch_size >= self.length):
(yield self)
else:
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
(yield self[b]) |
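`gen_ordered_batch_idcs` is a pyrado helper not shown in this row; the sketch below reproduces only the splitting arithmetic (`ceil` of length over `num_batches`, then ordered index chunks) as a standalone function under that assumption:

```python
from math import ceil

def ordered_batches(length, batch_size=None, num_batches=None):
    """Standalone sketch of the splitting logic: yield ordered index lists."""
    if (batch_size is None) == (num_batches is None):
        raise ValueError('specify exactly one of batch_size / num_batches')
    if num_batches is not None:
        batch_size = ceil(length / num_batches)   # variable batch size
    for start in range(0, length, batch_size):
        yield list(range(start, min(start + batch_size, length)))

print(list(ordered_batches(10, num_batches=3)))
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
```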
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool]=False):
'\n Batch generation. Split the step collection into random mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch\n :param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.\n However, the size of the returned batches cannot be strictly maintained in this case.\n\n .. note::\n This method is also supposed to be called for recurrent networks, which have a different `evaluate()`\n method that recognizes where the rollouts end within a batch.\n '
if (batch_size >= self.length):
(yield self)
elif (complete_rollouts and self.continuous):
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if (cur_batch_size >= batch_size):
(yield self.get_rollout(batch))
batch.clear()
cur_batch_size = 0
if batch:
(yield self.get_rollout(batch))
else:
for b in gen_shuffled_batch_idcs(batch_size, self.length):
(yield self[b]) | 1,159,818,307,064,741,400 | Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch. | mushroom_rl/core/parallelization_tools/step_sequence.py | split_shuffled_batches | nifunk/GNNMushroomRL | python | def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool]=False):
'\n Batch generation. Split the step collection into random mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch\n :param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.\n However, the size of the returned batches cannot be strictly maintained in this case.\n\n .. note::\n This method is also supposed to be called for recurrent networks, which have a different `evaluate()`\n method that recognizes where the rollouts end within a batch.\n '
if (batch_size >= self.length):
(yield self)
elif (complete_rollouts and self.continuous):
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if (cur_batch_size >= batch_size):
(yield self.get_rollout(batch))
batch.clear()
cur_batch_size = 0
if batch:
(yield self.get_rollout(batch))
else:
for b in gen_shuffled_batch_idcs(batch_size, self.length):
(yield self[b]) |
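The `complete_rollouts` branch above groups whole sub-rollouts into batches of at least `batch_size` steps. A standalone sketch of that grouping with made-up rollout lengths:

```python
import random

rollout_lengths = [5, 3, 7, 4]   # hypothetical sub-rollout lengths
batch_size = 8

shuffled = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
batch, cur = [], 0
for idx in shuffled:
    batch.append(idx)
    cur += rollout_lengths[idx]
    if cur >= batch_size:            # emit once enough whole rollouts are collected
        print('batch of rollouts:', batch, 'steps:', cur)
        batch, cur = [], 0
if batch:                            # leftover rollouts form a final, smaller batch
    print('batch of rollouts:', batch, 'steps:', cur)
```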
def undiscounted_return(self) -> float:
'\n Compute the undiscounted return.\n\n :return: sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The StepSequence must be a single continuous rollout.')
return self.rewards.sum() | -7,107,731,319,835,752,000 | Compute the undiscounted return.
:return: sum of rewards | mushroom_rl/core/parallelization_tools/step_sequence.py | undiscounted_return | nifunk/GNNMushroomRL | python | def undiscounted_return(self) -> float:
'\n Compute the undiscounted return.\n\n :return: sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The StepSequence must be a single continuous rollout.')
return self.rewards.sum() |
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
'\n Compute the discounted return.\n\n :param gamma: temporal discount factor\n :return: exponentially weighted sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The StepSequence must be a single continuous rollout.')
if (not (0 <= gamma <= 1)):
raise pyrado.ValueErr(given=gamma, ge_constraint='0', le_constraint='1')
if (self.data_format == 'torch'):
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length))) | -144,263,897,106,388,860 | Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards | mushroom_rl/core/parallelization_tools/step_sequence.py | discounted_return | nifunk/GNNMushroomRL | python | def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
'\n Compute the discounted return.\n\n :param gamma: temporal discount factor\n :return: exponentially weighted sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The StepSequence must be a single continuous rollout.')
if (not (0 <= gamma <= 1)):
raise pyrado.ValueErr(given=gamma, ge_constraint='0', le_constraint='1')
if (self.data_format == 'torch'):
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length))) |
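The discounted return computed above is the dot product of the reward sequence with the geometric weights `gamma**t`. A quick numerical check with NumPy (rewards made up for illustration):

```python
import numpy as np

rewards = np.array([1.0, 1.0, 1.0, 1.0])
gamma = 0.9

# Same expression as in discounted_return: sum_t gamma**t * r_t
ret = np.dot(rewards, gamma ** np.arange(len(rewards)))
print(ret)   # 1 + 0.9 + 0.81 + 0.729 = 3.439
assert np.isclose(ret, sum(g * r for g, r in zip(gamma ** np.arange(4), rewards)))
```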
@classmethod
def concat(cls, parts: Sequence['StepSequence'], data_format: Optional[str]=None, truncate_last: Optional[bool]=True):
'\n Concatenate multiple step sequences into one, truncating the last observation.\n\n :param parts: batch of sequences to concatenate\n :param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically\n :param truncate_last: remove the last step from each part, highly recommended to be `True`\n :return: concatenated sequence of `Steps`\n '
data_names = parts[0].data_names
if (data_format is None):
data_format = parts[0].data_format
data = {name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format) for name in data_names}
done = np.concatenate([ro.done for ro in parts])
continuous = all((ro.continuous for ro in parts))
rollout_bounds = None
if continuous:
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend((ro.rollout_bounds[1:] + acc_len))
acc_len += ro.rollout_bounds[(- 1)]
return StepSequence(data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data) | -2,580,023,898,341,784,000 | Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps` | mushroom_rl/core/parallelization_tools/step_sequence.py | concat | nifunk/GNNMushroomRL | python | @classmethod
def concat(cls, parts: Sequence['StepSequence'], data_format: Optional[str]=None, truncate_last: Optional[bool]=True):
'\n Concatenate multiple step sequences into one, truncating the last observation.\n\n :param parts: batch of sequences to concatenate\n :param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically\n :param truncate_last: remove the last step from each part, highly recommended to be `True`\n :return: concatenated sequence of `Steps`\n '
data_names = parts[0].data_names
if (data_format is None):
data_format = parts[0].data_format
data = {name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format) for name in data_names}
done = np.concatenate([ro.done for ro in parts])
continuous = all((ro.continuous for ro in parts))
rollout_bounds = None
if continuous:
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend((ro.rollout_bounds[1:] + acc_len))
acc_len += ro.rollout_bounds[(- 1)]
return StepSequence(data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data) |
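When concatenating, each part's rollout boundaries are shifted by the number of steps accumulated so far, as in the loop above. A small standalone sketch of that offsetting with made-up bounds:

```python
import numpy as np

# Hypothetical bounds of two rollouts to be concatenated.
bounds_a = np.array([0, 5, 8])      # two sub-rollouts, 8 steps total
bounds_b = np.array([0, 4])         # one sub-rollout, 4 steps total

merged, acc_len = [0], 0
for b in (bounds_a, bounds_b):
    merged.extend((b[1:] + acc_len).tolist())  # shift by steps seen so far
    acc_len += int(b[-1])
print(merged)                       # [0, 5, 8, 12]
```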
@classmethod
def process_data(cls, rollout: 'StepSequence', fcn: Callable, fcn_arg_name: str, fcn_arg_types: Union[(type, Tuple[type])]=np.ndarray, include_fields: Sequence[str]=None, exclude_fields: Sequence[str]=None, **process_fcn_kwargs):
'\n Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.\n\n :param rollout: `StepSequence` holding the data\n :param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.filtfilt()`\n :param fcn_arg_name: string of the remaining input of `process_fcn()`, e.g. `x` for `scipy.filtfilt()`\n :param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`\n :param include_fields: list of field names to include for processing, pass `None` to include all fields.\n If specified, only fields from this selection will be considered\n :param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything\n :param process_fcn_kwargs: keyword arguments forwarded to `process_fcn()`\n :return: new `StepSequence` instance with processed data\n '
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
' Wrap the processing function to call it recursively for nested data structures. '
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
inp = fcn(**kwargs)
elif isinstance(inp, dict):
for (key, value) in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
for (idx, item) in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
data_dict = dict()
include_fields = (include_fields or rollout.data_names)
exclude_fields = (exclude_fields or [])
for name in rollout.data_names:
data = rollout.get_data_values(name)
if ((name in include_fields) and (name not in exclude_fields)):
data = recursive_wrapper(data, **process_fcn_kwargs)
data_dict[name] = data
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous) | -3,135,531,648,346,896,400 | Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.filtfilt()`
:param fcn_arg_name: string of the remaining input of `process_fcn()`, e.g. `x` for `scipy.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include all fields.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `process_fcn()`
:return: new `StepSequence` instance with processed data | mushroom_rl/core/parallelization_tools/step_sequence.py | process_data | nifunk/GNNMushroomRL | python | @classmethod
def process_data(cls, rollout: 'StepSequence', fcn: Callable, fcn_arg_name: str, fcn_arg_types: Union[(type, Tuple[type])]=np.ndarray, include_fields: Sequence[str]=None, exclude_fields: Sequence[str]=None, **process_fcn_kwargs):
'\n Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.\n\n :param rollout: `StepSequence` holding the data\n :param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.filtfilt()`\n :param fcn_arg_name: string of the remaining input of `process_fcn()`, e.g. `x` for `scipy.filtfilt()`\n :param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`\n :param include_fields: list of field names to include for processing, pass `None` to include all fields.\n If specified, only fields from this selection will be considered\n :param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything\n :param process_fcn_kwargs: keyword arguments forwarded to `process_fcn()`\n :return: new `StepSequence` instance with processed data\n '
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
' Wrap the processing function to call it recursively for nested data structures. '
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
inp = fcn(**kwargs)
elif isinstance(inp, dict):
for (key, value) in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
for (idx, item) in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
data_dict = dict()
include_fields = (include_fields or rollout.data_names)
exclude_fields = (exclude_fields or [])
for name in rollout.data_names:
data = rollout.get_data_values(name)
if ((name in include_fields) and (name not in exclude_fields)):
data = recursive_wrapper(data, **process_fcn_kwargs)
data_dict[name] = data
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous) |
def _next_value(step: Step) -> float:
' Helper to return `next_value = 0` for last step '
if step.done:
return 0.0
return step.next_value | -70,958,724,919,454,110 | Helper to return `next_value = 0` for last step | mushroom_rl/core/parallelization_tools/step_sequence.py | _next_value | nifunk/GNNMushroomRL | python | def _next_value(step: Step) -> float:
' '
if step.done:
return 0.0
return step.next_value |
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
' Wrap the processing function to call it recursively for nested data structures. '
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
inp = fcn(**kwargs)
elif isinstance(inp, dict):
for (key, value) in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
for (idx, item) in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp | 1,204,677,687,032,972,500 | Wrap the processing function to call it recursively for nested data structures. | mushroom_rl/core/parallelization_tools/step_sequence.py | recursive_wrapper | nifunk/GNNMushroomRL | python | @functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
' '
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
inp = fcn(**kwargs)
elif isinstance(inp, dict):
for (key, value) in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
for (idx, item) in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp |
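The wrapper above walks dicts and lists and applies the processing function to every matching leaf. A simplified standalone sketch of the same recursion pattern (not the library's wrapper, and without the `fcn_arg_name` keyword plumbing):

```python
import numpy as np

def apply_recursively(fcn, inp, arg_types=(np.ndarray,)):
    """Apply fcn to every leaf whose type matches arg_types; recurse into containers."""
    if isinstance(inp, arg_types):
        return fcn(inp)
    if isinstance(inp, dict):
        return {k: apply_recursively(fcn, v, arg_types) for k, v in inp.items()}
    if isinstance(inp, list):
        return [apply_recursively(fcn, v, arg_types) for v in inp]
    return inp                      # leave unsupported leaves untouched

data = {'obs': np.ones(3), 'extra': [np.zeros(2), 'keep me']}
print(apply_recursively(lambda x: 2 * x, data))
```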
def make_scoped_cache(scope):
'Create a new scoped cache.\n\n In most cases the global cache should not be used directly but rather\n with a scope depending on the module a cache is used for. This is\n especially important when passing user-provided data as the cache key\n to prevent reading other unrelated cache keys.\n '
return ScopedCache(cache, scope) | 4,806,206,535,844,877,000 | Create a new scoped cache.
In most cases the global cache should not be used directly but rather
with a scope depending on the module a cache is used for. This is
especially important when passing user-provided data as the cache key
to prevent reading other unrelated cache keys. | indico/core/cache.py | make_scoped_cache | errikos/indico | python | def make_scoped_cache(scope):
'Create a new scoped cache.\n\n In most cases the global cache should not be used directly but rather\n with a scope depending on the module a cache is used for. This is\n especially important when passing user-provided data as the cache key\n to prevent reading other unrelated cache keys.\n '
return ScopedCache(cache, scope) |
def set_detector_value(self, kwargs_list: list):
' Only allow changes to confidence or the model '
try:
for kwargs in kwargs_list:
field = kwargs.get('field')
value = kwargs.get('value')
if (field in ['detector_confidence', 'detector_model']):
logger.info(f'{self.detector_name}: setting value: {field}: {value}')
self.monitor_config[field] = value
except Exception as e:
logger.error(f'{self.__class__.__name__}: Error setting value: {e}') | -4,545,308,264,092,963,000 | Only allow changes to confidence or the model | traffic_monitor/services/detectors/detector_cvlib.py | set_detector_value | mcdomx/monitor | python | def set_detector_value(self, kwargs_list: list):
' '
try:
for kwargs in kwargs_list:
field = kwargs.get('field')
value = kwargs.get('value')
if (field in ['detector_confidence', 'detector_model']):
logger.info(f'{self.detector_name}: setting value: {field}: {value}')
self.monitor_config[field] = value
except Exception as e:
logger.error(f'{self.__class__.__name__}: Error setting value: {e}') |
def fasterrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.\n\n Reference: `"Faster R-CNN: Towards Real-Time Object Detection with\n Region Proposal Networks" <https://arxiv.org/abs/1506.01497>`_.\n\n The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n image, and should be in ``0-1`` range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n\n The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n losses for both the RPN and the R-CNN.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as\n follows, where ``N`` is the number of detections:\n\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (``Int64Tensor[N]``): the predicted labels for each detection\n - scores (``Tensor[N]``): the scores of each detection\n\n For more details on the output, you may refer to :ref:`instance_seg_output`.\n\n Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n >>> # For training\n >>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)\n >>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]\n >>> labels = torch.randint(1, 91, (4, 11))\n >>> images = list(image for image in images)\n >>> targets = []\n >>> for i in range(len(images)):\n >>> d = {}\n >>> d[\'boxes\'] = boxes[i]\n >>> d[\'labels\'] = labels[i]\n >>> targets.append(d)\n >>> output = model(images, targets)\n >>> # For inference\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n >>>\n >>> # optionally, if you want to export the model to ONNX:\n >>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 3.\n '
is_trained = (pretrained or pretrained_backbone)
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = (misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d)
if pretrained:
pretrained_backbone = False
backbone = resnet50(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = FasterRCNN(backbone, num_classes, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['fasterrcnn_resnet50_fpn_coco'], progress=progress)
model.load_state_dict(state_dict)
overwrite_eps(model, 0.0)
return model | 1,192,879,758,170,599,200 | Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.
Reference: `"Faster R-CNN: Towards Real-Time Object Detection with
Region Proposal Networks" <https://arxiv.org/abs/1506.01497>`_.
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
The behavior of the model changes depending if it is in training or evaluation mode.
During training, the model expects both the input tensors, as well as a targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses for both the RPN and the R-CNN.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each detection
- scores (``Tensor[N]``): the scores of each detection
For more details on the output, you may refer to :ref:`instance_seg_output`.
Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.
Example::
>>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
>>> # For training
>>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)
>>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]
>>> labels = torch.randint(1, 91, (4, 11))
>>> images = list(image for image in images)
>>> targets = []
>>> for i in range(len(images)):
>>> d = {}
>>> d['boxes'] = boxes[i]
>>> d['labels'] = labels[i]
>>> targets.append(d)
>>> output = model(images, targets)
>>> # For inference
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
>>>
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3. | torchvision/models/detection/faster_rcnn.py | fasterrcnn_resnet50_fpn | Bethhhh/vision | python | def fasterrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.\n\n Reference: `"Faster R-CNN: Towards Real-Time Object Detection with\n Region Proposal Networks" <https://arxiv.org/abs/1506.01497>`_.\n\n The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n image, and should be in ``0-1`` range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n\n The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n losses for both the RPN and the R-CNN.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as\n follows, where ``N`` is the number of detections:\n\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with\n ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.\n - labels (``Int64Tensor[N]``): the predicted labels for each detection\n - scores (``Tensor[N]``): the scores of each detection\n\n For more details on the output, you may refer to :ref:`instance_seg_output`.\n\n Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n >>> # For training\n >>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)\n >>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]\n >>> labels = torch.randint(1, 91, (4, 11))\n >>> images = list(image for image in images)\n >>> targets = []\n >>> for i in range(len(images)):\n >>> d = {}\n >>> d[\'boxes\'] = boxes[i]\n >>> d[\'labels\'] = labels[i]\n >>> targets.append(d)\n >>> output = model(images, targets)\n >>> # For inference\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n >>>\n >>> # optionally, if you want to export the model to ONNX:\n >>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 3.\n '
is_trained = (pretrained or pretrained_backbone)
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = (misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d)
if pretrained:
pretrained_backbone = False
backbone = resnet50(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = FasterRCNN(backbone, num_classes, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['fasterrcnn_resnet50_fpn_coco'], progress=progress)
model.load_state_dict(state_dict)
overwrite_eps(model, 0.0)
return model |
def fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tuned for mobile use-cases.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See\n :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more\n details.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 3.\n '
weights_name = 'fasterrcnn_mobilenet_v3_large_320_fpn_coco'
defaults = {'min_size': 320, 'max_size': 640, 'rpn_pre_nms_top_n_test': 150, 'rpn_post_nms_top_n_test': 150, 'rpn_score_thresh': 0.05}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(weights_name, pretrained=pretrained, progress=progress, num_classes=num_classes, pretrained_backbone=pretrained_backbone, trainable_backbone_layers=trainable_backbone_layers, **kwargs) | -6,396,726,167,147,814,000 | Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tunned for mobile use-cases.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3. | torchvision/models/detection/faster_rcnn.py | fasterrcnn_mobilenet_v3_large_320_fpn | Bethhhh/vision | python | def fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tuned for mobile use-cases.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See\n :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more\n details.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 3.\n '
weights_name = 'fasterrcnn_mobilenet_v3_large_320_fpn_coco'
defaults = {'min_size': 320, 'max_size': 640, 'rpn_pre_nms_top_n_test': 150, 'rpn_post_nms_top_n_test': 150, 'rpn_score_thresh': 0.05}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(weights_name, pretrained=pretrained, progress=progress, num_classes=num_classes, pretrained_backbone=pretrained_backbone, trainable_backbone_layers=trainable_backbone_layers, **kwargs) |
def fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See\n :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more\n details.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 3.\n '
weights_name = 'fasterrcnn_mobilenet_v3_large_fpn_coco'
defaults = {'rpn_score_thresh': 0.05}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(weights_name, pretrained=pretrained, progress=progress, num_classes=num_classes, pretrained_backbone=pretrained_backbone, trainable_backbone_layers=trainable_backbone_layers, **kwargs) | 1,931,367,722,196,547,600 | Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3. | torchvision/models/detection/faster_rcnn.py | fasterrcnn_mobilenet_v3_large_fpn | Bethhhh/vision | python | def fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See\n :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more\n details.\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n num_classes (int): number of output classes of the model (including the background)\n pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is\n passed (the default) this value is set to 3.\n '
weights_name = 'fasterrcnn_mobilenet_v3_large_fpn_coco'
defaults = {'rpn_score_thresh': 0.05}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(weights_name, pretrained=pretrained, progress=progress, num_classes=num_classes, pretrained_backbone=pretrained_backbone, trainable_backbone_layers=trainable_backbone_layers, **kwargs) |
@extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
'Permit Admins to pause the server.'
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'pause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202) | -3,257,034,508,495,823,400 | Permit Admins to pause the server. | nova/api/openstack/compute/plugins/v3/pause_server.py | _pause | PFZheng/nova | python | @extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'pause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202) |
@extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
'Permit Admins to unpause the server.'
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'unpause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202) | 8,663,507,411,384,678,000 | Permit Admins to unpause the server. | nova/api/openstack/compute/plugins/v3/pause_server.py | _unpause | PFZheng/nova | python | @extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error, 'unpause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202) |
def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
"Wait until the value returned by predicate is not False or\n the timeout is elapsed.\n 'predicate' takes the driver as argument.\n "
if (not timeout):
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout, poll_frequency).until(predicate) | 3,087,956,966,512,135,000 | Wait until the value returned by predicate is not False or
the timeout is elapsed.
'predicate' takes the driver as argument. | openstack-dashboard/openstack_dashboard/test/integration_tests/basewebobject.py | _wait_until | JerryDog/horizon-f-road | python | def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
"Wait until the value returned by predicate is not False or\n the timeout is elapsed.\n 'predicate' takes the driver as argument.\n "
if (not timeout):
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout, poll_frequency).until(predicate) |
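`_wait_until` delegates to Selenium's `WebDriverWait`, which repeatedly evaluates the predicate until it returns a truthy value or the timeout elapses. A plain-Python sketch of the same polling pattern (not Selenium's implementation; names and values are illustrative):

```python
import time

def wait_until(predicate, timeout=5.0, poll_frequency=0.5):
    """Poll predicate() until it is truthy or the timeout elapses."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(poll_frequency)
    raise TimeoutError('condition not met within %.1f s' % timeout)

# Toy usage: wait until a counter reaches 3.
state = {'n': 0}
def bump_and_check():
    state['n'] += 1
    return state['n'] >= 3

wait_until(bump_and_check, timeout=2.0, poll_frequency=0.1)
print(state['n'])   # 3
```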
def _wait_till_text_present_in_element(self, element, text, timeout=None):
'Waiting for a text to appear in a certain element very often is\n actually waiting for a _different_ element with a different text to\n appear in place of an old element. So a way to avoid capturing stale\n element reference should be provided for this use case.\n\n Better to wrap getting entity status cell in a lambda\n to avoid problems with cell being replaced with totally different\n element by Javascript\n '
def predicate(_):
elt = (element() if hasattr(element, '__call__') else element)
return self._is_text_visible(elt, text)
self._wait_until(predicate, timeout) | 6,722,385,146,241,805,000 | Waiting for a text to appear in a certain element very often is
actually waiting for a _different_ element with a different text to
appear in place of an old element. So a way to avoid capturing stale
element reference should be provided for this use case.
Better to wrap getting entity status cell in a lambda
to avoid problems with cell being replaced with totally different
element by Javascript | openstack-dashboard/openstack_dashboard/test/integration_tests/basewebobject.py | _wait_till_text_present_in_element | JerryDog/horizon-f-road | python | def _wait_till_text_present_in_element(self, element, text, timeout=None):
'Waiting for a text to appear in a certain element very often is\n actually waiting for a _different_ element with a different text to\n appear in place of an old element. So a way to avoid capturing stale\n element reference should be provided for this use case.\n\n Better to wrap getting entity status cell in a lambda\n to avoid problems with cell being replaced with totally different\n element by Javascript\n '
def predicate(_):
elt = (element() if hasattr(element, '__call__') else element)
return self._is_text_visible(elt, text)
self._wait_until(predicate, timeout) |
def airflow_test_suite():
'Test suite for Airflow tests'
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite | -580,689,423,026,202,600 | Test suite for Airflow tests | setup.py | airflow_test_suite | 312day/airflow | python | def airflow_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite |
def git_version(version_: str) -> str:
'\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a \'release:{version}\' prefix\n and the latter with a \'dev0\' prefix. Following the prefix will be a sha of the current\n branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n '
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory not found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
return '.release:{version}+{sha}'.format(version=version_, sha=sha)
else:
return 'no_git_version' | 1,922,320,437,300,587,000 | Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
:param str version_: Semver version
:return: Found Airflow version in Git repo
:rtype: str | setup.py | git_version | 312day/airflow | python | def git_version(version_: str) -> str:
'\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a \'release:{version}\' prefix\n and the latter with a \'dev0\' prefix. Following the prefix will be a sha of the current\n branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n '
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory not found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
return '.release:{version}+{sha}'.format(version=version_, sha=sha)
else:
return 'no_git_version' |
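An illustrative sketch of the suffix shapes git_version() builds; the SHA below is made up and nothing is read from a real repository.

    sample_sha = "2f635dc265e78db6708f59f68e8009abb92c1e65"  # invented commit hash

    # Clean tree at a tagged release vs. a tree with uncommitted changes.
    release_suffix = '.release:{version}+{sha}'.format(version='2.0.0', sha=sample_sha)
    dirty_suffix = '.dev0+{sha}.dirty'.format(sha=sample_sha)

    print(release_suffix)  # .release:2.0.0+2f635dc2...
    print(dirty_suffix)    # .dev0+2f635dc2....dirty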
def write_version(filename: str=os.path.join(*[my_dir, 'airflow', 'git_version'])):
'\n Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".\n\n :param str filename: Destination file to write\n '
text = '{}'.format(git_version(version))
with open(filename, 'w') as file:
file.write(text) | 2,068,128,176,971,055,900 | Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write | setup.py | write_version | 312day/airflow | python | def write_version(filename: str=os.path.join(*[my_dir, 'airflow', 'git_version'])):
'\n Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".\n\n :param str filename: Destination file to write\n '
text = '{}'.format(git_version(version))
with open(filename, 'w') as file:
file.write(text) |
def is_package_excluded(package: str, exclusion_list: List[str]):
'\n Checks if package should be excluded.\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n '
return any([package.startswith(excluded_package) for excluded_package in exclusion_list]) | -7,323,985,351,570,894,000 | Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded | setup.py | is_package_excluded | 312day/airflow | python | def is_package_excluded(package: str, exclusion_list: List[str]):
'\n Checks if package should be excluded.\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n '
return any([package.startswith(excluded_package) for excluded_package in exclusion_list]) |
def do_setup():
'Perform the Airflow package setup.'
write_version()
setup(name='apache-airflow', description='Programmatically author, schedule and monitor data pipelines', long_description=long_description, long_description_content_type='text/markdown', license='Apache License 2.0', version=version, packages=find_packages(include=['airflow', 'airflow.*']), package_data={'airflow': ['py.typed'], '': ['airflow/alembic.ini', 'airflow/git_version', '*.ipynb', 'airflow/providers/cncf/kubernetes/example_dags/*.yaml'], 'airflow.api_connexion.openapi': ['*.yaml'], 'airflow.serialization': ['*.json']}, include_package_data=True, zip_safe=False, entry_points={'console_scripts': ['airflow = airflow.__main__:main']}, install_requires=INSTALL_REQUIREMENTS, setup_requires=['bowler', 'docutils', 'gitpython', 'setuptools', 'wheel'], extras_require=EXTRAS_REQUIREMENTS, classifiers=['Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: System :: Monitoring'], author='Apache Software Foundation', author_email='[email protected]', url='http://airflow.apache.org/', download_url=('https://dist.apache.org/repos/dist/release/airflow/' + version), cmdclass={'extra_clean': CleanCommand, 'compile_assets': CompileAssets, 'list_extras': ListExtras}, test_suite='setup.airflow_test_suite', python_requires='~=3.6') | 2,453,606,635,205,621,000 | Perform the Airflow package setup. | setup.py | do_setup | 312day/airflow | python | def do_setup():
write_version()
setup(name='apache-airflow', description='Programmatically author, schedule and monitor data pipelines', long_description=long_description, long_description_content_type='text/markdown', license='Apache License 2.0', version=version, packages=find_packages(include=['airflow', 'airflow.*']), package_data={'airflow': ['py.typed'], '': ['airflow/alembic.ini', 'airflow/git_version', '*.ipynb', 'airflow/providers/cncf/kubernetes/example_dags/*.yaml'], 'airflow.api_connexion.openapi': ['*.yaml'], 'airflow.serialization': ['*.json']}, include_package_data=True, zip_safe=False, entry_points={'console_scripts': ['airflow = airflow.__main__:main']}, install_requires=INSTALL_REQUIREMENTS, setup_requires=['bowler', 'docutils', 'gitpython', 'setuptools', 'wheel'], extras_require=EXTRAS_REQUIREMENTS, classifiers=['Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: System :: Monitoring'], author='Apache Software Foundation', author_email='[email protected]', url='http://airflow.apache.org/', download_url=('https://dist.apache.org/repos/dist/release/airflow/' + version), cmdclass={'extra_clean': CleanCommand, 'compile_assets': CompileAssets, 'list_extras': ListExtras}, test_suite='setup.airflow_test_suite', python_requires='~=3.6')
def initialize_options(self):
'Set default values for options.' | 953,287,520,272,231,300 | Set default values for options. | setup.py | initialize_options | 312day/airflow | python | def initialize_options(self):
|
def finalize_options(self):
'Set final values for options.' | 1,898,983,747,956,181,200 | Set final values for options. | setup.py | finalize_options | 312day/airflow | python | def finalize_options(self):
|
def run(self):
'Run command to remove temporary files and directories.'
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info') | -9,137,811,618,676,238,000 | Run command to remove temporary files and directories. | setup.py | run | 312day/airflow | python | def run(self):
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info') |
def initialize_options(self):
'Set default values for options.' | 953,287,520,272,231,300 | Set default values for options. | setup.py | initialize_options | 312day/airflow | python | def initialize_options(self):
|
def finalize_options(self):
'Set final values for options.' | 1,898,983,747,956,181,200 | Set final values for options. | setup.py | finalize_options | 312day/airflow | python | def finalize_options(self):
|
def run(self):
'Run a command to compile and build assets.'
subprocess.check_call('./airflow/www/compile_assets.sh') | -811,770,318,344,817,200 | Run a command to compile and build assets. | setup.py | run | 312day/airflow | python | def run(self):
subprocess.check_call('./airflow/www/compile_assets.sh') |
def initialize_options(self):
'Set default values for options.' | 953,287,520,272,231,300 | Set default values for options. | setup.py | initialize_options | 312day/airflow | python | def initialize_options(self):
|
def finalize_options(self):
'Set final values for options.' | 1,898,983,747,956,181,200 | Set final values for options. | setup.py | finalize_options | 312day/airflow | python | def finalize_options(self):
|
def run(self):
'List extras.'
print('\n'.join(wrap(', '.join(EXTRAS_REQUIREMENTS.keys()), 100))) | 946,835,777,747,823,000 | List extras. | setup.py | run | 312day/airflow | python | def run(self):
print('\n'.join(wrap(', '.join(EXTRAS_REQUIREMENTS.keys()), 100))) |
def git_ignored(file: Path) -> bool:
'Returns true if this file is in a Git repo and ignored by that repo.\n\n Returns true for ignored files that were manually added to a repo.\n '
file = file.resolve()
directory = file.parent
while True:
try:
returncode = subprocess.run(['git', 'check-ignore', '--quiet', '--no-index', file], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, cwd=directory).returncode
return (returncode in (0, 128))
except FileNotFoundError:
if (directory == directory.parent):
return False
directory = directory.parent | -2,774,505,125,595,756,000 | Returns true if this file is in a Git repo and ignored by that repo.
Returns true for ignored files that were manually added to a repo. | pw_watch/py/pw_watch/watch.py | git_ignored | isabella232/pigweed | python | def git_ignored(file: Path) -> bool:
'Returns true if this file is in a Git repo and ignored by that repo.\n\n Returns true for ignored files that were manually added to a repo.\n '
file = file.resolve()
directory = file.parent
while True:
try:
returncode = subprocess.run(['git', 'check-ignore', '--quiet', '--no-index', file], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, cwd=directory).returncode
return (returncode in (0, 128))
except FileNotFoundError:
if (directory == directory.parent):
return False
directory = directory.parent |
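A hedged usage sketch: filtering candidate paths through git_ignored() from the record above. The paths are invented; only the boolean contract comes from the docstring.

    from pathlib import Path

    candidates = [Path("out/build.ninja"), Path("pw_watch/py/pw_watch/watch.py")]
    # Keep only files Git does not ignore (git_ignored is the function defined above).
    not_ignored = [p for p in candidates if not git_ignored(p)]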
def add_parser_arguments(parser: argparse.ArgumentParser) -> None:
'Sets up an argument parser for pw watch.'
parser.add_argument('--patterns', help=(_WATCH_PATTERN_DELIMITER + '-delimited list of globs to watch to trigger recompile'), default=_WATCH_PATTERN_DELIMITER.join(_WATCH_PATTERNS))
parser.add_argument('--ignore_patterns', dest='ignore_patterns_string', help=(_WATCH_PATTERN_DELIMITER + '-delimited list of globs to ignore events from'))
parser.add_argument('--exclude_list', nargs='+', type=Path, help='directories to ignore during pw watch', default=[])
parser.add_argument('--no-restart', dest='restart', action='store_false', help='do not restart ongoing builds if files change')
parser.add_argument('default_build_targets', nargs='*', metavar='target', default=[], help='Automatically locate a build directory and build these targets. For example, `host docs` searches for a Ninja build directory (starting with out/) and builds the `host` and `docs` targets. To specify one or more directories, use the -C / --build_directory option.')
parser.add_argument('-C', '--build_directory', dest='build_directories', nargs='+', action='append', default=[], metavar=('directory', 'target'), help='Specify a build directory and optionally targets to build. `pw watch -C out tgt` is equivalent to `ninja -C out tgt`') | 8,633,480,375,014,321,000 | Sets up an argument parser for pw watch. | pw_watch/py/pw_watch/watch.py | add_parser_arguments | isabella232/pigweed | python | def add_parser_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--patterns', help=(_WATCH_PATTERN_DELIMITER + '-delimited list of globs to watch to trigger recompile'), default=_WATCH_PATTERN_DELIMITER.join(_WATCH_PATTERNS))
parser.add_argument('--ignore_patterns', dest='ignore_patterns_string', help=(_WATCH_PATTERN_DELIMITER + '-delimited list of globs to ignore events from'))
parser.add_argument('--exclude_list', nargs='+', type=Path, help='directories to ignore during pw watch', default=[])
parser.add_argument('--no-restart', dest='restart', action='store_false', help='do not restart ongoing builds if files change')
parser.add_argument('default_build_targets', nargs='*', metavar='target', default=[], help='Automatically locate a build directory and build these targets. For example, `host docs` searches for a Ninja build directory (starting with out/) and builds the `host` and `docs` targets. To specify one or more directories, use the -C / --build_directory option.')
parser.add_argument('-C', '--build_directory', dest='build_directories', nargs='+', action='append', default=[], metavar=('directory', 'target'), help='Specify a build directory and optionally targets to build. `pw watch -C out tgt` is equivalent to `ninja -C out tgt`') |
def minimal_watch_directories(to_watch: Path, to_exclude: Iterable[Path]):
'Determine which subdirectory to watch recursively'
try:
to_watch = Path(to_watch)
except TypeError:
assert False, 'Please watch one directory at a time.'
directories_to_exclude: List[Path] = [to_watch.joinpath(directory_to_exclude) for directory_to_exclude in to_exclude if to_watch.joinpath(directory_to_exclude).is_dir()]
exclude_dir_parents = {to_watch}
for directory_to_exclude in directories_to_exclude:
parts = list(Path(directory_to_exclude).relative_to(to_watch).parts)[:(- 1)]
dir_tmp = to_watch
for part in parts:
dir_tmp = Path(dir_tmp, part)
exclude_dir_parents.add(dir_tmp)
for directory in exclude_dir_parents:
dir_path = Path(directory)
(yield (dir_path, False))
for item in Path(directory).iterdir():
if (item.is_dir() and (item not in exclude_dir_parents) and (item not in directories_to_exclude)):
(yield (item, True)) | 8,227,061,236,398,482,000 | Determine which subdirectory to watch recursively | pw_watch/py/pw_watch/watch.py | minimal_watch_directories | isabella232/pigweed | python | def minimal_watch_directories(to_watch: Path, to_exclude: Iterable[Path]):
try:
to_watch = Path(to_watch)
except TypeError:
assert False, 'Please watch one directory at a time.'
directories_to_exclude: List[Path] = [to_watch.joinpath(directory_to_exclude) for directory_to_exclude in to_exclude if to_watch.joinpath(directory_to_exclude).is_dir()]
exclude_dir_parents = {to_watch}
for directory_to_exclude in directories_to_exclude:
parts = list(Path(directory_to_exclude).relative_to(to_watch).parts)[:(- 1)]
dir_tmp = to_watch
for part in parts:
dir_tmp = Path(dir_tmp, part)
exclude_dir_parents.add(dir_tmp)
for directory in exclude_dir_parents:
dir_path = Path(directory)
(yield (dir_path, False))
for item in Path(directory).iterdir():
if (item.is_dir() and (item not in exclude_dir_parents) and (item not in directories_to_exclude)):
(yield (item, True)) |
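A small sketch of driving the generator above on an invented layout; it only shows the shape of the (path, recurse) pairs it yields.

    from pathlib import Path

    # Invented inputs: watch the current tree, excluding out/.
    for path, recurse in minimal_watch_directories(Path.cwd(), [Path("out")]):
        # Ancestors of an excluded directory come back with recurse=False,
        # all other subdirectories with recurse=True.
        print(path, recurse)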
def get_common_excludes() -> List[Path]:
'Find commonly excluded directories, and return them as a [Path]'
exclude_list: List[Path] = []
typical_ignored_directories: List[str] = ['.environment', '.presubmit', '.git', '.mypy_cache', '.cargo', 'environment', 'out']
pw_root_dir = Path(os.environ['PW_ROOT'])
exclude_list.extend(((pw_root_dir / ignored_directory) for ignored_directory in typical_ignored_directories))
pw_project_root_dir = Path(os.environ['PW_PROJECT_ROOT'])
if (pw_project_root_dir != pw_root_dir):
exclude_list.extend(((pw_project_root_dir / ignored_directory) for ignored_directory in typical_ignored_directories))
legacy_directories = ['.cipd', '.python3-venv']
found_legacy = False
for legacy_directory in legacy_directories:
full_legacy_directory = (pw_root_dir / legacy_directory)
if full_legacy_directory.is_dir():
_LOG.warning('Legacy environment directory found: %s', str(full_legacy_directory))
exclude_list.append(full_legacy_directory)
found_legacy = True
if found_legacy:
_LOG.warning('Found legacy environment directory(s); these should be deleted')
return exclude_list | -5,883,953,771,426,845,000 | Find commonly excluded directories, and return them as a [Path] | pw_watch/py/pw_watch/watch.py | get_common_excludes | isabella232/pigweed | python | def get_common_excludes() -> List[Path]:
exclude_list: List[Path] = []
typical_ignored_directories: List[str] = ['.environment', '.presubmit', '.git', '.mypy_cache', '.cargo', 'environment', 'out']
pw_root_dir = Path(os.environ['PW_ROOT'])
exclude_list.extend(((pw_root_dir / ignored_directory) for ignored_directory in typical_ignored_directories))
pw_project_root_dir = Path(os.environ['PW_PROJECT_ROOT'])
if (pw_project_root_dir != pw_root_dir):
exclude_list.extend(((pw_project_root_dir / ignored_directory) for ignored_directory in typical_ignored_directories))
legacy_directories = ['.cipd', '.python3-venv']
found_legacy = False
for legacy_directory in legacy_directories:
full_legacy_directory = (pw_root_dir / legacy_directory)
if full_legacy_directory.is_dir():
_LOG.warning('Legacy environment directory found: %s', str(full_legacy_directory))
exclude_list.append(full_legacy_directory)
found_legacy = True
if found_legacy:
_LOG.warning('Found legacy environment directory(s); these should be deleted')
return exclude_list |
def _find_build_dir(default_build_dir: Path=Path('out')) -> Optional[Path]:
'Searches for a build directory, returning the first it finds.'
if default_build_dir.joinpath('build.ninja').exists():
return default_build_dir
for path in default_build_dir.glob('**/build.ninja'):
return path.parent
for path in Path.cwd().glob('**/build.ninja'):
return path.parent
return None | 3,079,257,728,143,472,600 | Searches for a build directory, returning the first it finds. | pw_watch/py/pw_watch/watch.py | _find_build_dir | isabella232/pigweed | python | def _find_build_dir(default_build_dir: Path=Path('out')) -> Optional[Path]:
if default_build_dir.joinpath('build.ninja').exists():
return default_build_dir
for path in default_build_dir.glob('**/build.ninja'):
return path.parent
for path in Path.cwd().glob('**/build.ninja'):
return path.parent
return None |
def watch(default_build_targets: List[str], build_directories: List[str], patterns: str, ignore_patterns_string: str, exclude_list: List[Path], restart: bool):
'Watches files and runs Ninja commands when they change.'
_LOG.info('Starting Pigweed build watcher')
if (os.environ['PW_ROOT'] is None):
_exit_due_to_pigweed_not_installed()
pw_root = Path(os.environ['PW_ROOT']).resolve()
if (Path.cwd().resolve() not in [pw_root, *pw_root.parents]):
_exit_due_to_pigweed_not_installed()
exclude_list += get_common_excludes()
build_commands = [BuildCommand(Path(build_dir[0]), tuple(build_dir[1:])) for build_dir in build_directories]
if (default_build_targets or (not build_directories)):
build_dir = _find_build_dir()
if (build_dir is None):
_die("No build dirs found. Did you forget to run 'gn gen out'?")
build_commands.append(BuildCommand(build_dir, tuple(default_build_targets)))
for (i, build_target) in enumerate(build_commands, 1):
if (not build_target.build_dir.is_dir()):
_die("Build directory doesn't exist: %s", build_target)
else:
_LOG.info('Will build [%d/%d]: %s', i, len(build_commands), build_target)
_LOG.debug('Patterns: %s', patterns)
path_to_log = str(Path().resolve()).replace(str(Path.home()), '$HOME')
ignore_patterns = (ignore_patterns_string.split(_WATCH_PATTERN_DELIMITER) if ignore_patterns_string else [])
env = pw_cli.env.pigweed_environment()
if env.PW_EMOJI:
charset = _EMOJI_CHARSET
else:
charset = _ASCII_CHARSET
event_handler = PigweedBuildWatcher(patterns=patterns.split(_WATCH_PATTERN_DELIMITER), ignore_patterns=ignore_patterns, build_commands=build_commands, charset=charset, restart=restart)
try:
_LOG.info('Attaching filesystem watcher to %s/...', path_to_log)
observers = []
for (path, rec) in minimal_watch_directories(Path.cwd(), exclude_list):
observer = Observer()
observer.schedule(event_handler, str(path), recursive=rec)
observer.start()
observers.append(observer)
event_handler.debouncer.press('Triggering initial build...')
for observer in observers:
while observer.is_alive():
observer.join(1)
except (KeyboardInterrupt, EOFError):
_exit_due_to_interrupt()
except OSError as err:
if (err.args[0] == _ERRNO_INOTIFY_LIMIT_REACHED):
_exit_due_to_inotify_limit()
else:
raise err
_LOG.critical('Should never get here')
observer.join() | -2,830,571,198,471,650,300 | Watches files and runs Ninja commands when they change. | pw_watch/py/pw_watch/watch.py | watch | isabella232/pigweed | python | def watch(default_build_targets: List[str], build_directories: List[str], patterns: str, ignore_patterns_string: str, exclude_list: List[Path], restart: bool):
_LOG.info('Starting Pigweed build watcher')
if (os.environ['PW_ROOT'] is None):
_exit_due_to_pigweed_not_installed()
pw_root = Path(os.environ['PW_ROOT']).resolve()
if (Path.cwd().resolve() not in [pw_root, *pw_root.parents]):
_exit_due_to_pigweed_not_installed()
exclude_list += get_common_excludes()
build_commands = [BuildCommand(Path(build_dir[0]), tuple(build_dir[1:])) for build_dir in build_directories]
if (default_build_targets or (not build_directories)):
build_dir = _find_build_dir()
if (build_dir is None):
_die("No build dirs found. Did you forget to run 'gn gen out'?")
build_commands.append(BuildCommand(build_dir, tuple(default_build_targets)))
for (i, build_target) in enumerate(build_commands, 1):
if (not build_target.build_dir.is_dir()):
_die("Build directory doesn't exist: %s", build_target)
else:
_LOG.info('Will build [%d/%d]: %s', i, len(build_commands), build_target)
_LOG.debug('Patterns: %s', patterns)
path_to_log = str(Path().resolve()).replace(str(Path.home()), '$HOME')
ignore_patterns = (ignore_patterns_string.split(_WATCH_PATTERN_DELIMITER) if ignore_patterns_string else [])
env = pw_cli.env.pigweed_environment()
if env.PW_EMOJI:
charset = _EMOJI_CHARSET
else:
charset = _ASCII_CHARSET
event_handler = PigweedBuildWatcher(patterns=patterns.split(_WATCH_PATTERN_DELIMITER), ignore_patterns=ignore_patterns, build_commands=build_commands, charset=charset, restart=restart)
try:
_LOG.info('Attaching filesystem watcher to %s/...', path_to_log)
observers = []
for (path, rec) in minimal_watch_directories(Path.cwd(), exclude_list):
observer = Observer()
observer.schedule(event_handler, str(path), recursive=rec)
observer.start()
observers.append(observer)
event_handler.debouncer.press('Triggering initial build...')
for observer in observers:
while observer.is_alive():
observer.join(1)
except (KeyboardInterrupt, EOFError):
_exit_due_to_interrupt()
except OSError as err:
if (err.args[0] == _ERRNO_INOTIFY_LIMIT_REACHED):
_exit_due_to_inotify_limit()
else:
raise err
_LOG.critical('Should never get here')
observer.join() |
def main() -> None:
'Watch files for changes and rebuild.'
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
add_parser_arguments(parser)
watch(**vars(parser.parse_args())) | 5,897,164,643,106,543,000 | Watch files for changes and rebuild. | pw_watch/py/pw_watch/watch.py | main | isabella232/pigweed | python | def main() -> None:
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
add_parser_arguments(parser)
watch(**vars(parser.parse_args())) |
def _path_matches(self, path: Path) -> bool:
'Returns true if path matches according to the watcher patterns'
return ((not any((path.match(x) for x in self.ignore_patterns))) and any((path.match(x) for x in self.patterns))) | 4,431,571,565,773,947,000 | Returns true if path matches according to the watcher patterns | pw_watch/py/pw_watch/watch.py | _path_matches | isabella232/pigweed | python | def _path_matches(self, path: Path) -> bool:
return ((not any((path.match(x) for x in self.ignore_patterns))) and any((path.match(x) for x in self.patterns))) |
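A tiny illustration of the matching rule above, using pathlib's Path.match directly; the pattern lists are invented.

    from pathlib import Path

    patterns = ["*.py", "*.cc"]
    ignore_patterns = ["*_test.py"]

    def path_matches(path: Path) -> bool:
        # Same rule as the record above: not ignored, and matched by at least one pattern.
        return (not any(path.match(x) for x in ignore_patterns)
                and any(path.match(x) for x in patterns))

    print(path_matches(Path("pw_watch/watch.py")))       # True
    print(path_matches(Path("pw_watch/watch_test.py")))  # False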
def run(self) -> None:
'Run all the builds in serial and capture pass/fail for each.'
print('\x1bc', end='')
print(pw_cli.branding.banner())
print(_COLOR.green(' Watching for changes. Ctrl-C to exit; enter to rebuild'))
print()
_LOG.info('Change detected: %s', self.matching_path)
self.builds_succeeded = []
num_builds = len(self.build_commands)
_LOG.info('Starting build with %d directories', num_builds)
env = os.environ.copy()
env['PW_USE_COLOR'] = '1'
for (i, cmd) in enumerate(self.build_commands, 1):
_LOG.info('[%d/%d] Starting build: %s', i, num_builds, cmd)
print()
self._current_build = subprocess.Popen(['ninja', '-C', *cmd.args()], env=env)
returncode = self._current_build.wait()
print()
build_ok = (returncode == 0)
if build_ok:
level = logging.INFO
tag = '(OK)'
else:
level = logging.ERROR
tag = '(FAIL)'
_LOG.log(level, '[%d/%d] Finished build: %s %s', i, num_builds, cmd, tag)
self.builds_succeeded.append(build_ok) | -7,174,389,099,788,953,000 | Run all the builds in serial and capture pass/fail for each. | pw_watch/py/pw_watch/watch.py | run | isabella232/pigweed | python | def run(self) -> None:
print('\x1bc', end='')
print(pw_cli.branding.banner())
print(_COLOR.green(' Watching for changes. Ctrl-C to exit; enter to rebuild'))
print()
_LOG.info('Change detected: %s', self.matching_path)
self.builds_succeeded = []
num_builds = len(self.build_commands)
_LOG.info('Starting build with %d directories', num_builds)
env = os.environ.copy()
env['PW_USE_COLOR'] = '1'
for (i, cmd) in enumerate(self.build_commands, 1):
_LOG.info('[%d/%d] Starting build: %s', i, num_builds, cmd)
print()
self._current_build = subprocess.Popen(['ninja', '-C', *cmd.args()], env=env)
returncode = self._current_build.wait()
print()
build_ok = (returncode == 0)
if build_ok:
level = logging.INFO
tag = '(OK)'
else:
level = logging.ERROR
tag = '(FAIL)'
_LOG.log(level, '[%d/%d] Finished build: %s %s', i, num_builds, cmd, tag)
self.builds_succeeded.append(build_ok) |
def _JsonValueToPythonValue(json_value):
'Convert the given JsonValue to a json string.'
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
return None
entries = [(f, json_value.get_assigned_value(f.name)) for f in json_value.all_fields()]
assigned_entries = [(f, value) for (f, value) in entries if (value is not None)]
(field, value) = assigned_entries[0]
if (not isinstance(field, messages.MessageField)):
return value
elif (field.message_type is JsonObject):
return _JsonObjectToPythonValue(value)
elif (field.message_type is JsonArray):
return _JsonArrayToPythonValue(value) | 231,432,454,391,992,060 | Convert the given JsonValue to a json string. | .install/.backup/lib/apitools/base/py/extra_types.py | _JsonValueToPythonValue | Technology-Hatchery/google-cloud-sdk | python | def _JsonValueToPythonValue(json_value):
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
return None
entries = [(f, json_value.get_assigned_value(f.name)) for f in json_value.all_fields()]
assigned_entries = [(f, value) for (f, value) in entries if (value is not None)]
(field, value) = assigned_entries[0]
if (not isinstance(field, messages.MessageField)):
return value
elif (field.message_type is JsonObject):
return _JsonObjectToPythonValue(value)
elif (field.message_type is JsonArray):
return _JsonArrayToPythonValue(value) |
def _PythonValueToJsonValue(py_value):
'Convert the given python value to a JsonValue.'
if (py_value is None):
return JsonValue(is_null=True)
if isinstance(py_value, bool):
return JsonValue(boolean_value=py_value)
if isinstance(py_value, basestring):
return JsonValue(string_value=py_value)
if isinstance(py_value, numbers.Number):
if isinstance(py_value, (int, long)):
if (_MININT64 < py_value < _MAXINT64):
return JsonValue(integer_value=py_value)
return JsonValue(double_value=float(py_value))
if isinstance(py_value, dict):
return JsonValue(object_value=_PythonValueToJsonObject(py_value))
if isinstance(py_value, collections.Iterable):
return JsonValue(array_value=_PythonValueToJsonArray(py_value))
raise exceptions.InvalidDataError(('Cannot convert "%s" to JsonValue' % py_value)) | -8,058,865,423,543,755,000 | Convert the given python value to a JsonValue. | .install/.backup/lib/apitools/base/py/extra_types.py | _PythonValueToJsonValue | Technology-Hatchery/google-cloud-sdk | python | def _PythonValueToJsonValue(py_value):
if (py_value is None):
return JsonValue(is_null=True)
if isinstance(py_value, bool):
return JsonValue(boolean_value=py_value)
if isinstance(py_value, basestring):
return JsonValue(string_value=py_value)
if isinstance(py_value, numbers.Number):
if isinstance(py_value, (int, long)):
if (_MININT64 < py_value < _MAXINT64):
return JsonValue(integer_value=py_value)
return JsonValue(double_value=float(py_value))
if isinstance(py_value, dict):
return JsonValue(object_value=_PythonValueToJsonObject(py_value))
if isinstance(py_value, collections.Iterable):
return JsonValue(array_value=_PythonValueToJsonArray(py_value))
raise exceptions.InvalidDataError(('Cannot convert "%s" to JsonValue' % py_value)) |
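A hedged round-trip sketch using the two converters above; it assumes both module-private functions and their JsonValue message are importable from the same module, and the sample dict is invented.

    # Python value -> JsonValue message -> Python value.
    py_value = {"ok": True, "count": 3, "items": [1.5, None, "x"]}

    json_value = _PythonValueToJsonValue(py_value)   # converter from the record above
    print(_JsonValueToPythonValue(json_value))       # expected to equal py_value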
def _EncodeInt64Field(field, value):
'Handle the special case of int64 as a string.'
capabilities = [messages.Variant.INT64, messages.Variant.UINT64]
if (field.variant not in capabilities):
return encoding.CodecResult(value=value, complete=False)
if field.repeated:
result = [str(x) for x in value]
else:
result = str(value)
return encoding.CodecResult(value=result, complete=True) | 6,361,198,810,250,222,000 | Handle the special case of int64 as a string. | .install/.backup/lib/apitools/base/py/extra_types.py | _EncodeInt64Field | Technology-Hatchery/google-cloud-sdk | python | def _EncodeInt64Field(field, value):
capabilities = [messages.Variant.INT64, messages.Variant.UINT64]
if (field.variant not in capabilities):
return encoding.CodecResult(value=value, complete=False)
if field.repeated:
result = [str(x) for x in value]
else:
result = str(value)
return encoding.CodecResult(value=result, complete=True) |
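A minimal illustration of why the codec above emits 64-bit integers as strings; it only mimics the convention with the standard json module and does not call the apitools codec itself.

    import json

    big = 2 ** 60 + 7              # beyond the 2**53 range many JSON readers keep exact
    payload = {"id": str(big)}     # int64 carried as a decimal string, as above
    wire = json.dumps(payload)

    restored = int(json.loads(wire)["id"])
    assert restored == big         # no precision lost on the way back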
def vmin(*vectors):
'\n Retrieve the minimum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError('Expected an iterable of Vectors')
if (len(vectors) == 0):
raise ValueError('min() arg is an empty sequence')
ret = vectors[0]
for i in vectors[1:]:
if ((i < ret) or (i <= ret)):
ret = i
return ret | 1,086,249,124,194,980,200 | Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied | devito/ir/support/vector.py | vmin | rhodrin/devito | python | def vmin(*vectors):
'\n Retrieve the minimum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError('Expected an iterable of Vectors')
if (len(vectors) == 0):
raise ValueError('min() arg is an empty sequence')
ret = vectors[0]
for i in vectors[1:]:
if ((i < ret) or (i <= ret)):
ret = i
return ret |
def vmax(*vectors):
'\n Retrieve the maximum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError('Expected an iterable of Vectors')
if (len(vectors) == 0):
raise ValueError('min() arg is an empty sequence')
ret = vectors[0]
for i in vectors[1:]:
if ((i > ret) or (i >= ret)):
ret = i
return ret | 4,997,967,146,906,403,000 | Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied | devito/ir/support/vector.py | vmax | rhodrin/devito | python | def vmax(*vectors):
'\n Retrieve the maximum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError('Expected an iterable of Vectors')
if (len(vectors) == 0):
raise ValueError('min() arg is an empty sequence')
ret = vectors[0]
for i in vectors[1:]:
if ((i > ret) or (i >= ret)):
ret = i
return ret |
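A hedged usage sketch for the two helpers above; it assumes Vector accepts integer components, as the neighbouring records suggest, and the values are invented.

    a = Vector(3, 2, 1)
    b = Vector(1, 4, 5)

    smallest = vmin(a, b)   # the argument that compares lowest under Vector's ordering
    largest = vmax(a, b)
    # TypeError is raised if any pair is incomparable, ValueError on an empty call.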
def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n The distance is a reflexive, transitive, and anti-symmetric relation,\n which establishes a total ordering amongst Vectors.\n\n The distance is a function [Vector x Vector --> D]. D is a tuple of length\n equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether\n the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or\n succeeds (> 0) the i-th component of ``other``, other_i.\n\n In particular, the *absolute value* of D_i represents the number of\n integer points that exist between self_i and sink_i.\n\n Examples\n --------\n | 3 | | 1 | | 2 |\n source = | 2 | , sink = | 4 | , distance => | -2 |\n | 1 | | 5 | | -4 |\n\n There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively.\n '
return (self - other) | -7,095,503,812,526,716,000 | Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
integer points that exist between self_i and sink_i.
Examples
--------
         | 3 |           | 1 |                |  2 |
source = | 2 | ,  sink = | 4 | , distance  => | -2 |
         | 1 |           | 5 |                | -4 |
There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively. | devito/ir/support/vector.py | distance | rhodrin/devito | python | def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n The distance is a reflexive, transitive, and anti-symmetric relation,\n which establishes a total ordering amongst Vectors.\n\n The distance is a function [Vector x Vector --> D]. D is a tuple of length\n equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether\n the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or\n succeeds (> 0) the i-th component of ``other``, other_i.\n\n In particular, the *absolute value* of D_i represents the number of\n integer points that exist between self_i and sink_i.\n\n Examples\n --------\n | 3 | | 1 | | 2 |\n source = | 2 | , sink = | 4 | , distance => | -2 |\n | 1 | | 5 | | -4 |\n\n There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively.\n '
return (self - other) |
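A worked restatement of the docstring example above, again assuming a Vector built from integer components.

    source = Vector(3, 2, 1)
    sink = Vector(1, 4, 5)

    # Component-wise difference, as defined above: expected (2, -2, -4).
    print(source.distance(sink))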
@classmethod
def transpose(cls, *vectors):
'\n Transpose a matrix represented as an iterable of homogeneous LabeledVectors.\n '
if (len(vectors) == 0):
return LabeledVector()
if (not all((isinstance(v, LabeledVector) for v in vectors))):
raise ValueError(('All items must be of type LabeledVector, got [%s]' % ','.join((i.__class__.__name__ for i in vectors))))
T = OrderedDict()
for v in vectors:
for (l, i) in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple(((l, Vector(*i)) for (l, i) in T.items())) | 4,527,107,071,712,793,000 | Transpose a matrix represented as an iterable of homogeneous LabeledVectors. | devito/ir/support/vector.py | transpose | rhodrin/devito | python | @classmethod
def transpose(cls, *vectors):
'\n \n '
if (len(vectors) == 0):
return LabeledVector()
if (not all((isinstance(v, LabeledVector) for v in vectors))):
raise ValueError(('All items must be of type LabeledVector, got [%s]' % ','.join((i.__class__.__name__ for i in vectors))))
T = OrderedDict()
for v in vectors:
for (l, i) in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple(((l, Vector(*i)) for (l, i) in T.items())) |
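An illustrative sketch of the transpose above; the labels and values are invented, and the (label, value) pair constructor mirrors the call visible in the neighbouring distance record.

    v1 = LabeledVector([("x", 1), ("y", 2)])
    v2 = LabeledVector([("x", 3), ("y", 4)])

    # Expected shape: one (label, Vector) pair per label,
    # e.g. (('x', (1, 3)), ('y', (2, 4))).
    print(LabeledVector.transpose(v1, v2))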
@memoized_meth
def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n Parameters\n ----------\n other : LabeledVector\n The LabeledVector from which the distance is computed.\n '
if (not isinstance(other, LabeledVector)):
raise TypeError('Cannot compute distance from obj of type %s', type(other))
if (self.labels != other.labels):
raise TypeError('Cannot compute distance due to mismatching `labels`')
return LabeledVector(list(zip(self.labels, (self - other)))) | 7,381,341,259,469,386,000 | Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed. | devito/ir/support/vector.py | distance | rhodrin/devito | python | @memoized_meth
def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n Parameters\n ----------\n other : LabeledVector\n The LabeledVector from which the distance is computed.\n '
if (not isinstance(other, LabeledVector)):
raise TypeError('Cannot compute distance from obj of type %s', type(other))
if (self.labels != other.labels):
raise TypeError('Cannot compute distance due to mismatching `labels`')
return LabeledVector(list(zip(self.labels, (self - other)))) |
def generate_bin():
'Generate bin files.'
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
parser.add_argument('--train_nodes_num', type=int, default=140, help='Nodes numbers for training')
parser.add_argument('--eval_nodes_num', type=int, default=500, help='Nodes numbers for evaluation')
parser.add_argument('--test_nodes_num', type=int, default=1000, help='Nodes numbers for test')
parser.add_argument('--result_path', type=str, default='./preprocess_Result/', help='Result path')
args = parser.parse_args()
(feature, biases, _, _, _, _, y_test, test_mask) = load_and_process(args.data_dir, args.train_nodes_num, args.eval_nodes_num, args.test_nodes_num)
feature_path = os.path.join(args.result_path, '00_data')
biases_path = os.path.join(args.result_path, '01_data')
y_test_path = os.path.join(args.result_path, 'y_test.npy')
test_mask_path = os.path.join(args.result_path, 'test_mask.npy')
os.makedirs(feature_path)
os.makedirs(biases_path)
feature.tofile(os.path.join(feature_path, 'feature.bin'))
biases.tofile(os.path.join(biases_path, 'biases.bin'))
np.save(y_test_path, y_test)
np.save(test_mask_path, test_mask) | -6,830,304,606,836,681,000 | Generate bin files. | model_zoo/official/gnn/gat/preprocess.py | generate_bin | 233-puchi/mindspore | python | def generate_bin():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
parser.add_argument('--train_nodes_num', type=int, default=140, help='Nodes numbers for training')
parser.add_argument('--eval_nodes_num', type=int, default=500, help='Nodes numbers for evaluation')
parser.add_argument('--test_nodes_num', type=int, default=1000, help='Nodes numbers for test')
parser.add_argument('--result_path', type=str, default='./preprocess_Result/', help='Result path')
args = parser.parse_args()
(feature, biases, _, _, _, _, y_test, test_mask) = load_and_process(args.data_dir, args.train_nodes_num, args.eval_nodes_num, args.test_nodes_num)
feature_path = os.path.join(args.result_path, '00_data')
biases_path = os.path.join(args.result_path, '01_data')
y_test_path = os.path.join(args.result_path, 'y_test.npy')
test_mask_path = os.path.join(args.result_path, 'test_mask.npy')
os.makedirs(feature_path)
os.makedirs(biases_path)
feature.tofile(os.path.join(feature_path, 'feature.bin'))
biases.tofile(os.path.join(biases_path, 'biases.bin'))
np.save(y_test_path, y_test)
np.save(test_mask_path, test_mask) |
def test_encode_nibbles_variable_over_max() -> None:
'Variable field length is over maximum allowed'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
decoded = {'t': '0200', '2': '1234'}
with pytest.raises(iso8583.EncodeError, match='Field data is 8 nibbles, larger than maximum 4: field 2'):
iso8583.encode(decoded, spec=spec) | -3,900,687,380,426,086,400 | Variable field length is over maximum allowed | tests/test_nibbles.py | test_encode_nibbles_variable_over_max | knovichikhin/pyiso8583 | python | def test_encode_nibbles_variable_over_max() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
decoded = {'t': '0200', '2': '1234'}
with pytest.raises(iso8583.EncodeError, match='Field data is 8 nibbles, larger than maximum 4: field 2'):
iso8583.encode(decoded, spec=spec) |
def test_encode_nibbles_fixed_partial() -> None:
'Fixed field is provided partially'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
decoded = {'t': '0200', '2': '1'}
with pytest.raises(iso8583.EncodeError, match='Field data is 2 nibbles, expecting 4: field 2'):
iso8583.encode(decoded, spec=spec) | 6,831,563,969,507,959,000 | Fixed field is provided partially | tests/test_nibbles.py | test_encode_nibbles_fixed_partial | knovichikhin/pyiso8583 | python | def test_encode_nibbles_fixed_partial() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
decoded = {'t': '0200', '2': '1'}
with pytest.raises(iso8583.EncodeError, match='Field data is 2 nibbles, expecting 4: field 2'):
iso8583.encode(decoded, spec=spec) |
def test_encode_nibbles_fixed_missing() -> None:
'Fixed field is missing'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
decoded = {'t': '0200', '2': ''}
with pytest.raises(iso8583.EncodeError, match='Field data is 0 nibbles, expecting 4: field 2'):
iso8583.encode(decoded, spec=spec) | -2,089,517,104,892,656,000 | Fixed field is missing | tests/test_nibbles.py | test_encode_nibbles_fixed_missing | knovichikhin/pyiso8583 | python | def test_encode_nibbles_fixed_missing() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
decoded = {'t': '0200', '2': ''}
with pytest.raises(iso8583.EncodeError, match='Field data is 0 nibbles, expecting 4: field 2'):
iso8583.encode(decoded, spec=spec) |
def test_decode_nibbles_variable_over_max() -> None:
'Variable field length is over maximum allowed'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'02004000000000000000081234'
with pytest.raises(iso8583.DecodeError, match='Field data is 8 nibbles, larger than maximum 4: field 2 pos 20'):
iso8583.decode(s, spec=spec) | -2,357,995,680,247,013,000 | Variable field length is over maximum allowed | tests/test_nibbles.py | test_decode_nibbles_variable_over_max | knovichikhin/pyiso8583 | python | def test_decode_nibbles_variable_over_max() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'02004000000000000000081234'
with pytest.raises(iso8583.DecodeError, match='Field data is 8 nibbles, larger than maximum 4: field 2 pos 20'):
iso8583.decode(s, spec=spec) |
def test_decode_nibbles_variable_partial() -> None:
'Variable field is provided partially'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'02004000000000000000041'
with pytest.raises(iso8583.DecodeError, match='Field data is 2 nibbles, expecting 4: field 2 pos 22'):
iso8583.decode(s, spec=spec) | 3,606,540,515,098,440,700 | Variable field is provided partially | tests/test_nibbles.py | test_decode_nibbles_variable_partial | knovichikhin/pyiso8583 | python | def test_decode_nibbles_variable_partial() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'02004000000000000000041'
with pytest.raises(iso8583.DecodeError, match='Field data is 2 nibbles, expecting 4: field 2 pos 22'):
iso8583.decode(s, spec=spec) |
def test_decode_nibbles_variable_missing() -> None:
'Variable field is missing'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'0200400000000000000004'
with pytest.raises(iso8583.DecodeError, match='Field data is 0 nibbles, expecting 4: field 2 pos 22'):
iso8583.decode(s, spec=spec) | -7,780,357,992,181,737,000 | Variable field is missing | tests/test_nibbles.py | test_decode_nibbles_variable_missing | knovichikhin/pyiso8583 | python | def test_decode_nibbles_variable_missing() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'0200400000000000000004'
with pytest.raises(iso8583.DecodeError, match='Field data is 0 nibbles, expecting 4: field 2 pos 22'):
iso8583.decode(s, spec=spec) |
def test_decode_nibbles_fixed_partial() -> None:
'Fixed field is provided partially'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'020040000000000000001'
with pytest.raises(iso8583.DecodeError, match='Field data is 2 nibbles, expecting 4: field 2 pos 20'):
iso8583.decode(s, spec=spec) | -8,831,870,260,704,048,000 | Fixed field is provided partially | tests/test_nibbles.py | test_decode_nibbles_fixed_partial | knovichikhin/pyiso8583 | python | def test_decode_nibbles_fixed_partial() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'020040000000000000001'
with pytest.raises(iso8583.DecodeError, match='Field data is 2 nibbles, expecting 4: field 2 pos 20'):
iso8583.decode(s, spec=spec) |
def test_decode_nibbles_fixed_missing() -> None:
'Fixed field is missing'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'02004000000000000000'
with pytest.raises(iso8583.DecodeError, match='Field data is 0 nibbles, expecting 4: field 2 pos 20'):
iso8583.decode(s, spec=spec) | 7,520,147,214,975,566,000 | Fixed field is missing | tests/test_nibbles.py | test_decode_nibbles_fixed_missing | knovichikhin/pyiso8583 | python | def test_decode_nibbles_fixed_missing() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['len_count'] = 'nibbles'
s = b'02004000000000000000'
with pytest.raises(iso8583.DecodeError, match='Field data is 0 nibbles, expecting 4: field 2 pos 20'):
iso8583.decode(s, spec=spec) |
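The tests above exercise only the error paths; the sketch below is the matching happy path with the same spec tweaks. It reuses the calls visible in the records (deep-copying iso8583.specs.default, then iso8583.encode/decode); max_len is simply raised so the sample field fits, and the field value is invented.

    import copy

    import iso8583
    import iso8583.specs

    spec = copy.deepcopy(iso8583.specs.default)
    spec['t']['data_enc'] = 'ascii'
    spec['p']['data_enc'] = 'ascii'
    spec['2']['data_enc'] = 'ascii'
    spec['2']['len_enc'] = 'ascii'
    spec['2']['len_type'] = 2
    spec['2']['max_len'] = 8            # 8 nibbles = 4 ASCII characters
    spec['2']['len_count'] = 'nibbles'

    decoded = {'t': '0200', '2': '1234'}   # 4 characters -> 8 nibbles, within max_len
    iso8583.encode(decoded, spec=spec)     # should not raise

    s = b'02004000000000000000081234'      # same bytes as the over-max decode test above
    iso8583.decode(s, spec=spec)           # decodes now that max_len allows 8 nibbles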
@property
def energy_thresh_lo(self):
'Low energy threshold'
return (self.meta['LO_THRES'] * u.TeV) | -6,215,233,484,041,799,000 | Low energy threshold | gammapy/irf/psf/gauss.py | energy_thresh_lo | mdebony/gammapy | python | @property
def energy_thresh_lo(self):
return (self.meta['LO_THRES'] * u.TeV) |
@property
def energy_thresh_hi(self):
'High energy threshold'
return (self.meta['HI_THRES'] * u.TeV) | 4,512,721,132,327,294,000 | High energy threshold | gammapy/irf/psf/gauss.py | energy_thresh_hi | mdebony/gammapy | python | @property
def energy_thresh_hi(self):
return (self.meta['HI_THRES'] * u.TeV) |
@classmethod
def read(cls, filename, hdu='PSF_2D_GAUSS'):
'Create `EnergyDependentMultiGaussPSF` from FITS file.\n\n Parameters\n ----------\n filename : str\n File name\n '
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu]) | -1,094,260,843,814,858,400 | Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name | gammapy/irf/psf/gauss.py | read | mdebony/gammapy | python | @classmethod
def read(cls, filename, hdu='PSF_2D_GAUSS'):
'Create `EnergyDependentMultiGaussPSF` from FITS file.\n\n Parameters\n ----------\n filename : str\n File name\n '
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu]) |
@classmethod
def from_table_hdu(cls, hdu):
'Create `EnergyDependentMultiGaussPSF` from HDU list.\n\n Parameters\n ----------\n hdu : `~astropy.io.fits.BinTableHDU`\n HDU\n '
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(table, column_prefix='ENERG', format='gadf-dl3')
offset_axis = MapAxis.from_table(table, column_prefix='THETA', format='gadf-dl3')
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ['SIGMA_1', 'SIGMA_2', 'SIGMA_3']:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
norms = []
for key in ['SCALE', 'AMPL_2', 'AMPL_3']:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(energy_axis_true=energy_axis_true, offset_axis=offset_axis, sigmas=sigmas, norms=norms, meta=dict(hdu.header)) | -4,669,775,591,258,652,000 | Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU | gammapy/irf/psf/gauss.py | from_table_hdu | mdebony/gammapy | python | @classmethod
def from_table_hdu(cls, hdu):
'Create `EnergyDependentMultiGaussPSF` from HDU list.\n\n Parameters\n ----------\n hdu : `~astropy.io.fits.BinTableHDU`\n HDU\n '
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(table, column_prefix='ENERG', format='gadf-dl3')
offset_axis = MapAxis.from_table(table, column_prefix='THETA', format='gadf-dl3')
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ['SIGMA_1', 'SIGMA_2', 'SIGMA_3']:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
norms = []
for key in ['SCALE', 'AMPL_2', 'AMPL_3']:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(energy_axis_true=energy_axis_true, offset_axis=offset_axis, sigmas=sigmas, norms=norms, meta=dict(hdu.header)) |
def to_hdulist(self):
'\n Convert psf table data to FITS hdu list.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n PSF in HDU list format.\n '
names = ['SCALE', 'SIGMA_1', 'AMPL_2', 'SIGMA_2', 'AMPL_3', 'SIGMA_3']
units = ['', 'deg', '', 'deg', '', 'deg']
data = [self.norms[0], self.sigmas[0], self.norms[1], self.sigmas[1], self.norms[2], self.sigmas[2]]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format='gadf-dl3')
for (name_, data_, unit_) in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu]) | -3,263,587,897,682,827,300 | Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format. | gammapy/irf/psf/gauss.py | to_hdulist | mdebony/gammapy | python | def to_hdulist(self):
'\n Convert psf table data to FITS hdu list.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n PSF in HDU list format.\n '
names = ['SCALE', 'SIGMA_1', 'AMPL_2', 'SIGMA_2', 'AMPL_3', 'SIGMA_3']
units = ['', 'deg', '', 'deg', '', 'deg']
data = [self.norms[0], self.sigmas[0], self.norms[1], self.sigmas[1], self.norms[2], self.sigmas[2]]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format='gadf-dl3')
for (name_, data_, unit_) in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu]) |
def write(self, filename, *args, **kwargs):
'Write PSF to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n '
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs) | 2,628,976,963,919,575,000 | Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments. | gammapy/irf/psf/gauss.py | write | mdebony/gammapy | python | def write(self, filename, *args, **kwargs):
'Write PSF to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n '
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs) |
def psf_at_energy_and_theta(self, energy, theta):
'\n Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.\n\n No interpolation is used.\n\n Parameters\n ----------\n energy : `~astropy.units.u.Quantity`\n Energy at which a PSF is requested.\n theta : `~astropy.coordinates.Angle`\n Offset angle at which a PSF is requested.\n\n Returns\n -------\n psf : `~gammapy.utils.gauss.MultiGauss2D`\n Multigauss PSF object.\n '
energy = u.Quantity(energy)
theta = u.Quantity(theta)
(sigmas, norms) = ([], [])
pars = {'A_1': 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for (name, interp_norm) in zip(['scale', 'A_2', 'A_3'], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for (idx, sigma) in enumerate(sigmas):
a = pars[f'A_{(idx + 1)}']
norm = (((pars['scale'] * 2) * a) * (sigma ** 2))
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m | 6,419,885,130,308,105,000 | Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.
No interpolation is used.
Parameters
----------
energy : `~astropy.units.u.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object. | gammapy/irf/psf/gauss.py | psf_at_energy_and_theta | mdebony/gammapy | python | def psf_at_energy_and_theta(self, energy, theta):
'\n Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.\n\n No interpolation is used.\n\n Parameters\n ----------\n energy : `~astropy.units.u.Quantity`\n Energy at which a PSF is requested.\n theta : `~astropy.coordinates.Angle`\n Offset angle at which a PSF is requested.\n\n Returns\n -------\n psf : `~gammapy.utils.gauss.MultiGauss2D`\n Multigauss PSF object.\n '
energy = u.Quantity(energy)
theta = u.Quantity(theta)
(sigmas, norms) = ([], [])
pars = {'A_1': 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for (name, interp_norm) in zip(['scale', 'A_2', 'A_3'], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for (idx, sigma) in enumerate(sigmas):
a = pars[f'A_{(idx + 1)}']
norm = (((pars['scale'] * 2) * a) * (sigma ** 2))
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m |
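A short sketch of evaluating the per-energy/offset model returned by psf_at_energy_and_theta, assuming `psf` is an EnergyDependentMultiGaussPSF instance such as the one loaded in the sketch above:

import astropy.units as u
from astropy.coordinates import Angle

gauss2d = psf.psf_at_energy_and_theta(energy=1 * u.TeV, theta=Angle(0.3, 'deg'))
print(gauss2d.sigmas, gauss2d.norms)     # parameters of the normalized multi-Gauss model
print(gauss2d.containment_radius(0.68))  # 68% containment radius of that model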
def containment_radius(self, energy, theta, fraction=0.68):
'Compute containment for all energy and theta values'
energies = u.Quantity(energy).flatten()
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for (idx, energy) in enumerate(energies):
for (jdx, theta) in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[(jdx, idx)] = psf.containment_radius(fraction)
except ValueError:
log.debug(f'Computing containment failed for energy = {energy:.2f} and theta={theta:.2f}')
log.debug(f'Sigmas: {psf.sigmas} Norms: {psf.norms}')
radius[(jdx, idx)] = np.nan
return Angle(radius, 'deg') | 2,057,220,733,141,044,700 | Compute containment for all energy and theta values | gammapy/irf/psf/gauss.py | containment_radius | mdebony/gammapy | python | def containment_radius(self, energy, theta, fraction=0.68):
energies = u.Quantity(energy).flatten()
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for (idx, energy) in enumerate(energies):
for (jdx, theta) in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[(jdx, idx)] = psf.containment_radius(fraction)
except ValueError:
log.debug(f'Computing containment failed for energy = {energy:.2f} and theta={theta:.2f}')
log.debug(f'Sigmas: {psf.sigmas} Norms: {psf.norms}')
radius[(jdx, idx)] = np.nan
return Angle(radius, 'deg') |
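The same assumed `psf` object can be queried on a small grid; following the loop above, the result is an Angle array of shape (n_theta, n_energy):

import astropy.units as u
from astropy.coordinates import Angle

r68 = psf.containment_radius(energy=[1, 10] * u.TeV, theta=Angle([0.0, 1.0], 'deg'), fraction=0.68)
print(r68.shape, r68)  # (2, 2) containment radii in deg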
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
'\n Plot containment image with energy and theta axes.\n\n Parameters\n ----------\n fraction : float\n Containment fraction between 0 and 1.\n add_cbar : bool\n Add a colorbar\n '
import matplotlib.pyplot as plt
ax = (plt.gca() if (ax is None) else ax)
energy = self.energy_axis_true.center
offset = self.offset_axis.center
containment = self.containment_radius(energy, offset, fraction)
kwargs.setdefault('cmap', 'GnBu')
kwargs.setdefault('vmin', np.nanmin(containment.value))
kwargs.setdefault('vmax', np.nanmax(containment.value))
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
ax.semilogx()
ax.set_ylabel(f'Offset ({offset.unit})')
ax.set_xlabel(f'Energy ({energy.unit})')
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f'Containment radius R{(100 * fraction):.0f} ({containment.unit})'
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax | -2,834,756,856,326,981,600 | Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar | gammapy/irf/psf/gauss.py | plot_containment | mdebony/gammapy | python | def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
'\n Plot containment image with energy and theta axes.\n\n Parameters\n ----------\n fraction : float\n Containment fraction between 0 and 1.\n add_cbar : bool\n Add a colorbar\n '
import matplotlib.pyplot as plt
ax = (plt.gca() if (ax is None) else ax)
energy = self.energy_axis_true.center
offset = self.offset_axis.center
containment = self.containment_radius(energy, offset, fraction)
kwargs.setdefault('cmap', 'GnBu')
kwargs.setdefault('vmin', np.nanmin(containment.value))
kwargs.setdefault('vmax', np.nanmax(containment.value))
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
ax.semilogx()
ax.set_ylabel(f'Offset ({offset.unit})')
ax.set_xlabel(f'Energy ({energy.unit})')
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f'Containment radius R{(100 * fraction):.0f} ({containment.unit})'
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax |
def _plot_safe_energy_range(self, ax):
'add safe energy range lines to the plot'
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f'Safe energy threshold: {esafe:3.2f}'
ax.text(x=(1.1 * esafe.value), y=0.3, s=label, va='top') | 1,833,843,675,341,529,300 | add safe energy range lines to the plot | gammapy/irf/psf/gauss.py | _plot_safe_energy_range | mdebony/gammapy | python | def _plot_safe_energy_range(self, ax):
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f'Safe energy threshold: {esafe:3.2f}'
ax.text(x=(1.1 * esafe.value), y=0.3, s=label, va='top') |
def plot_containment_vs_energy(self, fractions=[0.68, 0.95], thetas=Angle([0, 1], 'deg'), ax=None, **kwargs):
'Plot containment fraction as a function of energy.\n '
import matplotlib.pyplot as plt
ax = (plt.gca() if (ax is None) else ax)
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault('label', f'{theta.deg} deg, {(100 * fraction):.1f}%')
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc='best')
ax.set_xlabel('Energy (TeV)')
ax.set_ylabel('Containment radius (deg)') | -9,218,549,933,889,060,000 | Plot containment fraction as a function of energy. | gammapy/irf/psf/gauss.py | plot_containment_vs_energy | mdebony/gammapy | python | def plot_containment_vs_energy(self, fractions=[0.68, 0.95], thetas=Angle([0, 1], 'deg'), ax=None, **kwargs):
'\n '
import matplotlib.pyplot as plt
ax = (plt.gca() if (ax is None) else ax)
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault('label', f'{theta.deg} deg, {(100 * fraction):.1f}%')
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc='best')
ax.set_xlabel('Energy (TeV)')
ax.set_ylabel('Containment radius (deg)') |
def peek(self, figsize=(15, 5)):
'Quick-look summary plots.'
import matplotlib.pyplot as plt
(fig, axes) = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
plt.tight_layout() | 4,996,040,905,371,016,000 | Quick-look summary plots. | gammapy/irf/psf/gauss.py | peek | mdebony/gammapy | python | def peek(self, figsize=(15, 5)):
import matplotlib.pyplot as plt
(fig, axes) = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
plt.tight_layout() |
def info(self, fractions=[0.68, 0.95], energies=u.Quantity([1.0, 10.0], 'TeV'), thetas=u.Quantity([0.0], 'deg')):
'\n Print PSF summary info.\n\n The containment radius for given fraction, energies and thetas is\n computed and printed on the command line.\n\n Parameters\n ----------\n fractions : list\n Containment fraction to compute containment radius for.\n energies : `~astropy.units.u.Quantity`\n Energies to compute containment radius for.\n thetas : `~astropy.units.u.Quantity`\n Thetas to compute containment radius for.\n\n Returns\n -------\n ss : string\n Formatted string containing the summary info.\n '
ss = '\nSummary PSF info\n'
ss += '----------------\n'
ss += array_stats_str(self.offset_axis.center.to('deg'), 'Theta')
ss += array_stats_str(self.energy_axis_true.edges[1:], 'Energy hi')
ss += array_stats_str(self.energy_axis_true.edges[:(- 1)], 'Energy lo')
ss += f'''Safe energy threshold lo: {self.energy_thresh_lo:6.3f}
'''
ss += f'''Safe energy threshold hi: {self.energy_thresh_hi:6.3f}
'''
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for (i, energy) in enumerate(energies):
for (j, theta) in enumerate(thetas):
radius = containment[(j, i)]
ss += '{:2.0f}% containment radius at theta = {} and E = {:4.1f}: {:5.8f}\n'.format((100 * fraction), theta, energy, radius)
return ss | -5,399,679,831,764,795,000 | Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.u.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.u.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info. | gammapy/irf/psf/gauss.py | info | mdebony/gammapy | python | def info(self, fractions=[0.68, 0.95], energies=u.Quantity([1.0, 10.0], 'TeV'), thetas=u.Quantity([0.0], 'deg')):
'\n Print PSF summary info.\n\n The containment radius for given fraction, energies and thetas is\n computed and printed on the command line.\n\n Parameters\n ----------\n fractions : list\n Containment fraction to compute containment radius for.\n energies : `~astropy.units.u.Quantity`\n Energies to compute containment radius for.\n thetas : `~astropy.units.u.Quantity`\n Thetas to compute containment radius for.\n\n Returns\n -------\n ss : string\n Formatted string containing the summary info.\n '
ss = '\nSummary PSF info\n'
ss += '----------------\n'
ss += array_stats_str(self.offset_axis.center.to('deg'), 'Theta')
ss += array_stats_str(self.energy_axis_true.edges[1:], 'Energy hi')
ss += array_stats_str(self.energy_axis_true.edges[:(- 1)], 'Energy lo')
ss += f'Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n'
ss += f'Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n'
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for (i, energy) in enumerate(energies):
for (j, theta) in enumerate(thetas):
radius = containment[(j, i)]
ss += '{:2.0f}% containment radius at theta = {} and E = {:4.1f}: {:5.8f}\n'.format((100 * fraction), theta, energy, radius)
return ss |
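A sketch of printing the summary with the argument style shown in the signature, again assuming `psf` exists:

import astropy.units as u

print(psf.info(fractions=[0.68, 0.95], energies=u.Quantity([1.0, 10.0], 'TeV'), thetas=u.Quantity([0.0], 'deg')))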
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"Convert triple Gaussian PSF ot table PSF.\n\n Parameters\n ----------\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. Default theta = 0 deg\n rad : `~astropy.coordinates.Angle`\n Offset from PSF center used for evaluating the PSF on a grid.\n Default offset = [0, 0.005, ..., 1.495, 1.5] deg.\n exposure : `~astropy.units.u.Quantity`\n Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.\n Default exposure = 1.\n\n Returns\n -------\n tabe_psf : `~gammapy.irf.EnergyDependentTablePSF`\n Instance of `EnergyDependentTablePSF`.\n "
energies = self.energy_axis_true.center
if (theta is None):
theta = Angle(0, 'deg')
else:
theta = Angle(theta)
if (rad is None):
rad = Angle(np.arange(0, 1.5, 0.005), 'deg')
rad_axis = MapAxis.from_nodes(rad, name='rad')
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), 'deg^-2')
for (idx, energy) in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), 'deg^-2')
return EnergyDependentTablePSF(energy_axis_true=self.energy_axis_true, rad_axis=rad_axis, exposure=exposure, data=psf_value) | -273,039,063,780,967,460 | Convert triple Gaussian PSF to table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.u.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
tabe_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`. | gammapy/irf/psf/gauss.py | to_energy_dependent_table_psf | mdebony/gammapy | python | def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"Convert triple Gaussian PSF ot table PSF.\n\n Parameters\n ----------\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. Default theta = 0 deg\n rad : `~astropy.coordinates.Angle`\n Offset from PSF center used for evaluating the PSF on a grid.\n Default offset = [0, 0.005, ..., 1.495, 1.5] deg.\n exposure : `~astropy.units.u.Quantity`\n Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.\n Default exposure = 1.\n\n Returns\n -------\n tabe_psf : `~gammapy.irf.EnergyDependentTablePSF`\n Instance of `EnergyDependentTablePSF`.\n "
energies = self.energy_axis_true.center
if (theta is None):
theta = Angle(0, 'deg')
else:
theta = Angle(theta)
if (rad is None):
rad = Angle(np.arange(0, 1.5, 0.005), 'deg')
rad_axis = MapAxis.from_nodes(rad, name='rad')
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), 'deg^-2')
for (idx, energy) in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), 'deg^-2')
return EnergyDependentTablePSF(energy_axis_true=self.energy_axis_true, rad_axis=rad_axis, exposure=exposure, data=psf_value) |
def to_psf3d(self, rad=None):
'Create a PSF3D from an analytical PSF.\n\n Parameters\n ----------\n rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`\n the array of position errors (rad) on which the PSF3D will be defined\n\n Returns\n -------\n psf3d : `~gammapy.irf.PSF3D`\n the PSF3D. It will be defined on the same energy and offset values than the input psf.\n '
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if (rad is None):
rad = (np.linspace(0, 0.66, 67) * u.deg)
rad_axis = MapAxis.from_edges(rad, name='rad')
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = (np.zeros(shape) * u.Unit('sr-1'))
for (idx, offset) in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(energy_axis_true=self.energy_axis_true, rad_axis=rad_axis, offset_axis=self.offset_axis, data=psf_value, meta=self.meta.copy()) | -6,570,291,098,575,002,000 | Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`
the array of position errors (rad) on which the PSF3D will be defined
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
the PSF3D. It will be defined on the same energy and offset values as the input psf.
'Create a PSF3D from an analytical PSF.\n\n Parameters\n ----------\n rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`\n the array of position errors (rad) on which the PSF3D will be defined\n\n Returns\n -------\n psf3d : `~gammapy.irf.PSF3D`\n the PSF3D. It will be defined on the same energy and offset values than the input psf.\n '
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if (rad is None):
rad = (np.linspace(0, 0.66, 67) * u.deg)
rad_axis = MapAxis.from_edges(rad, name='rad')
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = (np.zeros(shape) * u.Unit('sr-1'))
for (idx, offset) in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(energy_axis_true=self.energy_axis_true, rad_axis=rad_axis, offset_axis=self.offset_axis, data=psf_value, meta=self.meta.copy()) |
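A sketch of the two conversions defined above, again assuming `psf` already exists; the rad grid passed to to_psf3d is an arbitrary illustration:

import numpy as np
import astropy.units as u
from astropy.coordinates import Angle

table_psf = psf.to_energy_dependent_table_psf(theta=Angle(0.5, 'deg'))
psf3d = psf.to_psf3d(rad=np.linspace(0, 0.5, 101) * u.deg)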
def read_model(model_path, weigths_path):
'Load your pretrained model\n\t'
model = model_from_json(open(model_path).read())
model.load_weights(weigths_path)
return model | -8,900,926,367,652,668,000 | Load your pretrained model | demo.py | read_model | ijinmao/CAM-Localization | python | def read_model(model_path, weigths_path):
'\n\t'
model = model_from_json(open(model_path).read())
model.load_weights(weigths_path)
return model |
def train_cam_model(X_train, Y_train, X_test, Y_test, batch_size, nb_epoch):
'Train CAM model based on your pretrained model\n\n\t# Arguments\n\t\tmodel: your pretrained model, CAM model is trained based on this model.\n\n\t'
pretrained_model_path = ''
pretrained_weights_path = ''
pretrained_model_name = 'VGG16'
num_classes = 10
gap_spacial_size = 14
if (pretrained_model_name == 'VGG16'):
in_layer_name = 'block5_conv3'
elif (pretrained_model_name == 'InceptionV3'):
in_layer_name = 'batchnormalization_921'
elif (pretrained_model_name == 'ResNet50'):
in_layer_name = 'merge_13'
else:
in_layer_name = ''
pretrained_model = read_model(pretrained_model_path, pretrained_weights_path)
model = create_cam_model(pretrained_model, gap_spacial_size, num_classes, in_layer_name, CAM_CONV_LAYER)
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, shuffle=True, verbose=1, validation_data=(X_test, Y_test))
model.save_weights('')
return model | 8,713,368,420,113,241,000 | Train CAM model based on your pretrained model
# Arguments
model: your pretrained model, CAM model is trained based on this model. | demo.py | train_cam_model | ijinmao/CAM-Localization | python | def train_cam_model(X_train, Y_train, X_test, Y_test, batch_size, nb_epoch):
'Train CAM model based on your pretrained model\n\n\t# Arguments\n\t\tmodel: your pretrained model, CAM model is trained based on this model.\n\n\t'
pretrained_model_path =
pretrained_weights_path =
pretrained_model_name = 'VGG16'
num_classes = 10
gap_spacial_size = 14
if (pretrained_model_name == 'VGG16'):
in_layer_name = 'block5_conv3'
elif (pretrained_model_name == 'InceptionV3'):
in_layer_name = 'batchnormalization_921'
elif (pretrained_model_name == 'ResNet50'):
in_layer_name = 'merge_13'
else:
in_layer_name = ''
pretrained_model = read_model(pretrained_model_path, pretrained_weights_path)
model = create_cam_model(pretrained_model, gap_spacial_size, num_classes, in_layer_name, CAM_CONV_LAYER)
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, shuffle=True, verbose=1, validation_data=(X_test, Y_test))
model.save_weights('')
return model |
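The repository-specific helpers (create_cam_model, get_cam_img) are not shown in this record, but the class-activation-map computation they rely on can be sketched framework-free: weight the last conv feature maps by the GAP-to-softmax weights of the target class. The shapes below are illustrative assumptions:

import numpy as np

def compute_cam(feature_maps, class_weights):
    # feature_maps: (H, W, C) activations of the conv layer feeding the GAP
    # class_weights: (C,) dense-layer weights for the class of interest
    cam = np.einsum('hwc,c->hw', feature_maps, class_weights)
    cam -= cam.min()
    if cam.max() > 0:
        cam /= cam.max()  # normalize to [0, 1] before resizing/overlaying
    return cam

heatmap = compute_cam(np.random.rand(14, 14, 512), np.random.rand(512))  # toy call
print(heatmap.shape)  # (14, 14), to be resized to the input image size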
def cam_model():
'\n\tReturn your trained CAM model\n\t'
return | 4,348,099,947,783,747,000 | Return your trained CAM model | demo.py | cam_model | ijinmao/CAM-Localization | python | def cam_model():
'\n\t\n\t'
return |
def plot_cam_map(img_path, img_size, batch_size, label_plot):
'Plot class activation map.\n\n\t'
gap_spacial_size = 14
model = cam_model()
im_ori = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
test_data = preprocess_image(img_path, img_size, expand_dims=True)
im_cam = get_cam_img(model, test_data, label_plot, CAM_CONV_LAYER, ratio=(img_size / gap_spacial_size))
if (im_cam.shape != im_ori[:, :, 0].shape):
im_cam = cv2.resize(im_cam, (img_size, img_size), cv2.INTER_LINEAR)
prediction_labels = model.predict(test_data.astype('float32'), batch_size=batch_size, verbose=1)
print('Info: Predictions:\n{}'.format(prediction_labels))
plt.imshow(im_ori)
plt.imshow(im_cam, cmap='jet', alpha=0.5, interpolation='bilinear')
plt.show() | -4,127,016,016,632,387,600 | Plot class activation map. | demo.py | plot_cam_map | ijinmao/CAM-Localization | python | def plot_cam_map(img_path, img_size, batch_size, label_plot):
'\n\n\t'
gap_spacial_size = 14
model = cam_model()
im_ori = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
test_data = preprocess_image(img_path, img_size, expand_dims=True)
im_cam = get_cam_img(model, test_data, label_plot, CAM_CONV_LAYER, ratio=(img_size / gap_spacial_size))
if (im_cam.shape != im_ori[:, :, 0].shape):
im_cam = cv2.resize(im_cam, (img_size, img_size), cv2.INTER_LINEAR)
prediction_labels = model.predict(test_data.astype('float32'), batch_size=batch_size, verbose=1)
print('Info: Predictions:\n{}'.format(prediction_labels))
plt.imshow(im_ori)
plt.imshow(im_cam, cmap='jet', alpha=0.5, interpolation='bilinear')
plt.show() |
def train(self, start_epoch, max_epoch):
'Generic training loops.'
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train() | -5,646,719,091,533,232,000 | Generic training loops. | dassl/engine/trainer.py | train | zhaoxin94/Dassl.pytorch | python | def train(self, start_epoch, max_epoch):
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train() |
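The loop above is a template-method pattern: subclasses override the hooks. A stand-alone toy stand-in (not the Dassl API) showing the same control flow:

class MiniTrainer:
    def before_train(self): print('set up loaders/optimizer')
    def before_epoch(self): pass
    def run_epoch(self): print(f'running epoch {self.epoch}')
    def after_epoch(self): pass
    def after_train(self): print('save checkpoint, evaluate')

    def train(self, start_epoch, max_epoch):
        self.before_train()
        for self.epoch in range(start_epoch, max_epoch):
            self.before_epoch()
            self.run_epoch()
            self.after_epoch()
        self.after_train()

MiniTrainer().train(0, 2)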
def check_cfg(self, cfg):
"Check whether some variables are set correctly for\n the trainer (optional).\n\n For example, a trainer might require a particular sampler\n for training such as 'RandomDomainSampler', so it is good\n to do the checking:\n\n assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'\n "
pass | -6,042,607,910,904,794,000 | Check whether some variables are set correctly for
the trainer (optional).
For example, a trainer might require a particular sampler
for training such as 'RandomDomainSampler', so it is good
to do the checking:
assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler' | dassl/engine/trainer.py | check_cfg | zhaoxin94/Dassl.pytorch | python | def check_cfg(self, cfg):
"Check whether some variables are set correctly for\n the trainer (optional).\n\n For example, a trainer might require a particular sampler\n for training such as 'RandomDomainSampler', so it is good\n to do the checking:\n\n assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'\n "
pass |
def build_data_loader(self):
'Create essential data-related attributes.\n\n What must be done in the re-implementation\n of this method:\n 1) initialize data manager\n 2) assign as attributes the data loaders\n 3) assign as attribute the number of classes\n '
self.dm = DataManager(self.cfg)
self.train_loader_x = self.dm.train_loader_x
self.train_loader_u = self.dm.train_loader_u
self.val_loader = self.dm.val_loader
self.test_loader = self.dm.test_loader
self.num_classes = self.dm.num_classes | -6,602,930,038,107,107,000 | Create essential data-related attributes.
What must be done in the re-implementation
of this method:
1) initialize data manager
2) assign as attributes the data loaders
3) assign as attribute the number of classes | dassl/engine/trainer.py | build_data_loader | zhaoxin94/Dassl.pytorch | python | def build_data_loader(self):
'Create essential data-related attributes.\n\n What must be done in the re-implementation\n of this method:\n 1) initialize data manager\n 2) assign as attributes the data loaders\n 3) assign as attribute the number of classes\n '
self.dm = DataManager(self.cfg)
self.train_loader_x = self.dm.train_loader_x
self.train_loader_u = self.dm.train_loader_u
self.val_loader = self.dm.val_loader
self.test_loader = self.dm.test_loader
self.num_classes = self.dm.num_classes |
def build_model(self):
'Build and register model.\n\n The default builds a classification model along with its\n optimizer and scheduler.\n\n Custom trainers can re-implement this method if necessary.\n '
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
print('# params: {:,}'.format(count_num_param(self.model)))
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model('model', self.model, self.optim, self.sched) | -4,805,539,644,684,228,000 | Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary. | dassl/engine/trainer.py | build_model | zhaoxin94/Dassl.pytorch | python | def build_model(self):
'Build and register model.\n\n The default builds a classification model along with its\n optimizer and scheduler.\n\n Custom trainers can re-implement this method if necessary.\n '
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
print('# params: {:,}'.format(count_num_param(self.model)))
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model('model', self.model, self.optim, self.sched) |
@torch.no_grad()
def test(self, split=None, return_per_class_results=False):
'A generic testing pipeline.'
self.set_model_mode('eval')
self.evaluator.reset()
if (split is None):
split = self.cfg.TEST.SPLIT
if ((split == 'val') and (self.val_loader is not None)):
data_loader = self.val_loader
print('Do evaluation on {} set'.format(split))
else:
data_loader = self.test_loader
print('Do evaluation on test set')
for (batch_idx, batch) in enumerate(data_loader):
(input, label) = self.parse_batch_test(batch)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
for (k, v) in results.items():
if (k == 'perclass_accuracies'):
continue
tag = '{}/{}'.format(split, k)
self.write_scalar(tag, v, self.epoch)
if (not return_per_class_results):
return list(results.values())[0]
else:
return (results['accuracy'], results['perclass_accuracies']) | 8,053,161,361,375,309,000 | A generic testing pipeline. | dassl/engine/trainer.py | test | zhaoxin94/Dassl.pytorch | python | @torch.no_grad()
def test(self, split=None, return_per_class_results=False):
self.set_model_mode('eval')
self.evaluator.reset()
if (split is None):
split = self.cfg.TEST.SPLIT
if ((split == 'val') and (self.val_loader is not None)):
data_loader = self.val_loader
print('Do evaluation on {} set'.format(split))
else:
data_loader = self.test_loader
print('Do evaluation on test set')
for (batch_idx, batch) in enumerate(data_loader):
(input, label) = self.parse_batch_test(batch)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
for (k, v) in results.items():
if (k == 'perclass_accuracies'):
continue
tag = '{}/{}'.format(split, k)
self.write_scalar(tag, v, self.epoch)
if (not return_per_class_results):
return list(results.values())[0]
else:
return (results['accuracy'], results['perclass_accuracies']) |
@property
def method(self) -> str:
'HTTP method used for the request'
return self._environ['REQUEST_METHOD'] | 5,419,372,049,588,608,000 | HTTP method used for the request | pogweb/models.py | method | ahnaf-zamil/pogweb | python | @property
def method(self) -> str:
return self._environ['REQUEST_METHOD'] |
@property
def endpoint(self) -> str:
'The route/endpoint used for that specific request'
return self._environ['PATH_INFO'] | -2,896,867,337,130,264,000 | The route/endpoint used for that specific request | pogweb/models.py | endpoint | ahnaf-zamil/pogweb | python | @property
def endpoint(self) -> str:
return self._environ['PATH_INFO'] |
@property
def query_args(self) -> ImmutableDict:
'Query arguments from the request'
args = self._environ['QUERY_STRING']
if (not args):
return ImmutableDict({})
args = args.split('&')
query_args = {}
for _arg in args:
(name, value) = _arg.split('=')
query_args[name] = value
return ImmutableDict(query_args) | 6,587,376,489,600,675,000 | Query arguments from the request | pogweb/models.py | query_args | ahnaf-zamil/pogweb | python | @property
def query_args(self) -> ImmutableDict:
args = self._environ['QUERY_STRING']
if (not args):
return ImmutableDict({})
args = args.split('&')
query_args = {}
for _arg in args:
(name, value) = _arg.split('=')
query_args[name] = value
return ImmutableDict(query_args) |
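For simple name=value pairs, the manual split above behaves like the standard library's parse_qs; a self-contained check:

from urllib.parse import parse_qs

environ = {'QUERY_STRING': 'page=2&sort=desc'}  # minimal fake WSGI environ
manual = dict(pair.split('=') for pair in environ['QUERY_STRING'].split('&'))
stdlib = {k: v[0] for k, v in parse_qs(environ['QUERY_STRING']).items()}
print(manual == stdlib, manual)  # True {'page': '2', 'sort': 'desc'}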
@property
def form(self) -> typing.Optional[typing.Dict]:
'Form data sent via HTTP request'
data = self._environ.get('wsgi.input')
if data:
form_dict = parse_qs(data.getvalue().decode('utf-8'))
final_dict = {}
for (k, v) in form_dict.items():
final_dict[k] = v[0]
return ImmutableDict(final_dict) | 6,462,910,950,514,397,000 | Form data sent via HTTP request | pogweb/models.py | form | ahnaf-zamil/pogweb | python | @property
def form(self) -> typing.Optional[typing.Dict]:
data = self._environ.get('wsgi.input')
if data:
form_dict = parse_qs(data.getvalue().decode('utf-8'))
final_dict = {}
for (k, v) in form_dict.items():
final_dict[k] = v[0]
return ImmutableDict(final_dict) |
def get_entity(hass):
'Get the fan entity.'
return hass.states.get(FAN_ENTITY_ID) | 7,856,547,689,952,820,000 | Get the fan entity. | tests/components/demo/test_fan.py | get_entity | ActuallyRuben/home-assistant | python | def get_entity(hass):
return hass.states.get(FAN_ENTITY_ID) |
@pytest.fixture(autouse=True)
def setup_comp(hass):
'Initialize components.'
hass.loop.run_until_complete(async_setup_component(hass, fan.DOMAIN, {'fan': {'platform': 'demo'}})) | -959,499,954,790,526,500 | Initialize components. | tests/components/demo/test_fan.py | setup_comp | ActuallyRuben/home-assistant | python | @pytest.fixture(autouse=True)
def setup_comp(hass):
hass.loop.run_until_complete(async_setup_component(hass, fan.DOMAIN, {'fan': {'platform': 'demo'}})) |
async def test_turn_on(hass):
'Test turning on the device.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID, fan.SPEED_HIGH))
assert (STATE_ON == get_entity(hass).state)
assert (fan.SPEED_HIGH == get_entity(hass).attributes[fan.ATTR_SPEED]) | -8,242,091,094,631,456,000 | Test turning on the device. | tests/components/demo/test_fan.py | test_turn_on | ActuallyRuben/home-assistant | python | async def test_turn_on(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID, fan.SPEED_HIGH))
assert (STATE_ON == get_entity(hass).state)
assert (fan.SPEED_HIGH == get_entity(hass).attributes[fan.ATTR_SPEED]) |
async def test_turn_off(hass):
'Test turning off the device.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass, FAN_ENTITY_ID))
assert (STATE_OFF == get_entity(hass).state) | 9,193,625,582,298,587,000 | Test turning off the device. | tests/components/demo/test_fan.py | test_turn_off | ActuallyRuben/home-assistant | python | async def test_turn_off(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass, FAN_ENTITY_ID))
assert (STATE_OFF == get_entity(hass).state) |
async def test_turn_off_without_entity_id(hass):
'Test turning off all fans.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass))
assert (STATE_OFF == get_entity(hass).state) | 4,560,760,357,813,158,400 | Test turning off all fans. | tests/components/demo/test_fan.py | test_turn_off_without_entity_id | ActuallyRuben/home-assistant | python | async def test_turn_off_without_entity_id(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass))
assert (STATE_OFF == get_entity(hass).state) |