Dataset columns and value ranges:
- body: string (lengths 26 to 98.2k)
- body_hash: int64 (-9,222,864,604,528,158,000 to 9,221,803,474B)
- docstring: string (lengths 1 to 16.8k)
- path: string (lengths 5 to 230)
- name: string (lengths 1 to 96)
- repository_name: string (lengths 7 to 89)
- lang: string (1 distinct value)
- body_without_docstring: string (lengths 20 to 98.2k)
def pack_range(key, packing, grad_vars, rng): 'Form the concatenation of a specified range of gradient tensors.\n\n Args:\n key: Value under which to store meta-data in packing that will be used\n later to restore the grad_var list structure.\n packing: Dict holding data describing packed ranges of small tensors.\n grad_vars: List of (grad, var) pairs for one replica.\n rng: A pair of integers giving the first, last indices of a consecutive\n range of tensors to be packed.\n\n Returns:\n A tensor that is the concatenation of all the specified small tensors.\n ' to_pack = grad_vars[rng[0]:(rng[1] + 1)] members = [] variables = [] restore_shapes = [] with ops.name_scope('pack'): for (g, v) in to_pack: variables.append(v) restore_shapes.append(g.shape) with ops.device(g.device): members.append(array_ops.reshape(g, [(- 1)])) packing[key] = GradPackTuple(indices=range(rng[0], (rng[1] + 1)), vars=variables, shapes=restore_shapes) with ops.device(members[0].device): return array_ops.concat(members, 0)
-8,537,589,089,792,121,000
Form the concatenation of a specified range of gradient tensors. Args: key: Value under which to store meta-data in packing that will be used later to restore the grad_var list structure. packing: Dict holding data describing packed ranges of small tensors. grad_vars: List of (grad, var) pairs for one replica. rng: A pair of integers giving the first, last indices of a consecutive range of tensors to be packed. Returns: A tensor that is the concatenation of all the specified small tensors.
tensorflow/python/distribute/cross_device_utils.py
pack_range
DeuroIO/Deuro-tensorflow
python
def pack_range(key, packing, grad_vars, rng): 'Form the concatenation of a specified range of gradient tensors.\n\n Args:\n key: Value under which to store meta-data in packing that will be used\n later to restore the grad_var list structure.\n packing: Dict holding data describing packed ranges of small tensors.\n grad_vars: List of (grad, var) pairs for one replica.\n rng: A pair of integers giving the first, last indices of a consecutive\n range of tensors to be packed.\n\n Returns:\n A tensor that is the concatenation of all the specified small tensors.\n ' to_pack = grad_vars[rng[0]:(rng[1] + 1)] members = [] variables = [] restore_shapes = [] with ops.name_scope('pack'): for (g, v) in to_pack: variables.append(v) restore_shapes.append(g.shape) with ops.device(g.device): members.append(array_ops.reshape(g, [(- 1)])) packing[key] = GradPackTuple(indices=range(rng[0], (rng[1] + 1)), vars=variables, shapes=restore_shapes) with ops.device(members[0].device): return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt): 'Unpack a previously packed collection of gradient tensors.\n\n Args:\n gv: A (grad, var) pair to be unpacked.\n gpt: A GradPackTuple describing the packing operation that produced gv.\n\n Returns:\n A list of (grad, var) pairs corresponding to the values that were\n originally packed into gv, maybe following subsequent operations like\n reduction.\n ' elt_widths = [x.num_elements() for x in gpt.shapes] with ops.device(gv[0][0].device): with ops.name_scope('unpack'): splits = array_ops.split(gv[0], elt_widths) unpacked_gv = [] for (idx, s) in enumerate(splits): unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]), gpt.vars[idx])) return unpacked_gv
588,470,004,200,064,300
Unpack a previously packed collection of gradient tensors. Args: gv: A (grad, var) pair to be unpacked. gpt: A GradPackTuple describing the packing operation that produced gv. Returns: A list of (grad, var) pairs corresponding to the values that were originally packed into gv, maybe following subsequent operations like reduction.
tensorflow/python/distribute/cross_device_utils.py
unpack_grad_tuple
DeuroIO/Deuro-tensorflow
python
def unpack_grad_tuple(gv, gpt): 'Unpack a previously packed collection of gradient tensors.\n\n Args:\n gv: A (grad, var) pair to be unpacked.\n gpt: A GradPackTuple describing the packing operation that produced gv.\n\n Returns:\n A list of (grad, var) pairs corresponding to the values that were\n originally packed into gv, maybe following subsequent operations like\n reduction.\n ' elt_widths = [x.num_elements() for x in gpt.shapes] with ops.device(gv[0][0].device): with ops.name_scope('unpack'): splits = array_ops.split(gv[0], elt_widths) unpacked_gv = [] for (idx, s) in enumerate(splits): unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]), gpt.vars[idx])) return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0): "Concatenate small gradient tensors together for reduction.\n\n Args:\n replica_grads: List of lists of (gradient, variable) tuples.\n max_bytes: Int giving max number of bytes in a tensor that\n may be considered small.\n max_group: Int giving max number of small tensors that may be\n concatenated into one new tensor.\n\n Returns:\n new_replica_grads, packing where new_replica_grads is identical to\n replica_grads except that all feasible small_tensors have been removed\n from their places and concatenated into larger tensors that are\n now in the front of the list for each replica, and packing contains\n the data necessary to restore the replica_grads structure.\n\n Look through the first replica for gradients of the same type (float),\n and small size, that are all sequential. For each such group,\n replace by a new tensor that is a flattened concatenation. Note\n that the corresponding variable will be absent, which doesn't matter\n because it isn't used during all-reduce.\n\n Requires:\n Every gv_list in replicas must have isomorphic structure including identical\n tensor sizes and types.\n " small_indices = [] large_indices = [] for (idx, (g, _)) in enumerate(replica_grads[0]): if ((g.dtype == dtypes.float32) and ((4 * g.shape.num_elements()) <= max_bytes)): small_indices.append(idx) else: large_indices.append(idx) (small_ranges, small_singles) = extract_ranges(small_indices, range_size_limit=max_group) large_indices = sorted((large_indices + small_singles)) num_gv = len(replica_grads[0]) packing = {} if small_ranges: new_replica_grads = [] for (dev_idx, gv_list) in enumerate(replica_grads): assert (len(gv_list) == num_gv) new_gv_list = [] for r in small_ranges: key = ('%d:%d' % (dev_idx, len(new_gv_list))) new_gv_list.append((pack_range(key, packing, gv_list, r), 'packing_var_placeholder')) for i in large_indices: new_gv_list.append(gv_list[i]) new_replica_grads.append(new_gv_list) return (new_replica_grads, packing) else: return (replica_grads, None)
-3,869,403,477,617,599,000
Concatenate small gradient tensors together for reduction. Args: replica_grads: List of lists of (gradient, variable) tuples. max_bytes: Int giving max number of bytes in a tensor that may be considered small. max_group: Int giving max number of small tensors that may be concatenated into one new tensor. Returns: new_replica_grads, packing where new_replica_grads is identical to replica_grads except that all feasible small_tensors have been removed from their places and concatenated into larger tensors that are now in the front of the list for each replica, and packing contains the data necessary to restore the replica_grads structure. Look through the first replica for gradients of the same type (float), and small size, that are all sequential. For each such group, replace by a new tensor that is a flattened concatenation. Note that the corresponding variable will be absent, which doesn't matter because it isn't used during all-reduce. Requires: Every gv_list in replicas must have isomorphic structure including identical tensor sizes and types.
tensorflow/python/distribute/cross_device_utils.py
pack_small_tensors
DeuroIO/Deuro-tensorflow
python
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0): "Concatenate small gradient tensors together for reduction.\n\n Args:\n replica_grads: List of lists of (gradient, variable) tuples.\n max_bytes: Int giving max number of bytes in a tensor that\n may be considered small.\n max_group: Int giving max number of small tensors that may be\n concatenated into one new tensor.\n\n Returns:\n new_replica_grads, packing where new_replica_grads is identical to\n replica_grads except that all feasible small_tensors have been removed\n from their places and concatenated into larger tensors that are\n now in the front of the list for each replica, and packing contains\n the data necessary to restore the replica_grads structure.\n\n Look through the first replica for gradients of the same type (float),\n and small size, that are all sequential. For each such group,\n replace by a new tensor that is a flattened concatenation. Note\n that the corresponding variable will be absent, which doesn't matter\n because it isn't used during all-reduce.\n\n Requires:\n Every gv_list in replicas must have isomorphic structure including identical\n tensor sizes and types.\n " small_indices = [] large_indices = [] for (idx, (g, _)) in enumerate(replica_grads[0]): if ((g.dtype == dtypes.float32) and ((4 * g.shape.num_elements()) <= max_bytes)): small_indices.append(idx) else: large_indices.append(idx) (small_ranges, small_singles) = extract_ranges(small_indices, range_size_limit=max_group) large_indices = sorted((large_indices + small_singles)) num_gv = len(replica_grads[0]) packing = {} if small_ranges: new_replica_grads = [] for (dev_idx, gv_list) in enumerate(replica_grads): assert (len(gv_list) == num_gv) new_gv_list = [] for r in small_ranges: key = ('%d:%d' % (dev_idx, len(new_gv_list))) new_gv_list.append((pack_range(key, packing, gv_list, r), 'packing_var_placeholder')) for i in large_indices: new_gv_list.append(gv_list[i]) new_replica_grads.append(new_gv_list) return (new_replica_grads, packing) else: return (replica_grads, None)
def unpack_small_tensors(replica_grads, packing): 'Undo the structure alterations to replica_grads done by pack_small_tensors.\n\n Args:\n replica_grads: List of List of (grad, var) tuples.\n packing: A dict generated by pack_small_tensors describing the changes\n it made to replica_grads.\n\n Returns:\n new_replica_grads: identical to replica_grads except that concatenations\n of small tensors have been split apart and returned to their original\n positions, paired with their original variables.\n ' if (not packing): return replica_grads new_replica_grads = [] num_devices = len(replica_grads) num_packed = (len(packing.keys()) // num_devices) for (dev_idx, gv_list) in enumerate(replica_grads): gv_list = list(gv_list) new_gv_list = gv_list[num_packed:] for i in range(num_packed): k = ('%d:%d' % (dev_idx, i)) gpt = packing[k] gv = unpack_grad_tuple(gv_list[i], gpt) for (gi, idx) in enumerate(gpt.indices): assert (idx == gpt.indices[gi]) new_gv_list.insert(idx, gv[gi]) new_replica_grads.append(new_gv_list) return new_replica_grads
4,266,833,636,242,527,700
Undo the structure alterations to replica_grads done by pack_small_tensors. Args: replica_grads: List of List of (grad, var) tuples. packing: A dict generated by pack_small_tensors describing the changes it made to replica_grads. Returns: new_replica_grads: identical to replica_grads except that concatenations of small tensors have been split apart and returned to their original positions, paired with their original variables.
tensorflow/python/distribute/cross_device_utils.py
unpack_small_tensors
DeuroIO/Deuro-tensorflow
python
def unpack_small_tensors(replica_grads, packing): 'Undo the structure alterations to replica_grads done by pack_small_tensors.\n\n Args:\n replica_grads: List of List of (grad, var) tuples.\n packing: A dict generated by pack_small_tensors describing the changes\n it made to replica_grads.\n\n Returns:\n new_replica_grads: identical to replica_grads except that concatenations\n of small tensors have been split apart and returned to their original\n positions, paired with their original variables.\n ' if (not packing): return replica_grads new_replica_grads = [] num_devices = len(replica_grads) num_packed = (len(packing.keys()) // num_devices) for (dev_idx, gv_list) in enumerate(replica_grads): gv_list = list(gv_list) new_gv_list = gv_list[num_packed:] for i in range(num_packed): k = ('%d:%d' % (dev_idx, i)) gpt = packing[k] gv = unpack_grad_tuple(gv_list[i], gpt) for (gi, idx) in enumerate(gpt.indices): assert (idx == gpt.indices[gi]) new_gv_list.insert(idx, gv[gi]) new_replica_grads.append(new_gv_list) return new_replica_grads
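The four rows above (pack_range, unpack_grad_tuple, pack_small_tensors, unpack_small_tensors) all implement one idea: flatten a run of small gradient tensors into a single buffer so a collective reduction touches one tensor instead of many, then split and reshape afterwards to restore the original structure. The following is a minimal NumPy sketch of that round trip, not the TensorFlow implementation; the example arrays and the omitted all-reduce step are illustrative only.

import numpy as np

# Illustrative round trip: concatenate flattened "gradients" into one buffer
# (a single all-reduce would run on `packed` here), then split and reshape back.
grads = [np.ones((2, 3)), np.arange(4.0).reshape(2, 2), np.full((5,), 0.5)]
shapes = [g.shape for g in grads]                        # remembered for unpacking
packed = np.concatenate([g.reshape(-1) for g in grads])

widths = [int(np.prod(s)) for s in shapes]               # elements per original tensor
splits = np.split(packed, np.cumsum(widths)[:-1])
restored = [s.reshape(shape) for s, shape in zip(splits, shapes)]

assert all(np.array_equal(a, b) for a, b in zip(grads, restored))

In the real helpers, packing additionally stores a GradPackTuple per key so the unpack step can restore both the original shapes and the paired variables.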
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n): 'Aggregate tensors using `accumulation_fn` and IndexedSlices via concat.' if any((isinstance(v, ops.IndexedSlices) for v in values)): return gradients_impl._AggregateIndexedSlicesGradients(values) else: return accumulation_fn(values)
-8,755,428,239,939,372,000
Aggregate tensors using `accumulation_fn` and IndexedSlices via concat.
tensorflow/python/distribute/cross_device_utils.py
aggregate_tensors_or_indexed_slices
DeuroIO/Deuro-tensorflow
python
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n): if any((isinstance(v, ops.IndexedSlices) for v in values)): return gradients_impl._AggregateIndexedSlicesGradients(values) else: return accumulation_fn(values)
def contains_indexed_slices(value): 'Check whether the value is `IndexedSlices` or contains `IndexedSlices`.' if isinstance(value, ops.IndexedSlices): return True elif (isinstance(value, (list, tuple)) and value): return any((contains_indexed_slices(v) for v in value)) elif isinstance(value, value_lib.DistributedValues): return contains_indexed_slices(list(value._index.values())) else: return False
4,811,654,315,259,058,000
Check whether the value is `IndexedSlices` or contains `IndexedSlices`.
tensorflow/python/distribute/cross_device_utils.py
contains_indexed_slices
DeuroIO/Deuro-tensorflow
python
def contains_indexed_slices(value): if isinstance(value, ops.IndexedSlices): return True elif (isinstance(value, (list, tuple)) and value): return any((contains_indexed_slices(v) for v in value)) elif isinstance(value, value_lib.DistributedValues): return contains_indexed_slices(list(value._index.values())) else: return False
def __init__(self, group_key_start=1, instance_key_start=100, instance_key_with_id_start=10000): 'Initializes the object.\n\n Args:\n group_key_start: the starting integer of group key.\n instance_key_start: the starting integer of instance key.\n instance_key_with_id_start: the starting integer of instance key that is\n recorded with an id.\n ' self._group_key = group_key_start self._group_key_table = dict() self._instance_key_id_to_key_table = dict() self._instance_key_with_id_counter = instance_key_with_id_start self._instance_key_start = instance_key_start
-8,157,370,672,428,255,000
Initializes the object. Args: group_key_start: the starting integer of group key. instance_key_start: the starting integer of instance key. instance_key_with_id_start: the starting integer of instance key that is recorded with an id.
tensorflow/python/distribute/cross_device_utils.py
__init__
DeuroIO/Deuro-tensorflow
python
def __init__(self, group_key_start=1, instance_key_start=100, instance_key_with_id_start=10000): 'Initializes the object.\n\n Args:\n group_key_start: the starting integer of group key.\n instance_key_start: the starting integer of instance key.\n instance_key_with_id_start: the starting integer of instance key that is\n recorded with an id.\n ' self._group_key = group_key_start self._group_key_table = dict() self._instance_key_id_to_key_table = dict() self._instance_key_with_id_counter = instance_key_with_id_start self._instance_key_start = instance_key_start
def get_group_key(self, devices): 'Returns a group key for the set of devices.\n\n Args:\n devices: list of strings naming devices in a collective group.\n\n Returns:\n int key uniquely identifying the set of device names.\n ' parsed = [pydev.DeviceSpec.from_string(d) for d in devices] names = sorted([('%s:%d' % (d.device_type, d.device_index)) for d in parsed]) key_id = ','.join(names) with _lock: if (key_id not in self._group_key_table): new_key = self._group_key self._group_key += 1 self._group_key_table[key_id] = new_key return self._group_key_table[key_id]
-1,122,532,047,166,860,800
Returns a group key for the set of devices. Args: devices: list of strings naming devices in a collective group. Returns: int key uniquely identifying the set of device names.
tensorflow/python/distribute/cross_device_utils.py
get_group_key
DeuroIO/Deuro-tensorflow
python
def get_group_key(self, devices): 'Returns a group key for the set of devices.\n\n Args:\n devices: list of strings naming devices in a collective group.\n\n Returns:\n int key uniquely identifying the set of device names.\n ' parsed = [pydev.DeviceSpec.from_string(d) for d in devices] names = sorted([('%s:%d' % (d.device_type, d.device_index)) for d in parsed]) key_id = ','.join(names) with _lock: if (key_id not in self._group_key_table): new_key = self._group_key self._group_key += 1 self._group_key_table[key_id] = new_key return self._group_key_table[key_id]
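get_group_key above reduces a device list to an order-independent string and memoizes an increasing integer key for it under a module-level lock. A minimal self-contained sketch of that pattern, using a hypothetical GroupKeys class rather than TensorFlow's CollectiveKeys:

import threading

_lock = threading.Lock()

class GroupKeys:
    def __init__(self, start=1):
        self._next = start
        self._table = {}

    def get_group_key(self, devices):
        key_id = ','.join(sorted(devices))   # order-independent identity for the set
        with _lock:
            if key_id not in self._table:
                self._table[key_id] = self._next
                self._next += 1
            return self._table[key_id]

keys = GroupKeys()
assert keys.get_group_key(['gpu:0', 'gpu:1']) == keys.get_group_key(['gpu:1', 'gpu:0'])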
def get_instance_key(self, key_id=None): 'Returns a new instance key for use in defining a collective op.\n\n Args:\n key_id: optional string. If set, key will be recorded and the same key\n will be returned when the same key_id is provided. If not, an increasing\n instance key will be returned.\n ' if key_id: with _lock: if (key_id not in self._instance_key_id_to_key_table): self._instance_key_with_id_counter += 1 self._instance_key_id_to_key_table[key_id] = self._instance_key_with_id_counter return self._instance_key_id_to_key_table[key_id] else: v = self._get_thread_local_object().instance_key self._get_thread_local_object().instance_key += 1 return v
6,131,911,157,188,649,000
Returns a new instance key for use in defining a collective op. Args: key_id: optional string. If set, key will be recorded and the same key will be returned when the same key_id is provided. If not, an increasing instance key will be returned.
tensorflow/python/distribute/cross_device_utils.py
get_instance_key
DeuroIO/Deuro-tensorflow
python
def get_instance_key(self, key_id=None): 'Returns a new instance key for use in defining a collective op.\n\n Args:\n key_id: optional string. If set, key will be recorded and the same key\n will be returned when the same key_id is provided. If not, an increasing\n instance key will be returned.\n ' if key_id: with _lock: if (key_id not in self._instance_key_id_to_key_table): self._instance_key_with_id_counter += 1 self._instance_key_id_to_key_table[key_id] = self._instance_key_with_id_counter return self._instance_key_id_to_key_table[key_id] else: v = self._get_thread_local_object().instance_key self._get_thread_local_object().instance_key += 1 return v
def test_user_commands(): 'test all user commands.' assert (__main__.main(['user', 'create', 'ec2mc_test_user', 'setup_users', '--default']) is not False) sleep(5) assert (__main__.main(['user', 'list']) is not False) assert (__main__.main(['user', 'set_group', 'EC2MC_TEST_USER', 'basic_users']) is not False) assert (__main__.main(['user', 'be', 'takingitcasual']) is not False) assert (__main__.main(['user', 'rotate_key', 'Ec2Mc_TeSt_UsEr']) is not False) assert (__main__.main(['user', 'delete', 'eC2mC_tEsT_uSeR']) is not False)
1,676,041,740,222,803,000
test all user commands.
tests/test_user_commands.py
test_user_commands
TakingItCasual/easymc
python
def test_user_commands(): assert (__main__.main(['user', 'create', 'ec2mc_test_user', 'setup_users', '--default']) is not False) sleep(5) assert (__main__.main(['user', 'list']) is not False) assert (__main__.main(['user', 'set_group', 'EC2MC_TEST_USER', 'basic_users']) is not False) assert (__main__.main(['user', 'be', 'takingitcasual']) is not False) assert (__main__.main(['user', 'rotate_key', 'Ec2Mc_TeSt_UsEr']) is not False) assert (__main__.main(['user', 'delete', 'eC2mC_tEsT_uSeR']) is not False)
def coalesce(edge_index, edge_attr=None, num_nodes=None, reduce='add', is_sorted=False, sort_by_row=True): 'Row-wise sorts :obj:`edge_index` and removes its duplicated entries.\n Duplicate entries in :obj:`edge_attr` are merged by scattering them\n together according to the given :obj:`reduce` option.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-\n dimensional edge features.\n If given as a list, will re-shuffle and remove duplicates for all\n its entries. (default: :obj:`None`)\n num_nodes (int, optional): The number of nodes, *i.e.*\n :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)\n reduce (string, optional): The reduce operation to use for merging edge\n features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,\n :obj:`"mul"`). (default: :obj:`"add"`)\n is_sorted (bool, optional): If set to :obj:`True`, will expect\n :obj:`edge_index` to be already sorted row-wise.\n sort_by_row (bool, optional): If set to :obj:`False`, will sort\n :obj:`edge_index` column-wise.\n\n :rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else\n (:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)\n ' if tlx.is_tensor(edge_index): edge_index = tlx.convert_to_numpy(edge_index) nnz = edge_index.shape[1] num_nodes = maybe_num_nodes(edge_index, num_nodes) idx = np.zeros((nnz + 1)) idx[0] = (- 1) idx[1:] = edge_index[(1 - int(sort_by_row))] idx[1:] = np.add(np.multiply(idx[1:], num_nodes), edge_index[int(sort_by_row)]) if (not is_sorted): perm = np.argsort(idx[1:]) idx[1:] = np.sort(idx[1:]) edge_index = edge_index[:, perm] if ((edge_attr is not None) and tlx.ops.is_tensor(edge_attr)): edge_attr = tlx.gather(edge_attr, tlx.convert_to_tensor(perm), axis=0) elif ((edge_attr is not None) and check_is_numpy(edge_attr)): edge_attr = edge_attr[perm] elif (edge_attr is not None): edge_attr = [tlx.gather(e, perm, axis=0) for e in edge_attr] mask = (idx[1:] > idx[:(- 1)]) if mask.all(): edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64) return (edge_index if (edge_attr is None) else (edge_index, edge_attr)) edge_index = edge_index[:, mask] edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64) if (edge_attr is None): return edge_index idx = np.arange(0, nnz) idx = tlx.convert_to_tensor((idx - (1 - mask).cumsum(axis=0))) if tlx.ops.is_tensor(edge_attr): edge_attr = mpops.segment_sum(edge_attr, idx) return (edge_index, edge_attr)
-587,692,267,582,630,000
Row-wise sorts :obj:`edge_index` and removes its duplicated entries. Duplicate entries in :obj:`edge_attr` are merged by scattering them together according to the given :obj:`reduce` option. Args: edge_index (LongTensor): The edge indices. edge_attr (Tensor or List[Tensor], optional): Edge weights or multi- dimensional edge features. If given as a list, will re-shuffle and remove duplicates for all its entries. (default: :obj:`None`) num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) reduce (string, optional): The reduce operation to use for merging edge features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`"add"`) is_sorted (bool, optional): If set to :obj:`True`, will expect :obj:`edge_index` to be already sorted row-wise. sort_by_row (bool, optional): If set to :obj:`False`, will sort :obj:`edge_index` column-wise. :rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else (:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)
gammagl/utils/coalesce.py
coalesce
BUPT-GAMMA/GammaGL
python
def coalesce(edge_index, edge_attr=None, num_nodes=None, reduce='add', is_sorted=False, sort_by_row=True): 'Row-wise sorts :obj:`edge_index` and removes its duplicated entries.\n Duplicate entries in :obj:`edge_attr` are merged by scattering them\n together according to the given :obj:`reduce` option.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-\n dimensional edge features.\n If given as a list, will re-shuffle and remove duplicates for all\n its entries. (default: :obj:`None`)\n num_nodes (int, optional): The number of nodes, *i.e.*\n :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)\n reduce (string, optional): The reduce operation to use for merging edge\n features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,\n :obj:`"mul"`). (default: :obj:`"add"`)\n is_sorted (bool, optional): If set to :obj:`True`, will expect\n :obj:`edge_index` to be already sorted row-wise.\n sort_by_row (bool, optional): If set to :obj:`False`, will sort\n :obj:`edge_index` column-wise.\n\n :rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else\n (:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)\n ' if tlx.is_tensor(edge_index): edge_index = tlx.convert_to_numpy(edge_index) nnz = edge_index.shape[1] num_nodes = maybe_num_nodes(edge_index, num_nodes) idx = np.zeros((nnz + 1)) idx[0] = (- 1) idx[1:] = edge_index[(1 - int(sort_by_row))] idx[1:] = np.add(np.multiply(idx[1:], num_nodes), edge_index[int(sort_by_row)]) if (not is_sorted): perm = np.argsort(idx[1:]) idx[1:] = np.sort(idx[1:]) edge_index = edge_index[:, perm] if ((edge_attr is not None) and tlx.ops.is_tensor(edge_attr)): edge_attr = tlx.gather(edge_attr, tlx.convert_to_tensor(perm), axis=0) elif ((edge_attr is not None) and check_is_numpy(edge_attr)): edge_attr = edge_attr[perm] elif (edge_attr is not None): edge_attr = [tlx.gather(e, perm, axis=0) for e in edge_attr] mask = (idx[1:] > idx[:(- 1)]) if mask.all(): edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64) return (edge_index if (edge_attr is None) else (edge_index, edge_attr)) edge_index = edge_index[:, mask] edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64) if (edge_attr is None): return edge_index idx = np.arange(0, nnz) idx = tlx.convert_to_tensor((idx - (1 - mask).cumsum(axis=0))) if tlx.ops.is_tensor(edge_attr): edge_attr = mpops.segment_sum(edge_attr, idx) return (edge_index, edge_attr)
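The coalesce docstring above describes row-wise sorting of edge_index, dropping duplicate edges, and merging their attributes with a reduce operation. Here is a compact NumPy-only sketch of that idea on toy data; it mirrors the linearize-sort-mask-segment steps of the function but is not the GammaGL implementation:

import numpy as np

# Toy data: four directed edges, two of which are duplicates of (0, 1).
edge_index = np.array([[0, 1, 0, 2],
                       [1, 0, 1, 0]])
edge_attr = np.array([1.0, 1.0, 2.0, 1.0])

num_nodes = edge_index.max() + 1
keys = edge_index[0] * num_nodes + edge_index[1]          # linearize (row, col) pairs
order = np.argsort(keys)                                   # row-wise sort
keys, edge_index, edge_attr = keys[order], edge_index[:, order], edge_attr[order]

mask = np.concatenate(([True], keys[1:] > keys[:-1]))      # first occurrence per key
segment = np.cumsum(mask) - 1                              # segment id for each edge
merged = np.zeros(int(mask.sum()))
np.add.at(merged, segment, edge_attr)                      # reduce='add' over duplicates

print(edge_index[:, mask])   # [[0 1 2], [1 0 0]]
print(merged)                # [3. 1. 1.]

The real function additionally handles tensor inputs, optional or list-valued attributes, and other reduce modes via segment ops.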
def __init__(self, force=False): '\n Creates an exporter instance. If force flag is set, all data already\n present in a destination folder are overwritten on export.\n\n @param force: If True, force flag is set. It is not otherwise.\n @type force: bool\n ' self._force = force
-4,257,012,996,661,849,600
Creates an exporter instance. If force flag is set, all data already present in a destination folder are overwritten on export. @param force: If True, force flag is set. It is not otherwise. @type force: bool
comodit_client/api/exporter.py
__init__
geoco84/comodit-client
python
def __init__(self, force=False): '\n Creates an exporter instance. If force flag is set, all data already\n present in a destination folder are overwritten on export.\n\n @param force: If True, force flag is set. It is not otherwise.\n @type force: bool\n ' self._force = force
def export_application(self, app, path, backup=False): '\n Exports an application to a local folder.\n\n @param app: The application to export.\n @type app: L{Application}\n @param path: Path to local directory.\n @type path: string\n @param backup: indicate is a backup.\n @type path: bool\n ' self._export_entity(app, path, True, True, backup)
-2,885,658,103,454,825,000
Exports an application to a local folder. @param app: The application to export. @type app: L{Application} @param path: Path to local directory. @type path: string @param backup: indicate is a backup. @type path: bool
comodit_client/api/exporter.py
export_application
geoco84/comodit-client
python
def export_application(self, app, path, backup=False): '\n Exports an application to a local folder.\n\n @param app: The application to export.\n @type app: L{Application}\n @param path: Path to local directory.\n @type path: string\n @param backup: indicate is a backup.\n @type path: bool\n ' self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup=False): '\n Exports a distribution to a local folder.\n\n @param dist: The distribution to export.\n @type dist: L{Distribution}\n @param path: Path to local directory.\n @type path: string\n @param backup: indicate is a backup.\n @type path: bool\n ' self._export_entity(dist, path, True, True, backup)
-7,077,236,834,681,137,000
Exports a distribution to a local folder. @param dist: The distribution to export. @type dist: L{Distribution} @param path: Path to local directory. @type path: string @param backup: indicate is a backup. @type path: bool
comodit_client/api/exporter.py
export_distribution
geoco84/comodit-client
python
def export_distribution(self, dist, path, backup=False): '\n Exports a distribution to a local folder.\n\n @param dist: The distribution to export.\n @type dist: L{Distribution}\n @param path: Path to local directory.\n @type path: string\n @param backup: indicate is a backup.\n @type path: bool\n ' self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup=False): '\n Exports a platform to a local folder.\n\n @param plat: The platform to export.\n @type plat: L{Platform}\n @param path: Path to local directory.\n @type path: string\n @param backup: indicate is a backup.\n @type path: bool\n ' self._export_entity(plat, path, True, backup=backup)
5,090,413,098,656,828,000
Exports a platform to a local folder. @param plat: The platform to export. @type plat: L{Platform} @param path: Path to local directory. @type path: string @param backup: indicate is a backup. @type path: bool
comodit_client/api/exporter.py
export_platform
geoco84/comodit-client
python
def export_platform(self, plat, path, backup=False): '\n Exports a platform to a local folder.\n\n @param plat: The platform to export.\n @type plat: L{Platform}\n @param path: Path to local directory.\n @type path: string\n @param backup: indicate is a backup.\n @type path: bool\n ' self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path): '\n Exports an environment to a local folder. Hosts of the environment\n are exported also.\n\n @param env: The environment to export.\n @type env: L{Environment}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(env, path) hosts_folder = os.path.join(path, 'hosts') for host in env.hosts(): self.export_host(host, os.path.join(hosts_folder, host.name))
-7,664,905,090,788,252,000
Exports an environment to a local folder. Hosts of the environment are exported also. @param env: The environment to export. @type env: L{Environment} @param path: Path to local directory. @type path: string
comodit_client/api/exporter.py
export_environment
geoco84/comodit-client
python
def export_environment(self, env, path): '\n Exports an environment to a local folder. Hosts of the environment\n are exported also.\n\n @param env: The environment to export.\n @type env: L{Environment}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(env, path) hosts_folder = os.path.join(path, 'hosts') for host in env.hosts(): self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path): '\n Exports a job to a local folder.\n\n @param job: The job to export.\n @type job: L{Job}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(job, path)
-6,253,046,233,192,386,000
Exports a job to a local folder. @param job: The job to export. @type job: L{Job} @param path: Path to local directory. @type path: string
comodit_client/api/exporter.py
export_job
geoco84/comodit-client
python
def export_job(self, job, path): '\n Exports a job to a local folder.\n\n @param job: The job to export.\n @type job: L{Job}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(job, path)
def export_orchestration(self, orchestration, path): '\n Exports a orchestration to a local folder.\n\n @param job: The orchestration to export.\n @type orchestration: L{Orchestration}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(orchestration, path)
-5,400,655,714,325,233,000
Exports a orchestration to a local folder. @param job: The orchestration to export. @type orchestration: L{Orchestration} @param path: Path to local directory. @type path: string
comodit_client/api/exporter.py
export_orchestration
geoco84/comodit-client
python
def export_orchestration(self, orchestration, path): '\n Exports a orchestration to a local folder.\n\n @param job: The orchestration to export.\n @type orchestration: L{Orchestration}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(orchestration, path)
def export_notification(self, notification, path): '\n Exports a jobnotificationto a local folder.\n\n @param notification: The notification to export.\n @type notification: L{Notification}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(notification, path)
-3,760,152,571,254,754,000
Exports a jobnotificationto a local folder. @param notification: The notification to export. @type notification: L{Notification} @param path: Path to local directory. @type path: string
comodit_client/api/exporter.py
export_notification
geoco84/comodit-client
python
def export_notification(self, notification, path): '\n Exports a jobnotificationto a local folder.\n\n @param notification: The notification to export.\n @type notification: L{Notification}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(notification, path)
def export_host(self, host, path): '\n Exports a host to a local folder. Contexts and instance are exported\n also.\n\n @param host: The host to export.\n @type host: L{Host}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(host, path) if (host.state != Host.State.DEFINED): try: instance = host.get_instance() instance.dump_json(os.path.join(path, 'instance.json')) except PythonApiException: pass app_folder = os.path.join(path, 'applications') ensure(app_folder) for context in host.applications(): context.dump_json(os.path.join(app_folder, (context.application + '.json'))) try: host.get_platform().dump_json(os.path.join(path, 'platform.json')) except EntityNotFoundException: pass try: host.get_distribution().dump_json(os.path.join(path, 'distribution.json')) except EntityNotFoundException: pass
-7,571,313,200,186,343,000
Exports a host to a local folder. Contexts and instance are exported also. @param host: The host to export. @type host: L{Host} @param path: Path to local directory. @type path: string
comodit_client/api/exporter.py
export_host
geoco84/comodit-client
python
def export_host(self, host, path): '\n Exports a host to a local folder. Contexts and instance are exported\n also.\n\n @param host: The host to export.\n @type host: L{Host}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(host, path) if (host.state != Host.State.DEFINED): try: instance = host.get_instance() instance.dump_json(os.path.join(path, 'instance.json')) except PythonApiException: pass app_folder = os.path.join(path, 'applications') ensure(app_folder) for context in host.applications(): context.dump_json(os.path.join(app_folder, (context.application + '.json'))) try: host.get_platform().dump_json(os.path.join(path, 'platform.json')) except EntityNotFoundException: pass try: host.get_distribution().dump_json(os.path.join(path, 'distribution.json')) except EntityNotFoundException: pass
def export_organization(self, org, path): '\n Exports an organization to a local folder. Environments, applications,\n distributions and platforms are exported also.\n\n @param org: The organization to export.\n @type org: L{Organization}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(org, path) for app in org.applications(): self.export_application(app, os.path.join(path, 'applications', app.name)) for dist in org.distributions(): self.export_distribution(dist, os.path.join(path, 'distributions', dist.name)) for plat in org.platforms(): self.export_platform(plat, os.path.join(path, 'platforms', plat.name)) for job in org.jobs(): self.export_job(job, os.path.join(path, 'jobs', job.name)) for orch in org.orchestrations(): self.export_orchestration(orch, os.path.join(path, 'orchestrations', orch.name)) for env in org.environments(): self.export_environment(env, os.path.join(path, 'environments', env.name))
3,655,422,668,646,644,000
Exports an organization to a local folder. Environments, applications, distributions and platforms are exported also. @param org: The organization to export. @type org: L{Organization} @param path: Path to local directory. @type path: string
comodit_client/api/exporter.py
export_organization
geoco84/comodit-client
python
def export_organization(self, org, path): '\n Exports an organization to a local folder. Environments, applications,\n distributions and platforms are exported also.\n\n @param org: The organization to export.\n @type org: L{Organization}\n @param path: Path to local directory.\n @type path: string\n ' self._export_entity(org, path) for app in org.applications(): self.export_application(app, os.path.join(path, 'applications', app.name)) for dist in org.distributions(): self.export_distribution(dist, os.path.join(path, 'distributions', dist.name)) for plat in org.platforms(): self.export_platform(plat, os.path.join(path, 'platforms', plat.name)) for job in org.jobs(): self.export_job(job, os.path.join(path, 'jobs', job.name)) for orch in org.orchestrations(): self.export_orchestration(orch, os.path.join(path, 'orchestrations', orch.name)) for env in org.environments(): self.export_environment(env, os.path.join(path, 'environments', env.name))
def test_multiple_metavar_help(self, parser): '\n Help text for options with a metavar tuple should display help\n in the form "--preferences=value1 value2 value3" (#2004).\n ' group = parser.getgroup('general') group.addoption('--preferences', metavar=('value1', 'value2', 'value3'), nargs=3) group._addoption('-h', '--help', action='store_true', dest='help') parser.parse(['-h']) help = parser.optparser.format_help() assert ('--preferences=value1 value2 value3' in help)
2,200,379,206,769,270,800
Help text for options with a metavar tuple should display help in the form "--preferences=value1 value2 value3" (#2004).
tools/third_party/pytest/testing/test_parseopt.py
test_multiple_metavar_help
2-GARIK20/wpt
python
def test_multiple_metavar_help(self, parser): '\n Help text for options with a metavar tuple should display help\n in the form "--preferences=value1 value2 value3" (#2004).\n ' group = parser.getgroup('general') group.addoption('--preferences', metavar=('value1', 'value2', 'value3'), nargs=3) group._addoption('-h', '--help', action='store_true', dest='help') parser.parse(['-h']) help = parser.optparser.format_help() assert ('--preferences=value1 value2 value3' in help)
@classmethod def from_string(cls, permission): 'Create a FileSystemSasPermissions from a string.\n\n To specify read, write, or delete permissions you need only to\n include the first letter of the word in the string. E.g. For read and\n write permissions, you would provide a string "rw".\n\n :param str permission: The string which dictates the read, add, create,\n write, or delete permissions.\n :return: A FileSystemSasPermissions object\n :rtype: ~azure.storage.fildatalake.FileSystemSasPermissions\n ' p_read = ('r' in permission) p_write = ('w' in permission) p_delete = ('d' in permission) p_list = ('l' in permission) p_move = ('m' in permission) p_execute = ('e' in permission) p_manage_ownership = ('o' in permission) p_manage_access_control = ('p' in permission) parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, manage_access_control=p_manage_access_control) return parsed
-2,302,456,270,963,383,800
Create a FileSystemSasPermissions from a string. To specify read, write, or delete permissions you need only to include the first letter of the word in the string. E.g. For read and write permissions, you would provide a string "rw". :param str permission: The string which dictates the read, add, create, write, or delete permissions. :return: A FileSystemSasPermissions object :rtype: ~azure.storage.fildatalake.FileSystemSasPermissions
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py
from_string
Co0olboi/azure-sdk-for-python
python
@classmethod def from_string(cls, permission): 'Create a FileSystemSasPermissions from a string.\n\n To specify read, write, or delete permissions you need only to\n include the first letter of the word in the string. E.g. For read and\n write permissions, you would provide a string "rw".\n\n :param str permission: The string which dictates the read, add, create,\n write, or delete permissions.\n :return: A FileSystemSasPermissions object\n :rtype: ~azure.storage.fildatalake.FileSystemSasPermissions\n ' p_read = ('r' in permission) p_write = ('w' in permission) p_delete = ('d' in permission) p_list = ('l' in permission) p_move = ('m' in permission) p_execute = ('e' in permission) p_manage_ownership = ('o' in permission) p_manage_access_control = ('p' in permission) parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, manage_access_control=p_manage_access_control) return parsed
@classmethod def from_string(cls, permission): 'Create a DirectorySasPermissions from a string.\n\n To specify read, create, write, or delete permissions you need only to\n include the first letter of the word in the string. E.g. For read and\n write permissions, you would provide a string "rw".\n\n :param str permission: The string which dictates the read, add, create,\n write, or delete permissions.\n :return: A DirectorySasPermissions object\n :rtype: ~azure.storage.filedatalake.DirectorySasPermissions\n ' p_read = ('r' in permission) p_create = ('c' in permission) p_write = ('w' in permission) p_delete = ('d' in permission) p_list = ('l' in permission) p_move = ('m' in permission) p_execute = ('e' in permission) p_manage_ownership = ('o' in permission) p_manage_access_control = ('p' in permission) parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, manage_access_control=p_manage_access_control) return parsed
656,015,843,620,330,800
Create a DirectorySasPermissions from a string. To specify read, create, write, or delete permissions you need only to include the first letter of the word in the string. E.g. For read and write permissions, you would provide a string "rw". :param str permission: The string which dictates the read, add, create, write, or delete permissions. :return: A DirectorySasPermissions object :rtype: ~azure.storage.filedatalake.DirectorySasPermissions
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py
from_string
Co0olboi/azure-sdk-for-python
python
@classmethod def from_string(cls, permission): 'Create a DirectorySasPermissions from a string.\n\n To specify read, create, write, or delete permissions you need only to\n include the first letter of the word in the string. E.g. For read and\n write permissions, you would provide a string "rw".\n\n :param str permission: The string which dictates the read, add, create,\n write, or delete permissions.\n :return: A DirectorySasPermissions object\n :rtype: ~azure.storage.filedatalake.DirectorySasPermissions\n ' p_read = ('r' in permission) p_create = ('c' in permission) p_write = ('w' in permission) p_delete = ('d' in permission) p_list = ('l' in permission) p_move = ('m' in permission) p_execute = ('e' in permission) p_manage_ownership = ('o' in permission) p_manage_access_control = ('p' in permission) parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, manage_access_control=p_manage_access_control) return parsed
@classmethod def from_string(cls, permission): 'Create a FileSasPermissions from a string.\n\n To specify read, write, or delete permissions you need only to\n include the first letter of the word in the string. E.g. For read and\n write permissions, you would provide a string "rw".\n\n :param str permission: The string which dictates the read, add, create,\n write, or delete permissions.\n :return: A FileSasPermissions object\n :rtype: ~azure.storage.fildatalake.FileSasPermissions\n ' p_read = ('r' in permission) p_create = ('c' in permission) p_write = ('w' in permission) p_delete = ('d' in permission) p_move = ('m' in permission) p_execute = ('e' in permission) p_manage_ownership = ('o' in permission) p_manage_access_control = ('p' in permission) parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, manage_access_control=p_manage_access_control) return parsed
6,598,723,634,432,000,000
Create a FileSasPermissions from a string. To specify read, write, or delete permissions you need only to include the first letter of the word in the string. E.g. For read and write permissions, you would provide a string "rw". :param str permission: The string which dictates the read, add, create, write, or delete permissions. :return: A FileSasPermissions object :rtype: ~azure.storage.fildatalake.FileSasPermissions
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py
from_string
Co0olboi/azure-sdk-for-python
python
@classmethod def from_string(cls, permission): 'Create a FileSasPermissions from a string.\n\n To specify read, write, or delete permissions you need only to\n include the first letter of the word in the string. E.g. For read and\n write permissions, you would provide a string "rw".\n\n :param str permission: The string which dictates the read, add, create,\n write, or delete permissions.\n :return: A FileSasPermissions object\n :rtype: ~azure.storage.fildatalake.FileSasPermissions\n ' p_read = ('r' in permission) p_create = ('c' in permission) p_write = ('w' in permission) p_delete = ('d' in permission) p_move = ('m' in permission) p_execute = ('e' in permission) p_manage_ownership = ('o' in permission) p_manage_access_control = ('p' in permission) parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, manage_access_control=p_manage_access_control) return parsed
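The three from_string classmethods above share one parsing pattern: each single-letter flag in the permission string toggles a keyword argument of the constructor. A small sketch of that pattern with a hypothetical Permissions class (not the Azure SDK types):

class Permissions:
    """Hypothetical stand-in for the SAS-permissions classes shown above."""

    def __init__(self, read=False, write=False, delete=False):
        self.read, self.write, self.delete = read, write, delete

    @classmethod
    def from_string(cls, permission):
        # Membership tests map each one-letter flag onto a constructor argument.
        return cls(read='r' in permission,
                   write='w' in permission,
                   delete='d' in permission)

p = Permissions.from_string('rw')
assert p.read and p.write and not p.delete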
def _ensure_tf_install(): 'Attempt to import tensorflow, and ensure its version is sufficient.\n\n Raises:\n ImportError: if either tensorflow is not importable or its version is\n inadequate.\n ' try: import tensorflow as tf except ImportError: print('\n\nFailed to import TensorFlow. Please note that TensorFlow is not installed by default when you install TensorFlow Model Optimization. This is so that users can decide whether to install the GPU-enabled TensorFlow package. To use TensorFlow Model Optimization, please install the most recent version of TensorFlow, by following instructions at https://tensorflow.org/install.\n\n') raise import distutils.version required_tensorflow_version = '1.14.0' if (distutils.version.LooseVersion(tf.version.VERSION) < distutils.version.LooseVersion(required_tensorflow_version)): raise ImportError('This version of TensorFlow Model Optimization requires TensorFlow version >= {required}; Detected an installation of version {present}. Please upgrade TensorFlow to proceed.'.format(required=required_tensorflow_version, present=tf.__version__))
-1,946,075,856,956,076,000
Attempt to import tensorflow, and ensure its version is sufficient. Raises: ImportError: if either tensorflow is not importable or its version is inadequate.
tensorflow_model_optimization/__init__.py
_ensure_tf_install
13957166977/model-optimization
python
def _ensure_tf_install(): 'Attempt to import tensorflow, and ensure its version is sufficient.\n\n Raises:\n ImportError: if either tensorflow is not importable or its version is\n inadequate.\n ' try: import tensorflow as tf except ImportError: print('\n\nFailed to import TensorFlow. Please note that TensorFlow is not installed by default when you install TensorFlow Model Optimization. This is so that users can decide whether to install the GPU-enabled TensorFlow package. To use TensorFlow Model Optimization, please install the most recent version of TensorFlow, by following instructions at https://tensorflow.org/install.\n\n') raise import distutils.version required_tensorflow_version = '1.14.0' if (distutils.version.LooseVersion(tf.version.VERSION) < distutils.version.LooseVersion(required_tensorflow_version)): raise ImportError('This version of TensorFlow Model Optimization requires TensorFlow version >= {required}; Detected an installation of version {present}. Please upgrade TensorFlow to proceed.'.format(required=required_tensorflow_version, present=tf.__version__))
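_ensure_tf_install above compares the installed TensorFlow version against a minimum using distutils.version.LooseVersion. As a hedged illustration of just the comparison step, without distutils (which is removed in Python 3.12) and ignoring pre-release suffixes:

def _version_tuple(v):
    # Naive parse: keep only the leading numeric dotted components.
    return tuple(int(part) for part in v.split('.')[:3])

def check_tf_version(present, required='1.14.0'):
    if _version_tuple(present) < _version_tuple(required):
        raise ImportError(f'TensorFlow >= {required} is required; found {present}.')

check_tf_version('2.11.0')      # passes silently
try:
    check_tf_version('1.13.1')
except ImportError as exc:
    print(exc)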
def array(data: Union[(Sequence[object], AnyArrayLike)], dtype: Optional[Dtype]=None, copy: bool=True) -> ExtensionArray: '\n Create an array.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n data : Sequence of objects\n The scalars inside `data` should be instances of the\n scalar type for `dtype`. It\'s expected that `data`\n represents a 1-dimensional array of data.\n\n When `data` is an Index or Series, the underlying array\n will be extracted from `data`.\n\n dtype : str, np.dtype, or ExtensionDtype, optional\n The dtype to use for the array. This may be a NumPy\n dtype or an extension type registered with pandas using\n :meth:`pandas.api.extensions.register_extension_dtype`.\n\n If not specified, there are two possibilities:\n\n 1. When `data` is a :class:`Series`, :class:`Index`, or\n :class:`ExtensionArray`, the `dtype` will be taken\n from the data.\n 2. Otherwise, pandas will attempt to infer the `dtype`\n from the data.\n\n Note that when `data` is a NumPy array, ``data.dtype`` is\n *not* used for inferring the array type. This is because\n NumPy cannot represent all the types of data that can be\n held in extension arrays.\n\n Currently, pandas will infer an extension dtype for sequences of\n\n ============================== =====================================\n Scalar Type Array Type\n ============================== =====================================\n :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`\n :class:`pandas.Period` :class:`pandas.arrays.PeriodArray`\n :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`\n :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`\n :class:`int` :class:`pandas.arrays.IntegerArray`\n :class:`float` :class:`pandas.arrays.FloatingArray`\n :class:`str` :class:`pandas.arrays.StringArray`\n :class:`bool` :class:`pandas.arrays.BooleanArray`\n ============================== =====================================\n\n For all other cases, NumPy\'s usual inference rules will be used.\n\n .. versionchanged:: 1.0.0\n\n Pandas infers nullable-integer dtype for integer data,\n string dtype for string data, and nullable-boolean dtype\n for boolean data.\n\n .. versionchanged:: 1.2.0\n\n Pandas now also infers nullable-floating dtype for float-like\n input data\n\n copy : bool, default True\n Whether to copy the data, even if not necessary. Depending\n on the type of `data`, creating the new array may require\n copying data, even if ``copy=False``.\n\n Returns\n -------\n ExtensionArray\n The newly created array.\n\n Raises\n ------\n ValueError\n When `data` is not 1-dimensional.\n\n See Also\n --------\n numpy.array : Construct a NumPy array.\n Series : Construct a pandas Series.\n Index : Construct a pandas Index.\n arrays.PandasArray : ExtensionArray wrapping a NumPy array.\n Series.array : Extract the array stored within a Series.\n\n Notes\n -----\n Omitting the `dtype` argument means pandas will attempt to infer the\n best array type from the values in the data. As new array types are\n added by pandas and 3rd party libraries, the "best" array type may\n change. We recommend specifying `dtype` to ensure that\n\n 1. the correct array type for the data is returned\n 2. the returned array type doesn\'t change as new extension types\n are added by pandas and third-party libraries\n\n Additionally, if the underlying memory representation of the returned\n array matters, we recommend specifying the `dtype` as a concrete object\n rather than a string alias or allowing it to be inferred. 
For example,\n a future version of pandas or a 3rd-party library may include a\n dedicated ExtensionArray for string data. In this event, the following\n would no longer return a :class:`arrays.PandasArray` backed by a NumPy\n array.\n\n >>> pd.array([\'a\', \'b\'], dtype=str)\n <PandasArray>\n [\'a\', \'b\']\n Length: 2, dtype: str32\n\n This would instead return the new ExtensionArray dedicated for string\n data. If you really need the new array to be backed by a NumPy array,\n specify that in the dtype.\n\n >>> pd.array([\'a\', \'b\'], dtype=np.dtype("<U1"))\n <PandasArray>\n [\'a\', \'b\']\n Length: 2, dtype: str32\n\n Finally, Pandas has arrays that mostly overlap with NumPy\n\n * :class:`arrays.DatetimeArray`\n * :class:`arrays.TimedeltaArray`\n\n When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is\n passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``\n rather than a ``PandasArray``. This is for symmetry with the case of\n timezone-aware data, which NumPy does not natively support.\n\n >>> pd.array([\'2015\', \'2016\'], dtype=\'datetime64[ns]\')\n <DatetimeArray>\n [\'2015-01-01 00:00:00\', \'2016-01-01 00:00:00\']\n Length: 2, dtype: datetime64[ns]\n\n >>> pd.array(["1H", "2H"], dtype=\'timedelta64[ns]\')\n <TimedeltaArray>\n [\'0 days 01:00:00\', \'0 days 02:00:00\']\n Length: 2, dtype: timedelta64[ns]\n\n Examples\n --------\n If a dtype is not specified, pandas will infer the best dtype from the values.\n See the description of `dtype` for the types pandas infers for.\n\n >>> pd.array([1, 2])\n <IntegerArray>\n [1, 2]\n Length: 2, dtype: Int64\n\n >>> pd.array([1, 2, np.nan])\n <IntegerArray>\n [1, 2, <NA>]\n Length: 3, dtype: Int64\n\n >>> pd.array([1.1, 2.2])\n <FloatingArray>\n [1.1, 2.2]\n Length: 2, dtype: Float64\n\n >>> pd.array(["a", None, "c"])\n <StringArray>\n [\'a\', <NA>, \'c\']\n Length: 3, dtype: string\n\n >>> pd.array([pd.Period(\'2000\', freq="D"), pd.Period("2000", freq="D")])\n <PeriodArray>\n [\'2000-01-01\', \'2000-01-01\']\n Length: 2, dtype: period[D]\n\n You can use the string alias for `dtype`\n\n >>> pd.array([\'a\', \'b\', \'a\'], dtype=\'category\')\n [\'a\', \'b\', \'a\']\n Categories (2, object): [\'a\', \'b\']\n\n Or specify the actual dtype\n\n >>> pd.array([\'a\', \'b\', \'a\'],\n ... dtype=pd.CategoricalDtype([\'a\', \'b\', \'c\'], ordered=True))\n [\'a\', \'b\', \'a\']\n Categories (3, object): [\'a\' < \'b\' < \'c\']\n\n If pandas does not infer a dedicated extension type a\n :class:`arrays.PandasArray` is returned.\n\n >>> pd.array([1 + 1j, 3 + 2j])\n <PandasArray>\n [(1+1j), (3+2j)]\n Length: 2, dtype: complex128\n\n As mentioned in the "Notes" section, new extension types may be added\n in the future (by pandas or 3rd party libraries), causing the return\n value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`\n as a NumPy dtype if you need to ensure there\'s no future change in\n behavior.\n\n >>> pd.array([1, 2], dtype=np.dtype("int32"))\n <PandasArray>\n [1, 2]\n Length: 2, dtype: int32\n\n `data` must be 1-dimensional. A ValueError is raised when the input\n has the wrong dimensionality.\n\n >>> pd.array(1)\n Traceback (most recent call last):\n ...\n ValueError: Cannot pass scalar \'1\' to \'pandas.array\'.\n ' from pandas.core.arrays import BooleanArray, DatetimeArray, FloatingArray, IntegerArray, IntervalArray, PandasArray, StringArray, TimedeltaArray, period_array if lib.is_scalar(data): msg = f"Cannot pass scalar '{data}' to 'pandas.array'." 
raise ValueError(msg) if ((dtype is None) and isinstance(data, (ABCSeries, ABCIndexClass, ABCExtensionArray))): dtype = data.dtype data = extract_array(data, extract_numpy=True) if isinstance(dtype, str): dtype = (registry.find(dtype) or dtype) if is_extension_array_dtype(dtype): cls = cast(ExtensionDtype, dtype).construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) if (dtype is None): inferred_dtype = lib.infer_dtype(data, skipna=True) if (inferred_dtype == 'period'): try: return period_array(data, copy=copy) except IncompatibleFrequency: pass elif (inferred_dtype == 'interval'): try: return IntervalArray(data, copy=copy) except ValueError: pass elif inferred_dtype.startswith('datetime'): try: return DatetimeArray._from_sequence(data, copy=copy) except ValueError: pass elif inferred_dtype.startswith('timedelta'): return TimedeltaArray._from_sequence(data, copy=copy) elif (inferred_dtype == 'string'): return StringArray._from_sequence(data, copy=copy) elif (inferred_dtype == 'integer'): return IntegerArray._from_sequence(data, copy=copy) elif (inferred_dtype in ('floating', 'mixed-integer-float')): return FloatingArray._from_sequence(data, copy=copy) elif (inferred_dtype == 'boolean'): return BooleanArray._from_sequence(data, copy=copy) if is_datetime64_ns_dtype(dtype): return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) elif is_timedelta64_ns_dtype(dtype): return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) result = PandasArray._from_sequence(data, dtype=dtype, copy=copy) return result
-2,947,146,966,493,969,400
Create an array. .. versionadded:: 0.24.0 Parameters ---------- data : Sequence of objects The scalars inside `data` should be instances of the scalar type for `dtype`. It's expected that `data` represents a 1-dimensional array of data. When `data` is an Index or Series, the underlying array will be extracted from `data`. dtype : str, np.dtype, or ExtensionDtype, optional The dtype to use for the array. This may be a NumPy dtype or an extension type registered with pandas using :meth:`pandas.api.extensions.register_extension_dtype`. If not specified, there are two possibilities: 1. When `data` is a :class:`Series`, :class:`Index`, or :class:`ExtensionArray`, the `dtype` will be taken from the data. 2. Otherwise, pandas will attempt to infer the `dtype` from the data. Note that when `data` is a NumPy array, ``data.dtype`` is *not* used for inferring the array type. This is because NumPy cannot represent all the types of data that can be held in extension arrays. Currently, pandas will infer an extension dtype for sequences of ============================== ===================================== Scalar Type Array Type ============================== ===================================== :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` :class:`int` :class:`pandas.arrays.IntegerArray` :class:`float` :class:`pandas.arrays.FloatingArray` :class:`str` :class:`pandas.arrays.StringArray` :class:`bool` :class:`pandas.arrays.BooleanArray` ============================== ===================================== For all other cases, NumPy's usual inference rules will be used. .. versionchanged:: 1.0.0 Pandas infers nullable-integer dtype for integer data, string dtype for string data, and nullable-boolean dtype for boolean data. .. versionchanged:: 1.2.0 Pandas now also infers nullable-floating dtype for float-like input data copy : bool, default True Whether to copy the data, even if not necessary. Depending on the type of `data`, creating the new array may require copying data, even if ``copy=False``. Returns ------- ExtensionArray The newly created array. Raises ------ ValueError When `data` is not 1-dimensional. See Also -------- numpy.array : Construct a NumPy array. Series : Construct a pandas Series. Index : Construct a pandas Index. arrays.PandasArray : ExtensionArray wrapping a NumPy array. Series.array : Extract the array stored within a Series. Notes ----- Omitting the `dtype` argument means pandas will attempt to infer the best array type from the values in the data. As new array types are added by pandas and 3rd party libraries, the "best" array type may change. We recommend specifying `dtype` to ensure that 1. the correct array type for the data is returned 2. the returned array type doesn't change as new extension types are added by pandas and third-party libraries Additionally, if the underlying memory representation of the returned array matters, we recommend specifying the `dtype` as a concrete object rather than a string alias or allowing it to be inferred. For example, a future version of pandas or a 3rd-party library may include a dedicated ExtensionArray for string data. In this event, the following would no longer return a :class:`arrays.PandasArray` backed by a NumPy array. 
>>> pd.array(['a', 'b'], dtype=str) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 This would instead return the new ExtensionArray dedicated for string data. If you really need the new array to be backed by a NumPy array, specify that in the dtype. >>> pd.array(['a', 'b'], dtype=np.dtype("<U1")) <PandasArray> ['a', 'b'] Length: 2, dtype: str32 Finally, Pandas has arrays that mostly overlap with NumPy * :class:`arrays.DatetimeArray` * :class:`arrays.TimedeltaArray` When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray`` rather than a ``PandasArray``. This is for symmetry with the case of timezone-aware data, which NumPy does not natively support. >>> pd.array(['2015', '2016'], dtype='datetime64[ns]') <DatetimeArray> ['2015-01-01 00:00:00', '2016-01-01 00:00:00'] Length: 2, dtype: datetime64[ns] >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]') <TimedeltaArray> ['0 days 01:00:00', '0 days 02:00:00'] Length: 2, dtype: timedelta64[ns] Examples -------- If a dtype is not specified, pandas will infer the best dtype from the values. See the description of `dtype` for the types pandas infers for. >>> pd.array([1, 2]) <IntegerArray> [1, 2] Length: 2, dtype: Int64 >>> pd.array([1, 2, np.nan]) <IntegerArray> [1, 2, <NA>] Length: 3, dtype: Int64 >>> pd.array([1.1, 2.2]) <FloatingArray> [1.1, 2.2] Length: 2, dtype: Float64 >>> pd.array(["a", None, "c"]) <StringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")]) <PeriodArray> ['2000-01-01', '2000-01-01'] Length: 2, dtype: period[D] You can use the string alias for `dtype` >>> pd.array(['a', 'b', 'a'], dtype='category') ['a', 'b', 'a'] Categories (2, object): ['a', 'b'] Or specify the actual dtype >>> pd.array(['a', 'b', 'a'], ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True)) ['a', 'b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] If pandas does not infer a dedicated extension type a :class:`arrays.PandasArray` is returned. >>> pd.array([1 + 1j, 3 + 2j]) <PandasArray> [(1+1j), (3+2j)] Length: 2, dtype: complex128 As mentioned in the "Notes" section, new extension types may be added in the future (by pandas or 3rd party libraries), causing the return value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype` as a NumPy dtype if you need to ensure there's no future change in behavior. >>> pd.array([1, 2], dtype=np.dtype("int32")) <PandasArray> [1, 2] Length: 2, dtype: int32 `data` must be 1-dimensional. A ValueError is raised when the input has the wrong dimensionality. >>> pd.array(1) Traceback (most recent call last): ... ValueError: Cannot pass scalar '1' to 'pandas.array'.
pandas/core/construction.py
array
BhavarthShah/pandas
python
def array(data: Union[(Sequence[object], AnyArrayLike)], dtype: Optional[Dtype]=None, copy: bool=True) -> ExtensionArray: '\n Create an array.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n data : Sequence of objects\n The scalars inside `data` should be instances of the\n scalar type for `dtype`. It\'s expected that `data`\n represents a 1-dimensional array of data.\n\n When `data` is an Index or Series, the underlying array\n will be extracted from `data`.\n\n dtype : str, np.dtype, or ExtensionDtype, optional\n The dtype to use for the array. This may be a NumPy\n dtype or an extension type registered with pandas using\n :meth:`pandas.api.extensions.register_extension_dtype`.\n\n If not specified, there are two possibilities:\n\n 1. When `data` is a :class:`Series`, :class:`Index`, or\n :class:`ExtensionArray`, the `dtype` will be taken\n from the data.\n 2. Otherwise, pandas will attempt to infer the `dtype`\n from the data.\n\n Note that when `data` is a NumPy array, ``data.dtype`` is\n *not* used for inferring the array type. This is because\n NumPy cannot represent all the types of data that can be\n held in extension arrays.\n\n Currently, pandas will infer an extension dtype for sequences of\n\n ============================== =====================================\n Scalar Type Array Type\n ============================== =====================================\n :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`\n :class:`pandas.Period` :class:`pandas.arrays.PeriodArray`\n :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`\n :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`\n :class:`int` :class:`pandas.arrays.IntegerArray`\n :class:`float` :class:`pandas.arrays.FloatingArray`\n :class:`str` :class:`pandas.arrays.StringArray`\n :class:`bool` :class:`pandas.arrays.BooleanArray`\n ============================== =====================================\n\n For all other cases, NumPy\'s usual inference rules will be used.\n\n .. versionchanged:: 1.0.0\n\n Pandas infers nullable-integer dtype for integer data,\n string dtype for string data, and nullable-boolean dtype\n for boolean data.\n\n .. versionchanged:: 1.2.0\n\n Pandas now also infers nullable-floating dtype for float-like\n input data\n\n copy : bool, default True\n Whether to copy the data, even if not necessary. Depending\n on the type of `data`, creating the new array may require\n copying data, even if ``copy=False``.\n\n Returns\n -------\n ExtensionArray\n The newly created array.\n\n Raises\n ------\n ValueError\n When `data` is not 1-dimensional.\n\n See Also\n --------\n numpy.array : Construct a NumPy array.\n Series : Construct a pandas Series.\n Index : Construct a pandas Index.\n arrays.PandasArray : ExtensionArray wrapping a NumPy array.\n Series.array : Extract the array stored within a Series.\n\n Notes\n -----\n Omitting the `dtype` argument means pandas will attempt to infer the\n best array type from the values in the data. As new array types are\n added by pandas and 3rd party libraries, the "best" array type may\n change. We recommend specifying `dtype` to ensure that\n\n 1. the correct array type for the data is returned\n 2. the returned array type doesn\'t change as new extension types\n are added by pandas and third-party libraries\n\n Additionally, if the underlying memory representation of the returned\n array matters, we recommend specifying the `dtype` as a concrete object\n rather than a string alias or allowing it to be inferred. 
For example,\n a future version of pandas or a 3rd-party library may include a\n dedicated ExtensionArray for string data. In this event, the following\n would no longer return a :class:`arrays.PandasArray` backed by a NumPy\n array.\n\n >>> pd.array([\'a\', \'b\'], dtype=str)\n <PandasArray>\n [\'a\', \'b\']\n Length: 2, dtype: str32\n\n This would instead return the new ExtensionArray dedicated for string\n data. If you really need the new array to be backed by a NumPy array,\n specify that in the dtype.\n\n >>> pd.array([\'a\', \'b\'], dtype=np.dtype("<U1"))\n <PandasArray>\n [\'a\', \'b\']\n Length: 2, dtype: str32\n\n Finally, Pandas has arrays that mostly overlap with NumPy\n\n * :class:`arrays.DatetimeArray`\n * :class:`arrays.TimedeltaArray`\n\n When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is\n passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``\n rather than a ``PandasArray``. This is for symmetry with the case of\n timezone-aware data, which NumPy does not natively support.\n\n >>> pd.array([\'2015\', \'2016\'], dtype=\'datetime64[ns]\')\n <DatetimeArray>\n [\'2015-01-01 00:00:00\', \'2016-01-01 00:00:00\']\n Length: 2, dtype: datetime64[ns]\n\n >>> pd.array(["1H", "2H"], dtype=\'timedelta64[ns]\')\n <TimedeltaArray>\n [\'0 days 01:00:00\', \'0 days 02:00:00\']\n Length: 2, dtype: timedelta64[ns]\n\n Examples\n --------\n If a dtype is not specified, pandas will infer the best dtype from the values.\n See the description of `dtype` for the types pandas infers for.\n\n >>> pd.array([1, 2])\n <IntegerArray>\n [1, 2]\n Length: 2, dtype: Int64\n\n >>> pd.array([1, 2, np.nan])\n <IntegerArray>\n [1, 2, <NA>]\n Length: 3, dtype: Int64\n\n >>> pd.array([1.1, 2.2])\n <FloatingArray>\n [1.1, 2.2]\n Length: 2, dtype: Float64\n\n >>> pd.array(["a", None, "c"])\n <StringArray>\n [\'a\', <NA>, \'c\']\n Length: 3, dtype: string\n\n >>> pd.array([pd.Period(\'2000\', freq="D"), pd.Period("2000", freq="D")])\n <PeriodArray>\n [\'2000-01-01\', \'2000-01-01\']\n Length: 2, dtype: period[D]\n\n You can use the string alias for `dtype`\n\n >>> pd.array([\'a\', \'b\', \'a\'], dtype=\'category\')\n [\'a\', \'b\', \'a\']\n Categories (2, object): [\'a\', \'b\']\n\n Or specify the actual dtype\n\n >>> pd.array([\'a\', \'b\', \'a\'],\n ... dtype=pd.CategoricalDtype([\'a\', \'b\', \'c\'], ordered=True))\n [\'a\', \'b\', \'a\']\n Categories (3, object): [\'a\' < \'b\' < \'c\']\n\n If pandas does not infer a dedicated extension type a\n :class:`arrays.PandasArray` is returned.\n\n >>> pd.array([1 + 1j, 3 + 2j])\n <PandasArray>\n [(1+1j), (3+2j)]\n Length: 2, dtype: complex128\n\n As mentioned in the "Notes" section, new extension types may be added\n in the future (by pandas or 3rd party libraries), causing the return\n value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`\n as a NumPy dtype if you need to ensure there\'s no future change in\n behavior.\n\n >>> pd.array([1, 2], dtype=np.dtype("int32"))\n <PandasArray>\n [1, 2]\n Length: 2, dtype: int32\n\n `data` must be 1-dimensional. A ValueError is raised when the input\n has the wrong dimensionality.\n\n >>> pd.array(1)\n Traceback (most recent call last):\n ...\n ValueError: Cannot pass scalar \'1\' to \'pandas.array\'.\n ' from pandas.core.arrays import BooleanArray, DatetimeArray, FloatingArray, IntegerArray, IntervalArray, PandasArray, StringArray, TimedeltaArray, period_array if lib.is_scalar(data): msg = f"Cannot pass scalar '{data}' to 'pandas.array'." 
raise ValueError(msg) if ((dtype is None) and isinstance(data, (ABCSeries, ABCIndexClass, ABCExtensionArray))): dtype = data.dtype data = extract_array(data, extract_numpy=True) if isinstance(dtype, str): dtype = (registry.find(dtype) or dtype) if is_extension_array_dtype(dtype): cls = cast(ExtensionDtype, dtype).construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) if (dtype is None): inferred_dtype = lib.infer_dtype(data, skipna=True) if (inferred_dtype == 'period'): try: return period_array(data, copy=copy) except IncompatibleFrequency: pass elif (inferred_dtype == 'interval'): try: return IntervalArray(data, copy=copy) except ValueError: pass elif inferred_dtype.startswith('datetime'): try: return DatetimeArray._from_sequence(data, copy=copy) except ValueError: pass elif inferred_dtype.startswith('timedelta'): return TimedeltaArray._from_sequence(data, copy=copy) elif (inferred_dtype == 'string'): return StringArray._from_sequence(data, copy=copy) elif (inferred_dtype == 'integer'): return IntegerArray._from_sequence(data, copy=copy) elif (inferred_dtype in ('floating', 'mixed-integer-float')): return FloatingArray._from_sequence(data, copy=copy) elif (inferred_dtype == 'boolean'): return BooleanArray._from_sequence(data, copy=copy) if is_datetime64_ns_dtype(dtype): return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) elif is_timedelta64_ns_dtype(dtype): return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) result = PandasArray._from_sequence(data, dtype=dtype, copy=copy) return result
def extract_array(obj: AnyArrayLike, extract_numpy: bool=False) -> ArrayLike: "\n Extract the ndarray or ExtensionArray from a Series or Index.\n\n For all other types, `obj` is just returned as is.\n\n Parameters\n ----------\n obj : object\n For Series / Index, the underlying ExtensionArray is unboxed.\n For Numpy-backed ExtensionArrays, the ndarray is extracted.\n\n extract_numpy : bool, default False\n Whether to extract the ndarray from a PandasArray\n\n Returns\n -------\n arr : object\n\n Examples\n --------\n >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))\n ['a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n\n Other objects like lists, arrays, and DataFrames are just passed through.\n\n >>> extract_array([1, 2, 3])\n [1, 2, 3]\n\n For an ndarray-backed Series / Index a PandasArray is returned.\n\n >>> extract_array(pd.Series([1, 2, 3]))\n <PandasArray>\n [1, 2, 3]\n Length: 3, dtype: int64\n\n To extract all the way down to the ndarray, pass ``extract_numpy=True``.\n\n >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)\n array([1, 2, 3])\n " if isinstance(obj, (ABCIndexClass, ABCSeries)): obj = obj.array if (extract_numpy and isinstance(obj, ABCPandasArray)): obj = obj.to_numpy() return obj
7,053,903,833,595,036,000
Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. For Numpy-backed ExtensionArrays, the ndarray is extracted. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index a PandasArray is returned. >>> extract_array(pd.Series([1, 2, 3])) <PandasArray> [1, 2, 3] Length: 3, dtype: int64 To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3])
pandas/core/construction.py
extract_array
BhavarthShah/pandas
python
def extract_array(obj: AnyArrayLike, extract_numpy: bool=False) -> ArrayLike: "\n Extract the ndarray or ExtensionArray from a Series or Index.\n\n For all other types, `obj` is just returned as is.\n\n Parameters\n ----------\n obj : object\n For Series / Index, the underlying ExtensionArray is unboxed.\n For Numpy-backed ExtensionArrays, the ndarray is extracted.\n\n extract_numpy : bool, default False\n Whether to extract the ndarray from a PandasArray\n\n Returns\n -------\n arr : object\n\n Examples\n --------\n >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))\n ['a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n\n Other objects like lists, arrays, and DataFrames are just passed through.\n\n >>> extract_array([1, 2, 3])\n [1, 2, 3]\n\n For an ndarray-backed Series / Index a PandasArray is returned.\n\n >>> extract_array(pd.Series([1, 2, 3]))\n <PandasArray>\n [1, 2, 3]\n Length: 3, dtype: int64\n\n To extract all the way down to the ndarray, pass ``extract_numpy=True``.\n\n >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)\n array([1, 2, 3])\n " if isinstance(obj, (ABCIndexClass, ABCSeries)): obj = obj.array if (extract_numpy and isinstance(obj, ABCPandasArray)): obj = obj.to_numpy() return obj
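A minimal usage sketch for extract_array, assuming it is importable as pandas.core.construction.extract_array (matching the record's path field); the expected results follow the docstring examples above.

import pandas as pd
from pandas.core.construction import extract_array  # internal API; module path assumed from the record

s = pd.Series([1, 2, 3])
arr = extract_array(s)                     # PandasArray wrapping the underlying ndarray
nd = extract_array(s, extract_numpy=True)  # plain numpy array([1, 2, 3])
lst = extract_array([1, 2, 3])             # non-Series/Index input is passed through unchanged
print(type(arr).__name__, type(nd).__name__, lst)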
def sanitize_array(data, index: Optional[Index], dtype: Optional[DtypeObj]=None, copy: bool=False, raise_cast_failure: bool=False) -> ArrayLike: '\n Sanitize input data to an ndarray or ExtensionArray, copy if specified,\n coerce to the dtype if specified.\n ' if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): (data, fill_value) = maybe_upcast(data, copy=True) data.soften_mask() data[mask] = fill_value else: data = data.copy() data = extract_array(data, extract_numpy=True) if isinstance(data, np.ndarray): if ((dtype is not None) and is_float_dtype(data.dtype) and is_integer_dtype(dtype)): try: subarr = _try_cast(data, dtype, copy, True) except ValueError: if copy: subarr = data.copy() else: subarr = np.array(data, copy=False) else: subarr = _try_cast(data, dtype, copy, raise_cast_failure) elif isinstance(data, ABCExtensionArray): subarr = data if (dtype is not None): subarr = subarr.astype(dtype, copy=copy) elif copy: subarr = subarr.copy() return subarr elif (isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and (len(data) > 0)): if isinstance(data, set): raise TypeError('Set type is unordered') data = list(data) if (dtype is not None): subarr = _try_cast(data, dtype, copy, raise_cast_failure) else: subarr = maybe_convert_platform(data) subarr = maybe_cast_to_datetime(subarr, dtype) elif isinstance(data, range): arr = np.arange(data.start, data.stop, data.step, dtype='int64') subarr = _try_cast(arr, dtype, copy, raise_cast_failure) elif (lib.is_scalar(data) and (index is not None) and (dtype is not None)): data = maybe_cast_to_datetime(data, dtype) if (not lib.is_scalar(data)): data = data[0] subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype) else: subarr = _try_cast(data, dtype, copy, raise_cast_failure) if (getattr(subarr, 'ndim', 0) == 0): if isinstance(data, list): subarr = np.array(data, dtype=object) elif (index is not None): value = data if (dtype is None): (dtype, value) = infer_dtype_from_scalar(value, pandas_dtype=True) else: value = maybe_cast_to_datetime(value, dtype) subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype) else: return subarr.item() elif (subarr.ndim == 1): if (index is not None): if ((len(subarr) != len(index)) and (len(subarr) == 1)): subarr = construct_1d_arraylike_from_scalar(subarr[0], len(index), subarr.dtype) elif (subarr.ndim > 1): if isinstance(data, np.ndarray): raise ValueError('Data must be 1-dimensional') else: subarr = com.asarray_tuplesafe(data, dtype=dtype) if (not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype))): if issubclass(subarr.dtype.type, str): if (not lib.is_scalar(data)): if (not np.all(isna(data))): data = np.array(data, dtype=dtype, copy=False) subarr = np.array(data, dtype=object, copy=copy) is_object_or_str_dtype = (is_object_dtype(dtype) or is_string_dtype(dtype)) if (is_object_dtype(subarr.dtype) and (not is_object_or_str_dtype)): inferred = lib.infer_dtype(subarr, skipna=False) if (inferred in {'interval', 'period'}): subarr = array(subarr) return subarr
-8,508,708,360,423,698,000
Sanitize input data to an ndarray or ExtensionArray, copy if specified, coerce to the dtype if specified.
pandas/core/construction.py
sanitize_array
BhavarthShah/pandas
python
def sanitize_array(data, index: Optional[Index], dtype: Optional[DtypeObj]=None, copy: bool=False, raise_cast_failure: bool=False) -> ArrayLike: '\n Sanitize input data to an ndarray or ExtensionArray, copy if specified,\n coerce to the dtype if specified.\n ' if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): (data, fill_value) = maybe_upcast(data, copy=True) data.soften_mask() data[mask] = fill_value else: data = data.copy() data = extract_array(data, extract_numpy=True) if isinstance(data, np.ndarray): if ((dtype is not None) and is_float_dtype(data.dtype) and is_integer_dtype(dtype)): try: subarr = _try_cast(data, dtype, copy, True) except ValueError: if copy: subarr = data.copy() else: subarr = np.array(data, copy=False) else: subarr = _try_cast(data, dtype, copy, raise_cast_failure) elif isinstance(data, ABCExtensionArray): subarr = data if (dtype is not None): subarr = subarr.astype(dtype, copy=copy) elif copy: subarr = subarr.copy() return subarr elif (isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and (len(data) > 0)): if isinstance(data, set): raise TypeError('Set type is unordered') data = list(data) if (dtype is not None): subarr = _try_cast(data, dtype, copy, raise_cast_failure) else: subarr = maybe_convert_platform(data) subarr = maybe_cast_to_datetime(subarr, dtype) elif isinstance(data, range): arr = np.arange(data.start, data.stop, data.step, dtype='int64') subarr = _try_cast(arr, dtype, copy, raise_cast_failure) elif (lib.is_scalar(data) and (index is not None) and (dtype is not None)): data = maybe_cast_to_datetime(data, dtype) if (not lib.is_scalar(data)): data = data[0] subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype) else: subarr = _try_cast(data, dtype, copy, raise_cast_failure) if (getattr(subarr, 'ndim', 0) == 0): if isinstance(data, list): subarr = np.array(data, dtype=object) elif (index is not None): value = data if (dtype is None): (dtype, value) = infer_dtype_from_scalar(value, pandas_dtype=True) else: value = maybe_cast_to_datetime(value, dtype) subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype) else: return subarr.item() elif (subarr.ndim == 1): if (index is not None): if ((len(subarr) != len(index)) and (len(subarr) == 1)): subarr = construct_1d_arraylike_from_scalar(subarr[0], len(index), subarr.dtype) elif (subarr.ndim > 1): if isinstance(data, np.ndarray): raise ValueError('Data must be 1-dimensional') else: subarr = com.asarray_tuplesafe(data, dtype=dtype) if (not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype))): if issubclass(subarr.dtype.type, str): if (not lib.is_scalar(data)): if (not np.all(isna(data))): data = np.array(data, dtype=dtype, copy=False) subarr = np.array(data, dtype=object, copy=copy) is_object_or_str_dtype = (is_object_dtype(dtype) or is_string_dtype(dtype)) if (is_object_dtype(subarr.dtype) and (not is_object_or_str_dtype)): inferred = lib.infer_dtype(subarr, skipna=False) if (inferred in {'interval', 'period'}): subarr = array(subarr) return subarr
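A small sketch exercising two branches of sanitize_array above: a list coerced to a requested NumPy dtype, and a scalar broadcast over an index. This is an internal pandas helper; the import location is assumed from the record's path field.

import numpy as np
import pandas as pd
from pandas.core.construction import sanitize_array  # internal API; module path assumed from the record

# list-like + explicit dtype -> the _try_cast path
print(sanitize_array([1, 2, 3], index=None, dtype=np.dtype("float64")))    # [1. 2. 3.]

# scalar + index + dtype -> broadcast via construct_1d_arraylike_from_scalar
print(sanitize_array(5, index=pd.RangeIndex(3), dtype=np.dtype("int64")))  # [5 5 5]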
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool): "\n Convert input to numpy ndarray and optionally cast to a given dtype.\n\n Parameters\n ----------\n arr : ndarray, scalar, list, tuple, iterator (catchall)\n Excludes: ExtensionArray, Series, Index.\n dtype : np.dtype, ExtensionDtype or None\n copy : bool\n If False, don't copy the data if not needed.\n raise_cast_failure : bool\n If True, and if a dtype is specified, raise errors during casting.\n Otherwise an object array is returned.\n " if isinstance(arr, np.ndarray): if (maybe_castable(arr) and (not copy) and (dtype is None)): return arr if (isinstance(dtype, ExtensionDtype) and ((dtype.kind != 'M') or is_sparse(dtype))): array_type = dtype.construct_array_type()._from_sequence subarr = array_type(arr, dtype=dtype, copy=copy) return subarr try: if is_integer_dtype(dtype): maybe_cast_to_integer_array(arr, dtype) subarr = arr else: subarr = maybe_cast_to_datetime(arr, dtype) if (is_object_dtype(dtype) and (is_list_like(subarr) and (not (is_iterator(subarr) or isinstance(subarr, np.ndarray))))): subarr = construct_1d_object_array_from_listlike(subarr) elif (not is_extension_array_dtype(subarr)): subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) except OutOfBoundsDatetime: raise except (ValueError, TypeError): if ((dtype is not None) and raise_cast_failure): raise else: subarr = np.array(arr, dtype=object, copy=copy) return subarr
2,792,998,909,573,878,300
Convert input to numpy ndarray and optionally cast to a given dtype. Parameters ---------- arr : ndarray, scalar, list, tuple, iterator (catchall) Excludes: ExtensionArray, Series, Index. dtype : np.dtype, ExtensionDtype or None copy : bool If False, don't copy the data if not needed. raise_cast_failure : bool If True, and if a dtype is specified, raise errors during casting. Otherwise an object array is returned.
pandas/core/construction.py
_try_cast
BhavarthShah/pandas
python
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool): "\n Convert input to numpy ndarray and optionally cast to a given dtype.\n\n Parameters\n ----------\n arr : ndarray, scalar, list, tuple, iterator (catchall)\n Excludes: ExtensionArray, Series, Index.\n dtype : np.dtype, ExtensionDtype or None\n copy : bool\n If False, don't copy the data if not needed.\n raise_cast_failure : bool\n If True, and if a dtype is specified, raise errors during casting.\n Otherwise an object array is returned.\n " if isinstance(arr, np.ndarray): if (maybe_castable(arr) and (not copy) and (dtype is None)): return arr if (isinstance(dtype, ExtensionDtype) and ((dtype.kind != 'M') or is_sparse(dtype))): array_type = dtype.construct_array_type()._from_sequence subarr = array_type(arr, dtype=dtype, copy=copy) return subarr try: if is_integer_dtype(dtype): maybe_cast_to_integer_array(arr, dtype) subarr = arr else: subarr = maybe_cast_to_datetime(arr, dtype) if (is_object_dtype(dtype) and (is_list_like(subarr) and (not (is_iterator(subarr) or isinstance(subarr, np.ndarray))))): subarr = construct_1d_object_array_from_listlike(subarr) elif (not is_extension_array_dtype(subarr)): subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) except OutOfBoundsDatetime: raise except (ValueError, TypeError): if ((dtype is not None) and raise_cast_failure): raise else: subarr = np.array(arr, dtype=object, copy=copy) return subarr
def is_empty_data(data: Any) -> bool: '\n Utility to check if a Series is instantiated with empty data,\n which does not contain dtype information.\n\n Parameters\n ----------\n data : array-like, Iterable, dict, or scalar value\n Contains data stored in Series.\n\n Returns\n -------\n bool\n ' is_none = (data is None) is_list_like_without_dtype = (is_list_like(data) and (not hasattr(data, 'dtype'))) is_simple_empty = (is_list_like_without_dtype and (not data)) return (is_none or is_simple_empty)
6,240,236,482,168,920,000
Utility to check if a Series is instantiated with empty data, which does not contain dtype information. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. Returns ------- bool
pandas/core/construction.py
is_empty_data
BhavarthShah/pandas
python
def is_empty_data(data: Any) -> bool: '\n Utility to check if a Series is instantiated with empty data,\n which does not contain dtype information.\n\n Parameters\n ----------\n data : array-like, Iterable, dict, or scalar value\n Contains data stored in Series.\n\n Returns\n -------\n bool\n ' is_none = (data is None) is_list_like_without_dtype = (is_list_like(data) and (not hasattr(data, 'dtype'))) is_simple_empty = (is_list_like_without_dtype and (not data)) return (is_none or is_simple_empty)
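The sketch below spells out which inputs the helper treats as "empty": None, or a falsy list-like that carries no dtype attribute. Internal API; import path assumed from the record.

import numpy as np
from pandas.core.construction import is_empty_data  # internal API; module path assumed from the record

print(is_empty_data(None))          # True
print(is_empty_data([]))            # True  (falsy list-like, no dtype)
print(is_empty_data({}))            # True  (dicts are list-like and have no dtype)
print(is_empty_data(np.array([])))  # False (empty, but it carries a dtype)
print(is_empty_data([1, 2]))        # False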
def create_series_with_explicit_dtype(data: Any=None, index: Optional[Union[(ArrayLike, Index)]]=None, dtype: Optional[Dtype]=None, name: Optional[str]=None, copy: bool=False, fastpath: bool=False, dtype_if_empty: Dtype=object) -> Series: '\n Helper to pass an explicit dtype when instantiating an empty Series.\n\n This silences a DeprecationWarning described in GitHub-17261.\n\n Parameters\n ----------\n data : Mirrored from Series.__init__\n index : Mirrored from Series.__init__\n dtype : Mirrored from Series.__init__\n name : Mirrored from Series.__init__\n copy : Mirrored from Series.__init__\n fastpath : Mirrored from Series.__init__\n dtype_if_empty : str, numpy.dtype, or ExtensionDtype\n This dtype will be passed explicitly if an empty Series will\n be instantiated.\n\n Returns\n -------\n Series\n ' from pandas.core.series import Series if (is_empty_data(data) and (dtype is None)): dtype = dtype_if_empty return Series(data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath)
6,999,774,149,489,639,000
Helper to pass an explicit dtype when instantiating an empty Series. This silences a DeprecationWarning described in GitHub-17261. Parameters ---------- data : Mirrored from Series.__init__ index : Mirrored from Series.__init__ dtype : Mirrored from Series.__init__ name : Mirrored from Series.__init__ copy : Mirrored from Series.__init__ fastpath : Mirrored from Series.__init__ dtype_if_empty : str, numpy.dtype, or ExtensionDtype This dtype will be passed explicitly if an empty Series will be instantiated. Returns ------- Series
pandas/core/construction.py
create_series_with_explicit_dtype
BhavarthShah/pandas
python
def create_series_with_explicit_dtype(data: Any=None, index: Optional[Union[(ArrayLike, Index)]]=None, dtype: Optional[Dtype]=None, name: Optional[str]=None, copy: bool=False, fastpath: bool=False, dtype_if_empty: Dtype=object) -> Series: '\n Helper to pass an explicit dtype when instantiating an empty Series.\n\n This silences a DeprecationWarning described in GitHub-17261.\n\n Parameters\n ----------\n data : Mirrored from Series.__init__\n index : Mirrored from Series.__init__\n dtype : Mirrored from Series.__init__\n name : Mirrored from Series.__init__\n copy : Mirrored from Series.__init__\n fastpath : Mirrored from Series.__init__\n dtype_if_empty : str, numpy.dtype, or ExtensionDtype\n This dtype will be passed explicitly if an empty Series will\n be instantiated.\n\n Returns\n -------\n Series\n ' from pandas.core.series import Series if (is_empty_data(data) and (dtype is None)): dtype = dtype_if_empty return Series(data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath)
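A short sketch of the helper above: dtype_if_empty is only injected when the data is empty (per is_empty_data) and no dtype was supplied; otherwise the call is a plain Series constructor. Internal API; import path assumed from the record.

from pandas.core.construction import create_series_with_explicit_dtype  # internal API; path per the record

s1 = create_series_with_explicit_dtype(None)                          # empty -> object dtype, no warning
s2 = create_series_with_explicit_dtype([], dtype_if_empty="float64")  # empty -> float64
s3 = create_series_with_explicit_dtype([1, 2, 3])                     # not empty -> inferred int64
print(s1.dtype, s2.dtype, s3.dtype)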
def cli_endpoint(fn): '\n Decorator for command line endpoints that execute dags or tasks. It runs\n the decorated function, captures exception (if any), sends a colored\n traceback to standard error and exits with code 1.\n\n Notes\n -----\n Functions decorated with this must be called with keyword arguments\n\n Call some_endpoint(catch_exception=False) to disable this behavior (e.g.\n for testing)\n ' @wraps(fn) def wrapper(catch_exception=True, **kwargs): if catch_exception: try: fn(**kwargs) except (DAGBuildError, DAGRenderError): error = traceback.format_exc() color = False except Exception: error = traceback.format_exc() color = True else: error = None if error: if color: tw = TerminalWriter(file=sys.stderr) tw._write_source(error.splitlines()) else: print(error, file=sys.stderr) sys.exit(1) else: fn(**kwargs) return wrapper
-2,612,740,748,282,923,500
Decorator for command line endpoints that execute dags or tasks. It runs the decorated function, captures exception (if any), sends a colored traceback to standard error and exits with code 1. Notes ----- Functions decorated with this must be called with keyword arguments Call some_endpoint(catch_exception=False) to disable this behavior (e.g. for testing)
src/ploomber/cli/io.py
cli_endpoint
abhishak3/ploomber
python
def cli_endpoint(fn): '\n Decorator for command line endpoints that execute dags or tasks. It runs\n the decorated function, captures exception (if any), sends a colored\n traceback to standard error and exits with code 1.\n\n Notes\n -----\n Functions decorated with this must be called with keyword arguments\n\n Call some_endpoint(catch_exception=False) to disable this behavior (e.g.\n for testing)\n ' @wraps(fn) def wrapper(catch_exception=True, **kwargs): if catch_exception: try: fn(**kwargs) except (DAGBuildError, DAGRenderError): error = traceback.format_exc() color = False except Exception: error = traceback.format_exc() color = True else: error = None if error: if color: tw = TerminalWriter(file=sys.stderr) tw._write_source(error.splitlines()) else: print(error, file=sys.stderr) sys.exit(1) else: fn(**kwargs) return wrapper
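A hypothetical endpoint decorated with cli_endpoint; the function name and body are illustrative, not from ploomber, and the import path is assumed from the record's path field.

from ploomber.cli.io import cli_endpoint  # module path assumed from the record

@cli_endpoint
def build(entry_point=None):
    # illustrative body: any exception raised here is turned into stderr output + exit(1)
    if entry_point is None:
        raise ValueError("missing entry point")

# build(entry_point=None)                                    # would print a traceback and sys.exit(1)
build(catch_exception=False, entry_point="pipeline.yaml")    # testing mode: exceptions propagate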
def command_endpoint(fn): '\n Decorator for command line endpoints that only parse dags or tasks but do\n not execute them. If it tails, it prints error message to stderror, then\n calls with exit code 1.\n ' @wraps(fn) def wrapper(**kwargs): try: fn(**kwargs) except Exception as e: print(f'Error: {e}', file=sys.stderr) sys.exit(1) return wrapper
-1,015,329,598,697,073,800
Decorator for command line endpoints that only parse dags or tasks but do not execute them. If it fails, it prints the error message to stderr, then exits with code 1.
src/ploomber/cli/io.py
command_endpoint
abhishak3/ploomber
python
def command_endpoint(fn): '\n Decorator for command line endpoints that only parse dags or tasks but do\n not execute them. If it tails, it prints error message to stderror, then\n calls with exit code 1.\n ' @wraps(fn) def wrapper(**kwargs): try: fn(**kwargs) except Exception as e: print(f'Error: {e}', file=sys.stderr) sys.exit(1) return wrapper
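The same pattern for command_endpoint, again with an illustrative function name: any exception becomes "Error: <message>" on stderr followed by exit code 1, and unlike cli_endpoint there is no opt-out flag.

from ploomber.cli.io import command_endpoint  # module path assumed from the record

@command_endpoint
def status(entry_point=None):
    if entry_point is None:
        raise ValueError("no pipeline found")

# status(entry_point=None)            # would print "Error: no pipeline found" and exit(1)
status(entry_point="pipeline.yaml")   # runs normally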
def assert_flash(self, text): 'asserts that message exists in flashes' for flash_dom in self.find_elements('.flash'): if (flash_dom.text == text): return print(flash_dom.text) raise AssertionError(f'Flash not found for text "{text}"')
-740,493,910,525,271,300
asserts that message exists in flashes
qa327_test/frontend/geek_base.py
assert_flash
nicoleooi/cmpe327
python
def assert_flash(self, text): for flash_dom in self.find_elements('.flash'): if (flash_dom.text == text): return print(flash_dom.text) raise AssertionError(f'Flash not found for text "{text}"')
def login_test_user(self, email=TEST_USER.email, password='test_frontend'): 'login our test user' self.open((base_url + '/login')) self.input('#email', email) self.input('#password', password) self.click('#btn-submit')
214,767,214,298,577,860
login our test user
qa327_test/frontend/geek_base.py
login_test_user
nicoleooi/cmpe327
python
def login_test_user(self, email=TEST_USER.email, password='test_frontend'): self.open((base_url + '/login')) self.input('#email', email) self.input('#password', password) self.click('#btn-submit')
def reject_outliers(y, x=None, m=2.0, replaceNaN=True): ' Reject outliers:\n If replaceNaN is true: they are replaced by NaN \n Otherwise they are removed\n ' if (m == 0): pass else: dd = np.abs((y - np.nanmedian(y))) mdev = np.nanmedian(dd) if mdev: ss = (dd / mdev) b = (ss < m) if replaceNaN: y = y.copy() y[(~ b)] = np.nan else: y = y[b] if (x is not None): x = x[b] if (x is None): return y else: return (x, y)
2,903,634,235,489,495,000
Reject outliers: If replaceNaN is true: they are replaced by NaN Otherwise they are removed
pydatview/tools/signal.py
reject_outliers
cdrtm/pyDatView
python
def reject_outliers(y, x=None, m=2.0, replaceNaN=True): ' Reject outliers:\n If replaceNaN is true: they are replaced by NaN \n Otherwise they are removed\n ' if (m == 0): pass else: dd = np.abs((y - np.nanmedian(y))) mdev = np.nanmedian(dd) if mdev: ss = (dd / mdev) b = (ss < m) if replaceNaN: y = y.copy() y[(~ b)] = np.nan else: y = y[b] if (x is not None): x = x[b] if (x is None): return y else: return (x, y)
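A quick sketch of reject_outliers on a signal with one spike; the module is assumed importable as pydatview.tools.signal, matching the record's path field.

import numpy as np
from pydatview.tools.signal import reject_outliers  # module path assumed from the record

y = np.array([1.0, 1.1, 0.9, 50.0, 1.0, 1.05])
x = np.arange(len(y))

print(reject_outliers(y, m=3.0))                          # the 50.0 spike becomes nan, length unchanged
x2, y2 = reject_outliers(y, x=x, m=3.0, replaceNaN=False)
print(x2, y2)                                             # spike removed from both x and y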
def moving_average(a, n=3): ' \n perform moving average, return a vector of same length as input\n\n NOTE: also in kalman.filters\n ' a = a.ravel() a = np.concatenate((([a[0]] * (n - 1)), a)) ret = np.cumsum(a, dtype=float) ret[n:] = (ret[n:] - ret[:(- n)]) ret = (ret[(n - 1):] / n) return ret
-1,125,062,380,859,889,400
perform moving average, return a vector of same length as input NOTE: also in kalman.filters
pydatview/tools/signal.py
moving_average
cdrtm/pyDatView
python
def moving_average(a, n=3): ' \n perform moving average, return a vector of same length as input\n\n NOTE: also in kalman.filters\n ' a = a.ravel() a = np.concatenate((([a[0]] * (n - 1)), a)) ret = np.cumsum(a, dtype=float) ret[n:] = (ret[n:] - ret[:(- n)]) ret = (ret[(n - 1):] / n) return ret
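A sketch showing that the left edge is padded with the first sample, so the output keeps the input length (same import-path assumption as above).

import numpy as np
from pydatview.tools.signal import moving_average  # module path assumed from the record

a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(moving_average(a, n=3))   # [1.         1.33333333 2.         3.         4.        ]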
def lowpass1(y, dt, fc=3): ' \n 1st order low pass filter\n ' tau = (1 / ((2 * np.pi) * fc)) alpha = (dt / (tau + dt)) y_filt = np.zeros(y.shape) y_filt[0] = y[0] for i in np.arange(1, len(y)): y_filt[i] = ((alpha * y[i]) + ((1 - alpha) * y_filt[(i - 1)])) return y_filt
7,886,073,476,410,002,000
1st order low pass filter
pydatview/tools/signal.py
lowpass1
cdrtm/pyDatView
python
def lowpass1(y, dt, fc=3): ' \n \n ' tau = (1 / ((2 * np.pi) * fc)) alpha = (dt / (tau + dt)) y_filt = np.zeros(y.shape) y_filt[0] = y[0] for i in np.arange(1, len(y)): y_filt[i] = ((alpha * y[i]) + ((1 - alpha) * y_filt[(i - 1)])) return y_filt
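A sketch of the first-order low-pass: a 1 Hz tone plus a 20 Hz ripple is filtered with a 3 Hz cutoff, which damps the ripple (same import-path assumption).

import numpy as np
from pydatview.tools.signal import lowpass1  # module path assumed from the record

dt = 0.001
t = np.arange(0, 2, dt)
y = np.sin(2 * np.pi * 1 * t) + 0.5 * np.sin(2 * np.pi * 20 * t)
y_lp = lowpass1(y, dt=dt, fc=3)   # 20 Hz ripple strongly attenuated, 1 Hz component kept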
def highpass1(y, dt, fc=3): ' \n 1st order high pass filter\n ' tau = (1 / ((2 * np.pi) * fc)) alpha = (tau / (tau + dt)) y_filt = np.zeros(y.shape) y_filt[0] = 0 for i in np.arange(1, len(y)): y_filt[i] = ((alpha * y_filt[(i - 1)]) + (alpha * (y[i] - y[(i - 1)]))) m0 = np.mean(y) m1 = np.mean(y_filt) y_filt += (m0 - m1) return y_filt
-2,147,949,212,982,904,600
1st order high pass filter
pydatview/tools/signal.py
highpass1
cdrtm/pyDatView
python
def highpass1(y, dt, fc=3): ' \n \n ' tau = (1 / ((2 * np.pi) * fc)) alpha = (tau / (tau + dt)) y_filt = np.zeros(y.shape) y_filt[0] = 0 for i in np.arange(1, len(y)): y_filt[i] = ((alpha * y_filt[(i - 1)]) + (alpha * (y[i] - y[(i - 1)]))) m0 = np.mean(y) m1 = np.mean(y_filt) y_filt += (m0 - m1) return y_filt
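The complementary sketch for the first-order high-pass: the same two-tone signal with a 3 Hz cutoff keeps the 20 Hz ripple and removes the slow 1 Hz component (note the filter re-adds the input mean at the end).

import numpy as np
from pydatview.tools.signal import highpass1  # module path assumed from the record

dt = 0.001
t = np.arange(0, 2, dt)
y = np.sin(2 * np.pi * 1 * t) + 0.5 * np.sin(2 * np.pi * 20 * t)
y_hp = highpass1(y, dt=dt, fc=3)  # 1 Hz trend removed, 20 Hz ripple kept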
def zero_crossings(y, x=None, direction=None): "\n Find zero-crossing points in a discrete vector, using linear interpolation.\n\n direction: 'up' or 'down', to select only up-crossings or down-crossings\n\n returns: \n x values xzc such that y(yzc)==0\n indexes izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included)\n\n if direction is not provided, also returns:\n sign, equal to 1 for up crossing\n " if (x is None): x = np.arange(len(y)) if np.any(((x[1:] - x[0:(- 1)]) <= 0.0)): raise Exception('x values need to be in ascending order') iBef = np.where(((y[1:] * y[0:(- 1)]) < 0.0))[0] xzc = (x[iBef] - ((y[iBef] * (x[(iBef + 1)] - x[iBef])) / (y[(iBef + 1)] - y[iBef]))) iZero = np.where((y == 0.0))[0] iZero = iZero[np.where(((iZero > 0) & (iZero < (x.size - 1))))] iZero = iZero[np.where(((y[(iZero - 1)] * y[(iZero + 1)]) < 0.0))] xzc = np.concatenate((xzc, x[iZero])) iBef = np.concatenate((iBef, iZero)) iSort = np.argsort(xzc) (xzc, iBef) = (xzc[iSort], iBef[iSort]) sign = np.sign((y[(iBef + 1)] - y[iBef])) if (direction == 'up'): I = np.where((sign == 1))[0] return (xzc[I], iBef[I]) elif (direction == 'down'): I = np.where((sign == (- 1)))[0] return (xzc[I], iBef[I]) elif (direction is not None): raise Exception('Direction should be either `up` or `down`') return (xzc, iBef, sign)
4,096,691,605,760,065,000
Find zero-crossing points in a discrete vector, using linear interpolation. direction: 'up' or 'down', to select only up-crossings or down-crossings returns: x values xzc such that y(xzc)==0 indexes izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included) if direction is not provided, also returns: sign, equal to 1 for up crossing
pydatview/tools/signal.py
zero_crossings
cdrtm/pyDatView
python
def zero_crossings(y, x=None, direction=None): "\n Find zero-crossing points in a discrete vector, using linear interpolation.\n\n direction: 'up' or 'down', to select only up-crossings or down-crossings\n\n returns: \n x values xzc such that y(yzc)==0\n indexes izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included)\n\n if direction is not provided, also returns:\n sign, equal to 1 for up crossing\n " if (x is None): x = np.arange(len(y)) if np.any(((x[1:] - x[0:(- 1)]) <= 0.0)): raise Exception('x values need to be in ascending order') iBef = np.where(((y[1:] * y[0:(- 1)]) < 0.0))[0] xzc = (x[iBef] - ((y[iBef] * (x[(iBef + 1)] - x[iBef])) / (y[(iBef + 1)] - y[iBef]))) iZero = np.where((y == 0.0))[0] iZero = iZero[np.where(((iZero > 0) & (iZero < (x.size - 1))))] iZero = iZero[np.where(((y[(iZero - 1)] * y[(iZero + 1)]) < 0.0))] xzc = np.concatenate((xzc, x[iZero])) iBef = np.concatenate((iBef, iZero)) iSort = np.argsort(xzc) (xzc, iBef) = (xzc[iSort], iBef[iSort]) sign = np.sign((y[(iBef + 1)] - y[iBef])) if (direction == 'up'): I = np.where((sign == 1))[0] return (xzc[I], iBef[I]) elif (direction == 'down'): I = np.where((sign == (- 1)))[0] return (xzc[I], iBef[I]) elif (direction is not None): raise Exception('Direction should be either `up` or `down`') return (xzc, iBef, sign)
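A sketch of zero_crossings on a sine sampled over [0.1, 10]: three interior crossings are found by interpolation, the sign marks down/up, and direction='up' keeps only the up-crossing (same import-path assumption).

import numpy as np
from pydatview.tools.signal import zero_crossings  # module path assumed from the record

x = np.linspace(0.1, 10, 500)
y = np.sin(x)

xzc, izc, sign = zero_crossings(y, x=x)
print(np.round(xzc, 3))   # ~[3.142 6.283 9.425]  (pi, 2*pi, 3*pi)
print(sign)               # [-1.  1. -1.]

xup, iup = zero_crossings(y, x=x, direction='up')
print(np.round(xup, 3))   # ~[6.283]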
def correlation(x, nMax=80, dt=1, method='manual'): ' \n Compute auto correlation of a signal\n ' nvec = np.arange(0, nMax) sigma2 = np.var(x) R = np.zeros(nMax) R[0] = 1 for (i, nDelay) in enumerate(nvec[1:]): R[(i + 1)] = (np.mean((x[0:(- nDelay)] * x[nDelay:])) / sigma2) tau = (nvec * dt) return (R, tau)
-753,259,868,257,545,100
Compute auto correlation of a signal
pydatview/tools/signal.py
correlation
cdrtm/pyDatView
python
def correlation(x, nMax=80, dt=1, method='manual'): ' \n \n ' nvec = np.arange(0, nMax) sigma2 = np.var(x) R = np.zeros(nMax) R[0] = 1 for (i, nDelay) in enumerate(nvec[1:]): R[(i + 1)] = (np.mean((x[0:(- nDelay)] * x[nDelay:])) / sigma2) tau = (nvec * dt) return (R, tau)
def correlated_signal(coeff, n=1000): '\n Create a correlated random signal of length `n` based on the correlation coefficient `coeff`\n value[t] = coeff * value[t-1] + (1-coeff) * random\n ' if ((coeff < 0) or (coeff > 1)): raise Exception('Correlation coefficient should be between 0 and 1') x = np.zeros(n) rvec = rand(n) x[0] = rvec[0] for m in np.arange(1, n): x[m] = ((coeff * x[(m - 1)]) + ((1 - coeff) * rvec[m])) x -= np.mean(x) return x
734,792,984,720,503,700
Create a correlated random signal of length `n` based on the correlation coefficient `coeff` value[t] = coeff * value[t-1] + (1-coeff) * random
pydatview/tools/signal.py
correlated_signal
cdrtm/pyDatView
python
def correlated_signal(coeff, n=1000): '\n Create a correlated random signal of length `n` based on the correlation coefficient `coeff`\n value[t] = coeff * value[t-1] + (1-coeff) * random\n ' if ((coeff < 0) or (coeff > 1)): raise Exception('Correlation coefficient should be between 0 and 1') x = np.zeros(n) rvec = rand(n) x[0] = rvec[0] for m in np.arange(1, n): x[m] = ((coeff * x[(m - 1)]) + ((1 - coeff) * rvec[m])) x -= np.mean(x) return x
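A sketch tying the two helpers together: correlated_signal generates an AR(1)-style sequence and correlation estimates its normalized autocorrelation; R[0] is 1 by construction and R decays slowly when coeff is close to 1 (same import-path assumption).

from pydatview.tools.signal import correlated_signal, correlation  # module path assumed from the record

x = correlated_signal(coeff=0.9, n=5000)
R, tau = correlation(x, nMax=50, dt=0.1)
print(tau[:3])      # [0.  0.1 0.2]
print(R[0], R[1])   # 1.0, and a lag-1 value of roughly 0.9 for coeff=0.9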
def build_colormap2label(): 'Build a RGB color to label mapping for segmentation.' colormap2label = np.zeros((256 ** 3)) for (i, colormap) in enumerate(VOC_COLORMAP): colormap2label[((((colormap[0] * 256) + colormap[1]) * 256) + colormap[2])] = i return colormap2label
2,136,949,172,295,368,700
Build a RGB color to label mapping for segmentation.
tools/convet_voc2coco/voc2coco.py
build_colormap2label
yhpengtu/CenterIMask
python
def build_colormap2label(): colormap2label = np.zeros((256 ** 3)) for (i, colormap) in enumerate(VOC_COLORMAP): colormap2label[((((colormap[0] * 256) + colormap[1]) * 256) + colormap[2])] = i return colormap2label
def voc_label_indices(colormap, colormap2label): 'Map a RGB color to a label.' colormap = colormap.astype('int32') idx = ((((colormap[:, :, 0] * 256) + colormap[:, :, 1]) * 256) + colormap[:, :, 2]) return colormap2label[idx]
-3,168,059,356,542,787,600
Map a RGB color to a label.
tools/convet_voc2coco/voc2coco.py
voc_label_indices
yhpengtu/CenterIMask
python
def voc_label_indices(colormap, colormap2label): colormap = colormap.astype('int32') idx = ((((colormap[:, :, 0] * 256) + colormap[:, :, 1]) * 256) + colormap[:, :, 2]) return colormap2label[idx]
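A self-contained sketch of the colour-to-label mapping used by build_colormap2label / voc_label_indices. VOC_COLORMAP and VOC_CLASSES are module-level constants of voc2coco.py that are not shown in these records, so a two-entry stand-in palette is used here.

import numpy as np

VOC_COLORMAP = [[0, 0, 0], [128, 0, 0]]   # stand-in palette: background, class 1

# same packing as build_colormap2label: one slot per 24-bit RGB value
colormap2label = np.zeros(256 ** 3)
for i, colormap in enumerate(VOC_COLORMAP):
    colormap2label[(colormap[0] * 256 + colormap[1]) * 256 + colormap[2]] = i

# a 1x2 "annotation image": one background pixel, one class-1 pixel
img = np.array([[[0, 0, 0], [128, 0, 0]]], dtype=np.uint8)
cm = img.astype('int32')                  # avoid uint8 overflow, as in voc_label_indices
idx = (cm[:, :, 0] * 256 + cm[:, :, 1]) * 256 + cm[:, :, 2]
print(colormap2label[idx])                # [[0. 1.]]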
def inference(model, dataset, limit): 'Run detection on images in the given directory.' if (not os.path.exists(RESULTS_DIR)): os.makedirs(RESULTS_DIR) time_dir = '{:%Y%m%dT%H%M%S}'.format(datetime.datetime.now()) time_dir = os.path.join(RESULTS_DIR, time_dir) os.makedirs(time_dir) for image_id in dataset.image_ids[:limit]: image = dataset.load_image(image_id) r = model.detect([image], verbose=0)[0] source_id = dataset.image_info[image_id]['id'] if (len(r['class_ids']) > 0): print('[*] {}th image has {} instance(s).'.format(image_id, len(r['class_ids']))) visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], dataset.class_names, r['scores'], show_bbox=True, show_mask=True, title='Predictions') plt.savefig('{}/{}'.format(time_dir, dataset.image_info[image_id]['id'])) plt.close() else: plt.imshow(image) plt.savefig('{}/noinstance_{}'.format(time_dir, dataset.image_info[image_id]['id'])) print('[*] {}th image have no instance.'.format(image_id)) plt.close()
-6,147,800,993,385,219,000
Run detection on images in the given directory.
tools/convet_voc2coco/voc2coco.py
inference
yhpengtu/CenterIMask
python
def inference(model, dataset, limit): if (not os.path.exists(RESULTS_DIR)): os.makedirs(RESULTS_DIR) time_dir = '{:%Y%m%dT%H%M%S}'.format(datetime.datetime.now()) time_dir = os.path.join(RESULTS_DIR, time_dir) os.makedirs(time_dir) for image_id in dataset.image_ids[:limit]: image = dataset.load_image(image_id) r = model.detect([image], verbose=0)[0] source_id = dataset.image_info[image_id]['id'] if (len(r['class_ids']) > 0): print('[*] {}th image has {} instance(s).'.format(image_id, len(r['class_ids']))) visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], dataset.class_names, r['scores'], show_bbox=True, show_mask=True, title='Predictions') plt.savefig('{}/{}'.format(time_dir, dataset.image_info[image_id]['id'])) plt.close() else: plt.imshow(image) plt.savefig('{}/noinstance_{}'.format(time_dir, dataset.image_info[image_id]['id'])) print('[*] {}th image have no instance.'.format(image_id)) plt.close()
def load_voc(self, dataset_dir, trainval, year='2012'): "Load a voc_year of the VOC dataset.\n dataset_dir: The root directory of the VOC dataset, example: '/mnt/disk1/VOCdevkit'\n trainval: 'train' or 'val' for Training or Validation\n year: '2007' or '2012' for VOC dataset\n " voc_year = ('VOC' + year) Segmentation = os.path.join(dataset_dir, voc_year, 'ImageSets', 'Segmentation') JPEGImages = os.path.join(dataset_dir, voc_year, 'JPEGImages') Annotations = os.path.join(dataset_dir, voc_year, 'Annotations') SegmentationClass = os.path.join(dataset_dir, voc_year, 'SegmentationClass') SegmentationObject = os.path.join(dataset_dir, voc_year, 'SegmentationObject') for (idx, class_name) in enumerate(VOC_CLASSES[1:]): self.add_class('voc', (idx + 1), class_name) assert (trainval in ['train', 'val']) annotation_file = os.path.join(Segmentation, (trainval + '.txt')) image_ids = [] with open(annotation_file) as f: image_id_list = [line.strip() for line in f] image_ids += image_id_list for image_id in image_ids: image_file_name = '{}.jpg'.format(image_id) mask_file_name = '{}.png'.format(image_id) xml_file_name = '{}.xml'.format(image_id) image_path = os.path.join(JPEGImages, image_file_name) with open(os.path.join(Annotations, xml_file_name)) as f: soup = bs(f, 'lxml') objects = soup.find_all('object') image_contains_class_flag = False for obj in objects: class_name = obj.find('name').text if (class_name in VOC_CLASSES): image_contains_class_flag = True continue if image_contains_class_flag: class_mask_path = os.path.join(SegmentationClass, mask_file_name) object_mask_path = os.path.join(SegmentationObject, mask_file_name) self.add_image('voc', image_id=image_file_name, path=image_path, class_mask_path=class_mask_path, object_mask_path=object_mask_path)
-2,206,683,044,585,229,000
Load a voc_year of the VOC dataset. dataset_dir: The root directory of the VOC dataset, example: '/mnt/disk1/VOCdevkit' trainval: 'train' or 'val' for Training or Validation year: '2007' or '2012' for VOC dataset
tools/convet_voc2coco/voc2coco.py
load_voc
yhpengtu/CenterIMask
python
def load_voc(self, dataset_dir, trainval, year='2012'): "Load a voc_year of the VOC dataset.\n dataset_dir: The root directory of the VOC dataset, example: '/mnt/disk1/VOCdevkit'\n trainval: 'train' or 'val' for Training or Validation\n year: '2007' or '2012' for VOC dataset\n " voc_year = ('VOC' + year) Segmentation = os.path.join(dataset_dir, voc_year, 'ImageSets', 'Segmentation') JPEGImages = os.path.join(dataset_dir, voc_year, 'JPEGImages') Annotations = os.path.join(dataset_dir, voc_year, 'Annotations') SegmentationClass = os.path.join(dataset_dir, voc_year, 'SegmentationClass') SegmentationObject = os.path.join(dataset_dir, voc_year, 'SegmentationObject') for (idx, class_name) in enumerate(VOC_CLASSES[1:]): self.add_class('voc', (idx + 1), class_name) assert (trainval in ['train', 'val']) annotation_file = os.path.join(Segmentation, (trainval + '.txt')) image_ids = [] with open(annotation_file) as f: image_id_list = [line.strip() for line in f] image_ids += image_id_list for image_id in image_ids: image_file_name = '{}.jpg'.format(image_id) mask_file_name = '{}.png'.format(image_id) xml_file_name = '{}.xml'.format(image_id) image_path = os.path.join(JPEGImages, image_file_name) with open(os.path.join(Annotations, xml_file_name)) as f: soup = bs(f, 'lxml') objects = soup.find_all('object') image_contains_class_flag = False for obj in objects: class_name = obj.find('name').text if (class_name in VOC_CLASSES): image_contains_class_flag = True continue if image_contains_class_flag: class_mask_path = os.path.join(SegmentationClass, mask_file_name) object_mask_path = os.path.join(SegmentationObject, mask_file_name) self.add_image('voc', image_id=image_file_name, path=image_path, class_mask_path=class_mask_path, object_mask_path=object_mask_path)
def load_raw_mask(self, image_id, class_or_object): "load two kinds of mask of VOC dataset.\n image_id: id of mask\n class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject\n Returns:\n image: numpy of mask image.\n " assert (class_or_object in ['class_mask', 'object_mask']) image = skimage.io.imread(self.image_info[image_id][(class_or_object + '_path')]) if (image.ndim != 3): image = skimage.color.gray2rgb(image) if (image.shape[(- 1)] == 4): image = image[..., :3] return image
2,401,107,649,179,283,500
Load one of the two kinds of masks in the VOC dataset. image_id: id of mask class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject Returns: image: numpy array of the mask image.
tools/convet_voc2coco/voc2coco.py
load_raw_mask
yhpengtu/CenterIMask
python
def load_raw_mask(self, image_id, class_or_object): "load two kinds of mask of VOC dataset.\n image_id: id of mask\n class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject\n Returns:\n image: numpy of mask image.\n " assert (class_or_object in ['class_mask', 'object_mask']) image = skimage.io.imread(self.image_info[image_id][(class_or_object + '_path')]) if (image.ndim != 3): image = skimage.color.gray2rgb(image) if (image.shape[(- 1)] == 4): image = image[..., :3] return image
def load_class_label(self, image_id): "Mapping SegmentationClass image's color to indice of ground truth \n image_id: id of mask\n Return:\n class_label: [height, width] matrix contains values form 0 to 20\n " raw_mask = self.load_raw_mask(image_id, 'class_mask') class_label = voc_label_indices(raw_mask, build_colormap2label()) return class_label
8,214,092,490,726,870,000
Map a SegmentationClass image's colors to ground-truth indices image_id: id of mask Return: class_label: [height, width] matrix containing values from 0 to 20
tools/convet_voc2coco/voc2coco.py
load_class_label
yhpengtu/CenterIMask
python
def load_class_label(self, image_id): "Mapping SegmentationClass image's color to indice of ground truth \n image_id: id of mask\n Return:\n class_label: [height, width] matrix contains values form 0 to 20\n " raw_mask = self.load_raw_mask(image_id, 'class_mask') class_label = voc_label_indices(raw_mask, build_colormap2label()) return class_label
def load_mask(self, image_id): 'Mapping annotation images to real Masks(MRCNN needed)\n image_id: id of mask\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n ' class_label = self.load_class_label(image_id) instance_mask = self.load_raw_mask(image_id, 'object_mask') max_indice = int(np.max(class_label)) instance_label = [] instance_class = [] for i in range(1, (max_indice + 1)): if (not np.any((class_label == i))): continue gt_indice = i object_filter = (class_label == i) object_filter = object_filter.astype(np.uint8) object_filter = np.dstack((object_filter, object_filter, object_filter)) filtered = np.multiply(object_filter, instance_mask) gray = cv2.cvtColor(filtered, cv2.COLOR_RGB2GRAY) max_gray = np.max(gray) for sub_index in range(1, (max_gray + 1)): if (not np.any((gray == sub_index))): continue instance_filter = (gray == sub_index) instance_label += [instance_filter] instance_class += [gt_indice] masks = np.asarray(instance_label).transpose((1, 2, 0)) classes_ids = np.asarray(instance_class) return (masks, classes_ids)
-5,372,344,417,436,508,000
Map annotation images to real masks (as needed by MRCNN) image_id: id of mask Returns: masks: A bool array of shape [height, width, instance count] with one mask per instance. class_ids: a 1D array of class IDs of the instance masks.
tools/convet_voc2coco/voc2coco.py
load_mask
yhpengtu/CenterIMask
python
def load_mask(self, image_id): 'Mapping annotation images to real Masks(MRCNN needed)\n image_id: id of mask\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n ' class_label = self.load_class_label(image_id) instance_mask = self.load_raw_mask(image_id, 'object_mask') max_indice = int(np.max(class_label)) instance_label = [] instance_class = [] for i in range(1, (max_indice + 1)): if (not np.any((class_label == i))): continue gt_indice = i object_filter = (class_label == i) object_filter = object_filter.astype(np.uint8) object_filter = np.dstack((object_filter, object_filter, object_filter)) filtered = np.multiply(object_filter, instance_mask) gray = cv2.cvtColor(filtered, cv2.COLOR_RGB2GRAY) max_gray = np.max(gray) for sub_index in range(1, (max_gray + 1)): if (not np.any((gray == sub_index))): continue instance_filter = (gray == sub_index) instance_label += [instance_filter] instance_class += [gt_indice] masks = np.asarray(instance_label).transpose((1, 2, 0)) classes_ids = np.asarray(instance_class) return (masks, classes_ids)
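A simplified, self-contained illustration of the instance-splitting idea in load_mask: given a per-pixel class map and a per-pixel instance-id map, emit one boolean mask per instance together with its class id. The real method derives both maps from the SegmentationClass/SegmentationObject PNGs via skimage and cv2, which is skipped here, so this is a conceptual sketch rather than the exact pipeline.

import numpy as np

class_label = np.array([[0, 1, 1],
                        [0, 1, 2],
                        [0, 2, 2]])   # 0 = background, 1/2 = class indices
instance_id = np.array([[0, 1, 1],
                        [0, 1, 2],
                        [0, 3, 3]])   # a distinct id per object instance

masks, class_ids = [], []
for cls in range(1, class_label.max() + 1):
    in_class = class_label == cls                 # plays the role of object_filter in load_mask
    for obj in np.unique(instance_id[in_class]):  # plays the role of the gray levels in load_mask
        if obj == 0:
            continue
        masks.append((instance_id == obj) & in_class)
        class_ids.append(cls)

masks = np.stack(masks, axis=-1)                  # [height, width, instance count]
print(masks.shape, class_ids)                     # (3, 3, 3) [1, 2, 2]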
def getKeyId(self): "\n Get the keyId used by this peer (this peer's identifier).\n\n This is stored in the key store.\n " return self.keyStore.getKeyId()
-688,107,004,979,895,800
Get the keyId used by this peer (this peer's identifier). This is stored in the key store.
tint/peer.py
getKeyId
8468/tint
python
def getKeyId(self): "\n Get the keyId used by this peer (this peer's identifier).\n\n This is stored in the key store.\n " return self.keyStore.getKeyId()
def getPublicKey(self): "\n Get the public key used by this peer.\n\n This is stored in the key store.\n " return self.keyStore.getPublicKey()
-1,210,259,793,139,717,600
Get the public key used by this peer. This is stored in the key store.
tint/peer.py
getPublicKey
8468/tint
python
def getPublicKey(self): "\n Get the public key used by this peer.\n\n This is stored in the key store.\n " return self.keyStore.getPublicKey()
def set(self, hostKeyId, storagePath, storageValue): "\n Set a value on a host.\n\n @param hostKeyId: The key id for the destination host to set the\n given key. This could be the local host, in which case the hostKey\n will be the same as this C{Peer}'s keyStore keyId.\n\n @param storagePath: The path to the key to set. For instance, this\n could be something like /chat/<somekey>/inbox.\n\n @param storageValue: The value to set.\n " if (hostKeyId == self.getKeyId()): return self.storage.set(hostKeyId, storagePath, storageValue) return self.pool.send(hostKeyId, 'set', storagePath, storageValue)
6,969,237,322,244,427,000
Set a value on a host. @param hostKeyId: The key id for the destination host to set the given key. This could be the local host, in which case the hostKey will be the same as this C{Peer}'s keyStore keyId. @param storagePath: The path to the key to set. For instance, this could be something like /chat/<somekey>/inbox. @param storageValue: The value to set.
tint/peer.py
set
8468/tint
python
def set(self, hostKeyId, storagePath, storageValue): "\n Set a value on a host.\n\n @param hostKeyId: The key id for the destination host to set the\n given key. This could be the local host, in which case the hostKey\n will be the same as this C{Peer}'s keyStore keyId.\n\n @param storagePath: The path to the key to set. For instance, this\n could be something like /chat/<somekey>/inbox.\n\n @param storageValue: The value to set.\n " if (hostKeyId == self.getKeyId()): return self.storage.set(hostKeyId, storagePath, storageValue) return self.pool.send(hostKeyId, 'set', storagePath, storageValue)
def get(self, hostKeyId, storagePath): "\n Get a value from a host.\n\n @param hostKeyId: The key id for the destination host to get the\n given key. This could be the local host, in which case the hostKey\n will be the same as this C{Peer}'s keyStore keyId.\n\n @param storagePath: The path to the key to get. For instance, this\n could be something like /chat/<somekey>/inbox.\n " if (hostKeyId == self.getKeyId()): self.log.debug(('getting storagePath %s on self' % storagePath)) return self.storage.get(hostKeyId, storagePath) self.log.debug(('getting storagePath %s on %s' % (storagePath, hostKeyId))) return self.pool.send(hostKeyId, 'get', storagePath)
1,392,539,997,521,182,500
Get a value from a host. @param hostKeyId: The key id for the destination host to get the given key. This could be the local host, in which case the hostKey will be the same as this C{Peer}'s keyStore keyId. @param storagePath: The path to the key to get. For instance, this could be something like /chat/<somekey>/inbox.
tint/peer.py
get
8468/tint
python
def get(self, hostKeyId, storagePath): "\n Get a value from a host.\n\n @param hostKeyId: The key id for the destination host to get the\n given key. This could be the local host, in which case the hostKey\n will be the same as this C{Peer}'s keyStore keyId.\n\n @param storagePath: The path to the key to get. For instance, this\n could be something like /chat/<somekey>/inbox.\n " if (hostKeyId == self.getKeyId()): self.log.debug(('getting storagePath %s on self' % storagePath)) return self.storage.get(hostKeyId, storagePath) self.log.debug(('getting storagePath %s on %s' % (storagePath, hostKeyId))) return self.pool.send(hostKeyId, 'get', storagePath)
def push(self, hostKeyId, storagePath, storageValue): '\n Given key, create a new key at <key>/<id> with the given value, where <id>\n is an auto-incrementing integer value starting at 0.\n ' if (hostKeyId == self.getKeyId()): return self.storage.push(hostKeyId, storagePath, storageValue) return self.pool.send(hostKeyId, 'push', storagePath, storageValue)
7,086,835,767,524,889,000
Given key, create a new key at <key>/<id> with the given value, where <id> is an auto-incrementing integer value starting at 0.
tint/peer.py
push
8468/tint
python
def push(self, hostKeyId, storagePath, storageValue): '\n Given key, create a new key at <key>/<id> with the given value, where <id>\n is an auto-incrementing integer value starting at 0.\n ' if (hostKeyId == self.getKeyId()): return self.storage.push(hostKeyId, storagePath, storageValue) return self.pool.send(hostKeyId, 'push', storagePath, storageValue)
def ls(self, hostKeyId, storagePath, offset, length): '\n Given key, get all children keys (with the given offset and length). Length cannot\n be more than 1000.\n ' if (hostKeyId == self.getKeyId()): return self.storage.ls(hostKeyId, storagePath, offset, length) return self.pool.send(hostKeyId, 'ls', storagePath, offset, length)
4,173,619,235,199,410,000
Given key, get all children keys (with the given offset and length). Length cannot be more than 1000.
tint/peer.py
ls
8468/tint
python
def ls(self, hostKeyId, storagePath, offset, length): '\n Given key, get all children keys (with the given offset and length). Length cannot\n be more than 1000.\n ' if (hostKeyId == self.getKeyId()): return self.storage.ls(hostKeyId, storagePath, offset, length) return self.pool.send(hostKeyId, 'ls', storagePath, offset, length)
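All four storage methods above (set, get, push, ls) share one dispatch rule: if the target hostKeyId equals this peer's own keyId, the call goes straight to local storage; otherwise it is forwarded through the connection pool. A hedged usage sketch follows; the storage path, value, and helper name are made up for illustration, and depending on the networking framework the calls may return deferred results rather than plain values.

def store_and_read(peer, other_key_id):
    # Writes under our own key id hit self.storage directly ...
    local_id = peer.getKeyId()
    peer.set(local_id, '/chat/room1/inbox', 'hello from me')
    # ... while any other key id is routed through the connection pool.
    return peer.get(other_key_id, '/chat/room1/inbox')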
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown=None, handle_missing='count', min_group_size=None, combine_min_nan_groups=True, min_group_name=None, normalize=False): 'Count encoding for categorical features.\n\n For a given categorical feature, replace the names of the groups\n with the group counts.\n\n Parameters\n ----------\n\n verbose: int\n integer indicating verbosity of output. 0 for none.\n cols: list\n a list of columns to encode, if None, all string and categorical columns\n will be encoded.\n drop_invariant: bool\n boolean for whether or not to drop columns with 0 variance.\n return_df: bool\n boolean for whether to return a pandas DataFrame from transform\n (otherwise it will be a numpy array).\n handle_missing: str\n how to handle missing values at fit time. Options are \'error\', \'return_nan\',\n and \'count\'. Default \'count\', which treats NaNs as a countable category at\n fit time.\n handle_unknown: str, int or dict of.\n how to handle unknown labels at transform time. Options are \'error\'\n \'return_nan\' and an int. Defaults to None which uses NaN behaviour\n specified at fit time. Passing an int will fill with this int value.\n normalize: bool or dict of.\n whether to normalize the counts to the range (0, 1). See Pandas `value_counts`\n for more details.\n min_group_size: int, float or dict of.\n the minimal count threshold of a group needed to ensure it is not\n combined into a "leftovers" group. If float in the range (0, 1),\n `min_group_size` is calculated as int(X.shape[0] * min_group_size).\n Note: This value may change type based on the `normalize` variable. If True\n this will become a float. If False, it will be an int.\n min_group_name: None, str or dict of.\n Set the name of the combined minimum groups when the defaults become\n too long. Default None. In this case the category names will be joined\n alphabetically with a `_` delimiter.\n Note: The default name can be long and may keep changing, for example, \n in cross-validation.\n combine_min_nan_groups: bool or dict of.\n whether to combine the leftovers group with NaN group. Default True. Can\n also be forced to combine with \'force\' meaning small groups are effectively\n counted as NaNs. Force can only be used when \'handle_missing\' is \'count\' or \'error\'.\n\n\n Example\n -------\n >>> import pandas as pd\n >>> from sklearn.datasets import load_boston\n >>> from category_encoders import CountEncoder\n\n >>> bunch = load_boston()\n >>> y = bunch.target\n >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)\n >>> enc = CountEncoder(cols=[\'CHAS\', \'RAD\']).fit(X, y)\n >>> numeric_dataset = enc.transform(X)\n\n >>> print(numeric_dataset.info())\n <class \'pandas.core.frame.DataFrame\'>\n RangeIndex: 506 entries, 0 to 505\n Data columns (total 13 columns):\n CRIM 506 non-null float64\n ZN 506 non-null float64\n INDUS 506 non-null float64\n CHAS 506 non-null int64\n NOX 506 non-null float64\n RM 506 non-null float64\n AGE 506 non-null float64\n DIS 506 non-null float64\n RAD 506 non-null int64\n TAX 506 non-null float64\n PTRATIO 506 non-null float64\n B 506 non-null float64\n LSTAT 506 non-null float64\n dtypes: float64(11), int64(2)\n memory usage: 51.5 KB\n None\n\n References\n ----------\n\n ' self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.normalize = normalize self.min_group_size = min_group_size self.min_group_name = min_group_name self.combine_min_nan_groups = combine_min_nan_groups self._min_group_categories = {} self._normalize = {} self._min_group_name = {} self._combine_min_nan_groups = {} self._min_group_size = {} self._handle_unknown = {} self._handle_missing = {}
4,560,433,223,553,619,000
Count encoding for categorical features. For a given categorical feature, replace the names of the groups with the group counts. Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'count'. Default 'count', which treats NaNs as a countable category at fit time. handle_unknown: str, int or dict of. how to handle unknown labels at transform time. Options are 'error' 'return_nan' and an int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. normalize: bool or dict of. whether to normalize the counts to the range (0, 1). See Pandas `value_counts` for more details. min_group_size: int, float or dict of. the minimal count threshold of a group needed to ensure it is not combined into a "leftovers" group. If float in the range (0, 1), `min_group_size` is calculated as int(X.shape[0] * min_group_size). Note: This value may change type based on the `normalize` variable. If True this will become a float. If False, it will be an int. min_group_name: None, str or dict of. Set the name of the combined minimum groups when the defaults become too long. Default None. In this case the category names will be joined alphabetically with a `_` delimiter. Note: The default name can be long and may keep changing, for example, in cross-validation. combine_min_nan_groups: bool or dict of. whether to combine the leftovers group with NaN group. Default True. Can also be forced to combine with 'force' meaning small groups are effectively counted as NaNs. Force can only be used when 'handle_missing' is 'count' or 'error'. Example ------- >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> from category_encoders import CountEncoder >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CountEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(2) memory usage: 51.5 KB None References ----------
category_encoders/count.py
__init__
JoshuaC3/categorical-encoding
python
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown=None, handle_missing='count', min_group_size=None, combine_min_nan_groups=True, min_group_name=None, normalize=False): 'Count encoding for categorical features.\n\n For a given categorical feature, replace the names of the groups\n with the group counts.\n\n Parameters\n ----------\n\n verbose: int\n integer indicating verbosity of output. 0 for none.\n cols: list\n a list of columns to encode, if None, all string and categorical columns\n will be encoded.\n drop_invariant: bool\n boolean for whether or not to drop columns with 0 variance.\n return_df: bool\n boolean for whether to return a pandas DataFrame from transform\n (otherwise it will be a numpy array).\n handle_missing: str\n how to handle missing values at fit time. Options are \'error\', \'return_nan\',\n and \'count\'. Default \'count\', which treats NaNs as a countable category at\n fit time.\n handle_unknown: str, int or dict of.\n how to handle unknown labels at transform time. Options are \'error\'\n \'return_nan\' and an int. Defaults to None which uses NaN behaviour\n specified at fit time. Passing an int will fill with this int value.\n normalize: bool or dict of.\n whether to normalize the counts to the range (0, 1). See Pandas `value_counts`\n for more details.\n min_group_size: int, float or dict of.\n the minimal count threshold of a group needed to ensure it is not\n combined into a "leftovers" group. If float in the range (0, 1),\n `min_group_size` is calculated as int(X.shape[0] * min_group_size).\n Note: This value may change type based on the `normalize` variable. If True\n this will become a float. If False, it will be an int.\n min_group_name: None, str or dict of.\n Set the name of the combined minimum groups when the defaults become\n too long. Default None. In this case the category names will be joined\n alphabetically with a `_` delimiter.\n Note: The default name can be long and may keep changing, for example, \n in cross-validation.\n combine_min_nan_groups: bool or dict of.\n whether to combine the leftovers group with NaN group. Default True. Can\n also be forced to combine with \'force\' meaning small groups are effectively\n counted as NaNs. Force can only be used when \'handle_missing\' is \'count\' or \'error\'.\n\n\n Example\n -------\n >>> import pandas as pd\n >>> from sklearn.datasets import load_boston\n >>> from category_encoders import CountEncoder\n\n >>> bunch = load_boston()\n >>> y = bunch.target\n >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)\n >>> enc = CountEncoder(cols=[\'CHAS\', \'RAD\']).fit(X, y)\n >>> numeric_dataset = enc.transform(X)\n\n >>> print(numeric_dataset.info())\n <class \'pandas.core.frame.DataFrame\'>\n RangeIndex: 506 entries, 0 to 505\n Data columns (total 13 columns):\n CRIM 506 non-null float64\n ZN 506 non-null float64\n INDUS 506 non-null float64\n CHAS 506 non-null int64\n NOX 506 non-null float64\n RM 506 non-null float64\n AGE 506 non-null float64\n DIS 506 non-null float64\n RAD 506 non-null int64\n TAX 506 non-null float64\n PTRATIO 506 non-null float64\n B 506 non-null float64\n LSTAT 506 non-null float64\n dtypes: float64(11), int64(2)\n memory usage: 51.5 KB\n None\n\n References\n ----------\n\n ' self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.normalize = normalize self.min_group_size = min_group_size self.min_group_name = min_group_name self.combine_min_nan_groups = combine_min_nan_groups self._min_group_categories = {} self._normalize = {} self._min_group_name = {} self._combine_min_nan_groups = {} self._min_group_size = {} self._handle_unknown = {} self._handle_missing = {}
def fit(self, X, y=None, **kwargs): 'Fit encoder according to X.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : encoder\n Returns self.\n ' X = util.convert_input(X) self._dim = X.shape[1] if (self.cols is None): self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) self._check_set_create_dict_attrs() self._fit_count_encode(X, y) if self.drop_invariant: self.drop_cols = [] X_temp = self.transform(X) generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if (X_temp[x].var() <= 0.0001)] return self
8,885,542,150,674,907,000
Fit encoder according to X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self.
category_encoders/count.py
fit
JoshuaC3/categorical-encoding
python
def fit(self, X, y=None, **kwargs): 'Fit encoder according to X.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : encoder\n Returns self.\n ' X = util.convert_input(X) self._dim = X.shape[1] if (self.cols is None): self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) self._check_set_create_dict_attrs() self._fit_count_encode(X, y) if self.drop_invariant: self.drop_cols = [] X_temp = self.transform(X) generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if (X_temp[x].var() <= 0.0001)] return self
def transform(self, X, y=None): 'Perform the transformation to new categorical data.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples]\n \n Returns\n -------\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.\n ' if (self._dim is None): raise ValueError('Must train encoder before it can be used to transform data.') X = util.convert_input(X) if (X.shape[1] != self._dim): raise ValueError(('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim))) if (not self.cols): return X (X, _) = self._transform_count_encode(X, y) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df: return X else: return X.values
-7,646,785,548,588,537,000
Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied.
category_encoders/count.py
transform
JoshuaC3/categorical-encoding
python
def transform(self, X, y=None): 'Perform the transformation to new categorical data.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples]\n \n Returns\n -------\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.\n ' if (self._dim is None): raise ValueError('Must train encoder before it can be used to transform data.') X = util.convert_input(X) if (X.shape[1] != self._dim): raise ValueError(('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim))) if (not self.cols): return X (X, _) = self._transform_count_encode(X, y) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df: return X else: return X.values
def _fit_count_encode(self, X_in, y): 'Perform the count encoding.' X = X_in.copy(deep=True) if (self.cols is None): self.cols = X.columns.values self.mapping = {} for col in self.cols: if X[col].isna().any(): if (self._handle_missing[col] == 'error'): raise ValueError(('Missing data found in column %s at fit time.' % (col,))) elif (self._handle_missing[col] not in ['count', 'return_nan', 'error']): raise ValueError(('%s key in `handle_missing` should be one of: `value`, `return_nan` and `error`.' % (col,))) self.mapping[col] = X[col].value_counts(normalize=self._normalize[col], dropna=False) if (self._handle_missing[col] == 'return_nan'): self.mapping[col][np.NaN] = np.NaN if any([(val is not None) for val in self._min_group_size.values()]): self.combine_min_categories(X)
-7,622,429,411,437,038,000
Perform the count encoding.
category_encoders/count.py
_fit_count_encode
JoshuaC3/categorical-encoding
python
def _fit_count_encode(self, X_in, y): X = X_in.copy(deep=True) if (self.cols is None): self.cols = X.columns.values self.mapping = {} for col in self.cols: if X[col].isna().any(): if (self._handle_missing[col] == 'error'): raise ValueError(('Missing data found in column %s at fit time.' % (col,))) elif (self._handle_missing[col] not in ['count', 'return_nan', 'error']): raise ValueError(('%s key in `handle_missing` should be one of: `value`, `return_nan` and `error`.' % (col,))) self.mapping[col] = X[col].value_counts(normalize=self._normalize[col], dropna=False) if (self._handle_missing[col] == 'return_nan'): self.mapping[col][np.NaN] = np.NaN if any([(val is not None) for val in self._min_group_size.values()]): self.combine_min_categories(X)
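The mapping built by _fit_count_encode is just a per-column pandas value_counts, so its two key options can be previewed outside the encoder: dropna=False keeps NaN as a countable category, and normalize=True turns counts into frequencies. A small illustrative snippet (the data is made up, not from the repository):

import numpy as np
import pandas as pd

col = pd.Series(['a', 'a', 'b', np.nan, 'a', 'b'])
counts = col.value_counts(normalize=False, dropna=False)  # a -> 3, b -> 2, NaN -> 1
freqs = col.value_counts(normalize=True, dropna=False)    # a -> 0.5, b -> 0.333..., NaN -> 0.166...
print(counts)
print(freqs)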
def _transform_count_encode(self, X_in, y): 'Perform the transform count encoding.' X = X_in.copy(deep=True) for col in self.cols: if (self._min_group_size is not None): if (col in self._min_group_categories.keys()): X[col] = X[col].map(self._min_group_categories[col]).fillna(X[col]) X[col] = X[col].map(self.mapping[col]) if isinstance(self._handle_unknown[col], np.integer): X[col] = X[col].fillna(self._handle_unknown[col]) elif ((self._handle_unknown[col] == 'error') and X[col].isna().any()): raise ValueError(('Missing data found in column %s at transform time.' % (col,))) return (X, self.mapping)
-5,871,763,005,789,190,000
Perform the transform count encoding.
category_encoders/count.py
_transform_count_encode
JoshuaC3/categorical-encoding
python
def _transform_count_encode(self, X_in, y): X = X_in.copy(deep=True) for col in self.cols: if (self._min_group_size is not None): if (col in self._min_group_categories.keys()): X[col] = X[col].map(self._min_group_categories[col]).fillna(X[col]) X[col] = X[col].map(self.mapping[col]) if isinstance(self._handle_unknown[col], np.integer): X[col] = X[col].fillna(self._handle_unknown[col]) elif ((self._handle_unknown[col] == 'error') and X[col].isna().any()): raise ValueError(('Missing data found in column %s at transform time.' % (col,))) return (X, self.mapping)
def combine_min_categories(self, X): 'Combine small categories into a single category.' for (col, mapper) in self.mapping.items(): if (self._normalize[col] and isinstance(self._min_group_size[col], int)): self._min_group_size[col] = (self._min_group_size[col] / X.shape[0]) elif ((not self._normalize) and isinstance(self._min_group_size[col], float)): self._min_group_size[col] = (self._min_group_size[col] * X.shape[0]) if (self._combine_min_nan_groups[col] is True): min_groups_idx = (mapper < self._min_group_size[col]) elif (self._combine_min_nan_groups[col] == 'force'): min_groups_idx = ((mapper < self._min_group_size[col]) | mapper.index.isna()) else: min_groups_idx = ((mapper < self._min_group_size[col]) & (~ mapper.index.isna())) min_groups_sum = mapper.loc[min_groups_idx].sum() if ((min_groups_sum > 0) and (min_groups_idx.sum() > 1)): if isinstance(self._min_group_name[col], str): min_group_mapper_name = self._min_group_name else: min_group_mapper_name = '_'.join([str(idx) for idx in mapper.loc[min_groups_idx].index.astype(str).sort_values()]) self._min_group_categories[col] = {cat: min_group_mapper_name for cat in mapper.loc[min_groups_idx].index.tolist()} if (not min_groups_idx.all()): mapper = mapper.loc[(~ min_groups_idx)] if mapper.index.is_categorical(): mapper.index = mapper.index.add_categories(min_group_mapper_name) mapper[min_group_mapper_name] = min_groups_sum self.mapping[col] = mapper
6,089,501,862,832,393,000
Combine small categories into a single category.
category_encoders/count.py
combine_min_categories
JoshuaC3/categorical-encoding
python
def combine_min_categories(self, X): for (col, mapper) in self.mapping.items(): if (self._normalize[col] and isinstance(self._min_group_size[col], int)): self._min_group_size[col] = (self._min_group_size[col] / X.shape[0]) elif ((not self._normalize) and isinstance(self._min_group_size[col], float)): self._min_group_size[col] = (self._min_group_size[col] * X.shape[0]) if (self._combine_min_nan_groups[col] is True): min_groups_idx = (mapper < self._min_group_size[col]) elif (self._combine_min_nan_groups[col] == 'force'): min_groups_idx = ((mapper < self._min_group_size[col]) | mapper.index.isna()) else: min_groups_idx = ((mapper < self._min_group_size[col]) & (~ mapper.index.isna())) min_groups_sum = mapper.loc[min_groups_idx].sum() if ((min_groups_sum > 0) and (min_groups_idx.sum() > 1)): if isinstance(self._min_group_name[col], str): min_group_mapper_name = self._min_group_name else: min_group_mapper_name = '_'.join([str(idx) for idx in mapper.loc[min_groups_idx].index.astype(str).sort_values()]) self._min_group_categories[col] = {cat: min_group_mapper_name for cat in mapper.loc[min_groups_idx].index.tolist()} if (not min_groups_idx.all()): mapper = mapper.loc[(~ min_groups_idx)] if mapper.index.is_categorical(): mapper.index = mapper.index.add_categories(min_group_mapper_name) mapper[min_group_mapper_name] = min_groups_sum self.mapping[col] = mapper
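combine_min_categories above collapses every category whose count falls below the per-column threshold into a single joint entry, named by joining the member names alphabetically with an underscore. A small sketch of that behaviour on a plain value_counts-style mapper (data and threshold are made up for illustration):

import pandas as pd

mapper = pd.Series({'cat': 50, 'dog': 40, 'emu': 3, 'fox': 2})
min_group_size = 5
small = mapper < min_group_size
if small.sum() > 1:
    # 'emu' and 'fox' are merged into a single 'emu_fox' entry with count 5.
    combined_name = '_'.join(sorted(mapper[small].index.astype(str)))
    combined = pd.Series({combined_name: mapper[small].sum()})
    mapper = pd.concat([mapper[~small], combined])
print(mapper)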
def _check_set_create_dict_attrs(self): 'Check attributes that can be dicts and format for all self.cols.' dict_attrs = {'normalize': False, 'min_group_name': None, 'combine_min_nan_groups': True, 'min_group_size': None, 'handle_unknown': 'value', 'handle_missing': 'value'} for (attr_name, attr_default) in dict_attrs.items(): attr = copy(getattr(self, attr_name)) if isinstance(attr, dict): for col in self.cols: if (col not in attr): attr[col] = attr_default setattr(self, ('_' + attr_name), attr) else: attr_dict = {} for col in self.cols: attr_dict[col] = attr setattr(self, ('_' + attr_name), attr_dict) for col in self.cols: if ((self._handle_missing[col] == 'return_nan') and (self._combine_min_nan_groups[col] == 'force')): raise ValueError(("Cannot have `handle_missing` == 'return_nan' and 'combine_min_nan_groups' == 'force' for columns `%s`." % (col,)))
-5,527,060,141,706,155,000
Check attributes that can be dicts and format for all self.cols.
category_encoders/count.py
_check_set_create_dict_attrs
JoshuaC3/categorical-encoding
python
def _check_set_create_dict_attrs(self): dict_attrs = {'normalize': False, 'min_group_name': None, 'combine_min_nan_groups': True, 'min_group_size': None, 'handle_unknown': 'value', 'handle_missing': 'value'} for (attr_name, attr_default) in dict_attrs.items(): attr = copy(getattr(self, attr_name)) if isinstance(attr, dict): for col in self.cols: if (col not in attr): attr[col] = attr_default setattr(self, ('_' + attr_name), attr) else: attr_dict = {} for col in self.cols: attr_dict[col] = attr setattr(self, ('_' + attr_name), attr_dict) for col in self.cols: if ((self._handle_missing[col] == 'return_nan') and (self._combine_min_nan_groups[col] == 'force')): raise ValueError(("Cannot have `handle_missing` == 'return_nan' and 'combine_min_nan_groups' == 'force' for columns `%s`." % (col,)))
def _test_null_distribution_basic(self, test: str, lazy: bool, quick_scale: bool=False, n_cells: int=3000, n_genes: int=200, n_groups: int=3): '\n Test if de.wald() generates a uniform p-value distribution\n if it is given data simulated based on the null model. Returns the p-value\n of the two-side Kolmgorov-Smirnov test for equality of the observed \n p-value distriubution and a uniform distribution.\n\n :param n_cells: Number of cells to simulate (number of observations per test).\n :param n_genes: Number of genes to simulate (number of tests).\n ' (sim, sample_description) = self._prepate_data(n_cells=n_cells, n_genes=n_genes, n_groups=n_groups) test = de.test.pairwise(data=sim.input_data, sample_description=sample_description, grouping='condition', test=test, lazy=lazy, quick_scale=quick_scale, noise_model=self.noise_model) _ = test.summary() if lazy: pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue else: pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue logging.getLogger('diffxpy').info(('KS-test pvalue for null model match of wald(): %f' % pval_h0)) assert (pval_h0 > 0.05), ('KS-Test failed: pval_h0=%f is <= 0.05!' % np.round(pval_h0, 5)) return True
-1,909,257,724,457,016,800
Test if de.wald() generates a uniform p-value distribution if it is given data simulated based on the null model. Returns the p-value of the two-sided Kolmogorov-Smirnov test for equality of the observed p-value distribution and a uniform distribution. :param n_cells: Number of cells to simulate (number of observations per test). :param n_genes: Number of genes to simulate (number of tests).
diffxpy/unit_test/test_pairwise.py
_test_null_distribution_basic
gokceneraslan/diffxpy
python
def _test_null_distribution_basic(self, test: str, lazy: bool, quick_scale: bool=False, n_cells: int=3000, n_genes: int=200, n_groups: int=3): '\n Test if de.wald() generates a uniform p-value distribution\n if it is given data simulated based on the null model. Returns the p-value\n of the two-side Kolmgorov-Smirnov test for equality of the observed \n p-value distriubution and a uniform distribution.\n\n :param n_cells: Number of cells to simulate (number of observations per test).\n :param n_genes: Number of genes to simulate (number of tests).\n ' (sim, sample_description) = self._prepate_data(n_cells=n_cells, n_genes=n_genes, n_groups=n_groups) test = de.test.pairwise(data=sim.input_data, sample_description=sample_description, grouping='condition', test=test, lazy=lazy, quick_scale=quick_scale, noise_model=self.noise_model) _ = test.summary() if lazy: pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue else: pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue logging.getLogger('diffxpy').info(('KS-test pvalue for null model match of wald(): %f' % pval_h0)) assert (pval_h0 > 0.05), ('KS-Test failed: pval_h0=%f is <= 0.05!' % np.round(pval_h0, 5)) return True
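The acceptance criterion in the test above is that the per-gene p-values produced under the null model are uniform on [0, 1], which a Kolmogorov-Smirnov test against the uniform distribution should not reject. A self-contained sketch of that check, with simulated p-values standing in for test.pval_pairs(...):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
pvals = rng.uniform(size=200)  # stand-in for the per-gene p-values of the null model
ks_pval = stats.kstest(pvals, 'uniform').pvalue
assert ks_pval > 0.05, 'KS test rejected uniformity of the null p-values'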
def info(self): 'Print info about this unit, overrides superclass method.' print('Grrr, I am the Orc Figher!')
-3,397,929,164,373,522,400
Print info about this unit, overrides superclass method.
wargame/designpatterns/pythonic_orcfighter.py
info
jeantardelli/wargameRepo
python
def info(self): print('Grrr, I am the Orc Figher!')
def all_equal(left, right, cache=None): 'Check whether two objects `left` and `right` are equal.\n\n Parameters\n ----------\n left : Union[object, Expr, Node]\n right : Union[object, Expr, Node]\n cache : Optional[Dict[Tuple[Node, Node], bool]]\n A dictionary indicating whether two Nodes are equal\n ' if (cache is None): cache = {} if util.is_iterable(left): return (util.is_iterable(right) and (len(left) == len(right)) and all(itertools.starmap(functools.partial(all_equal, cache=cache), zip(left, right)))) if hasattr(left, 'equals'): return left.equals(right, cache=cache) return (left == right)
-8,700,499,191,523,920,000
Check whether two objects `left` and `right` are equal. Parameters ---------- left : Union[object, Expr, Node] right : Union[object, Expr, Node] cache : Optional[Dict[Tuple[Node, Node], bool]] A dictionary indicating whether two Nodes are equal
ibis/expr/operations.py
all_equal
odidev/ibis
python
def all_equal(left, right, cache=None): 'Check whether two objects `left` and `right` are equal.\n\n Parameters\n ----------\n left : Union[object, Expr, Node]\n right : Union[object, Expr, Node]\n cache : Optional[Dict[Tuple[Node, Node], bool]]\n A dictionary indicating whether two Nodes are equal\n ' if (cache is None): cache = {} if util.is_iterable(left): return (util.is_iterable(right) and (len(left) == len(right)) and all(itertools.starmap(functools.partial(all_equal, cache=cache), zip(left, right)))) if hasattr(left, 'equals'): return left.equals(right, cache=cache) return (left == right)
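Assuming all_equal is in scope as defined above, its behaviour on plain Python values is easy to check: non-iterables fall through to ==, iterables are compared element-wise and recursively, and a length mismatch short-circuits to False (objects exposing .equals(), such as expression nodes, go through that method with a shared memoisation cache). Illustrative asserts, not from the source:

assert all_equal(3, 3)
assert all_equal([1, (2, 3)], [1, (2, 3)])   # recursive element-wise comparison
assert not all_equal([1, 2], [1, 2, 3])      # length mismatch -> False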
def __getstate__(self) -> Dict[(str, Any)]: 'The attributes _expr_cached and _hash are\n used as caches; they can be excluded from\n serialization without affecting correctness.\n\n Excluding _expr_cached and _hash from serialization\n will allow the serialized bytes to be the same for\n equivalent Node objects.\n\n Returns\n -------\n Dict[str, Any]\n A dictionary storing the objects attributes.\n ' excluded_slots = {'_expr_cached', '_hash'} return {slot: getattr(self, slot) for slot in self.__slots__ if (slot not in excluded_slots)}
5,526,020,258,835,681,000
The attributes _expr_cached and _hash are used as caches; they can be excluded from serialization without affecting correctness. Excluding _expr_cached and _hash from serialization will allow the serialized bytes to be the same for equivalent Node objects. Returns ------- Dict[str, Any] A dictionary storing the objects attributes.
ibis/expr/operations.py
__getstate__
odidev/ibis
python
def __getstate__(self) -> Dict[(str, Any)]: 'The attributes _expr_cached and _hash are\n used as caches; they can be excluded from\n serialization without affecting correctness.\n\n Excluding _expr_cached and _hash from serialization\n will allow the serialized bytes to be the same for\n equivalent Node objects.\n\n Returns\n -------\n Dict[str, Any]\n A dictionary storing the objects attributes.\n ' excluded_slots = {'_expr_cached', '_hash'} return {slot: getattr(self, slot) for slot in self.__slots__ if (slot not in excluded_slots)}
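The pattern above, leaving pure-cache slots out of __getstate__, means equivalent nodes serialize to identical bytes and the caches are simply recomputed after unpickling. A standalone sketch of the same idea with a hypothetical Node class:

import pickle

class Node:
    __slots__ = ('value', '_hash')

    def __init__(self, value):
        self.value = value
        self._hash = hash(value)  # cache; safe to drop when serializing

    def __getstate__(self):
        return {s: getattr(self, s) for s in self.__slots__ if s != '_hash'}

    def __setstate__(self, state):
        for slot in state:
            setattr(self, slot, state[slot])

a, b = Node('x'), Node('x')
assert pickle.dumps(a) == pickle.dumps(b)  # identical bytes for equivalent nodes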
def __setstate__(self, state: Dict[(str, Any)]) -> None: '\n Parameters\n ----------\n state: Dict[str, Any]\n A dictionary storing the objects attributes.\n ' for slot in state: setattr(self, slot, state[slot])
5,854,483,499,252,395,000
Parameters ---------- state: Dict[str, Any] A dictionary storing the objects attributes.
ibis/expr/operations.py
__setstate__
odidev/ibis
python
def __setstate__(self, state: Dict[(str, Any)]) -> None: '\n Parameters\n ----------\n state: Dict[str, Any]\n A dictionary storing the objects attributes.\n ' for slot in state: setattr(self, slot, state[slot])
def output_type(self): '\n This function must resolve the output type of the expression and return\n the node wrapped in the appropriate ValueExpr type.\n ' raise NotImplementedError
5,740,557,941,522,150,000
This function must resolve the output type of the expression and return the node wrapped in the appropriate ValueExpr type.
ibis/expr/operations.py
output_type
odidev/ibis
python
def output_type(self): '\n This function must resolve the output type of the expression and return\n the node wrapped in the appropriate ValueExpr type.\n ' raise NotImplementedError
def count(self): 'Only valid if the distinct contains a single column' return CountDistinct(self.arg)
3,605,110,978,898,989,600
Only valid if the distinct contains a single column
ibis/expr/operations.py
count
odidev/ibis
python
def count(self): return CountDistinct(self.arg)
def else_(self, result_expr): '\n Specify the default result when no case matches.\n\n Returns\n -------\n builder : CaseBuilder\n ' kwargs = {slot: getattr(self, slot) for slot in self.__slots__ if (slot != 'default')} result_expr = ir.as_value_expr(result_expr) kwargs['default'] = result_expr return type(self)(**kwargs)
-5,472,837,417,554,490,000
Specify the default result when no case matches. Returns ------- builder : CaseBuilder
ibis/expr/operations.py
else_
odidev/ibis
python
def else_(self, result_expr): '\n Specify the default result when no case matches.\n\n Returns\n -------\n builder : CaseBuilder\n ' kwargs = {slot: getattr(self, slot) for slot in self.__slots__ if (slot != 'default')} result_expr = ir.as_value_expr(result_expr) kwargs['default'] = result_expr return type(self)(**kwargs)
def when(self, case_expr, result_expr): '\n Add a new case-result pair.\n\n Parameters\n ----------\n case : Expr\n Expression to equality-compare with base expression. Must be\n comparable with the base.\n result : Expr\n Value when the case predicate evaluates to true.\n\n Returns\n -------\n builder : CaseBuilder\n ' case_expr = ir.as_value_expr(case_expr) result_expr = ir.as_value_expr(result_expr) if (not rlz.comparable(self.base, case_expr)): raise TypeError('Base expression and passed case are not comparable') cases = list(self.cases) cases.append(case_expr) results = list(self.results) results.append(result_expr) return type(self)(self.base, cases, results, self.default)
-23,041,507,715,912,564
Add a new case-result pair. Parameters ---------- case : Expr Expression to equality-compare with base expression. Must be comparable with the base. result : Expr Value when the case predicate evaluates to true. Returns ------- builder : CaseBuilder
ibis/expr/operations.py
when
odidev/ibis
python
def when(self, case_expr, result_expr): '\n Add a new case-result pair.\n\n Parameters\n ----------\n case : Expr\n Expression to equality-compare with base expression. Must be\n comparable with the base.\n result : Expr\n Value when the case predicate evaluates to true.\n\n Returns\n -------\n builder : CaseBuilder\n ' case_expr = ir.as_value_expr(case_expr) result_expr = ir.as_value_expr(result_expr) if (not rlz.comparable(self.base, case_expr)): raise TypeError('Base expression and passed case are not comparable') cases = list(self.cases) cases.append(case_expr) results = list(self.results) results.append(result_expr) return type(self)(self.base, cases, results, self.default)
def when(self, case_expr, result_expr): '\n Add a new case-result pair.\n\n Parameters\n ----------\n case : Expr\n Expression to equality-compare with base expression. Must be\n comparable with the base.\n result : Expr\n Value when the case predicate evaluates to true.\n\n Returns\n -------\n builder : CaseBuilder\n ' case_expr = ir.as_value_expr(case_expr) result_expr = ir.as_value_expr(result_expr) if (not isinstance(case_expr, ir.BooleanValue)): raise TypeError(case_expr) cases = list(self.cases) cases.append(case_expr) results = list(self.results) results.append(result_expr) return type(self)(cases, results, self.default)
7,831,332,286,804,111,000
Add a new case-result pair. Parameters ---------- case : Expr Expression to equality-compare with base expression. Must be comparable with the base. result : Expr Value when the case predicate evaluates to true. Returns ------- builder : CaseBuilder
ibis/expr/operations.py
when
odidev/ibis
python
def when(self, case_expr, result_expr): '\n Add a new case-result pair.\n\n Parameters\n ----------\n case : Expr\n Expression to equality-compare with base expression. Must be\n comparable with the base.\n result : Expr\n Value when the case predicate evaluates to true.\n\n Returns\n -------\n builder : CaseBuilder\n ' case_expr = ir.as_value_expr(case_expr) result_expr = ir.as_value_expr(result_expr) if (not isinstance(case_expr, ir.BooleanValue)): raise TypeError(case_expr) cases = list(self.cases) cases.append(case_expr) results = list(self.results) results.append(result_expr) return type(self)(cases, results, self.default)
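Both when() implementations above (and else_ before them) follow the same immutable-builder convention: every call copies the accumulated cases and returns a brand-new builder, so a partially specified case expression can be branched without mutating shared state. A toy illustration of that convention; the class and attribute names here are illustrative, not the ibis API:

class SimpleCaseBuilder:
    def __init__(self, cases=(), results=(), default=None):
        self.cases, self.results, self.default = tuple(cases), tuple(results), default

    def when(self, case, result):
        # Return a new builder; the receiver is left untouched.
        return SimpleCaseBuilder(self.cases + (case,), self.results + (result,), self.default)

    def else_(self, default):
        return SimpleCaseBuilder(self.cases, self.results, default)

base = SimpleCaseBuilder().when('a', 1)
with_default = base.else_(0)
assert base.default is None and with_default.default == 0  # `base` was not mutated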
def __init__(self, left, right): '\n Casting rules for type promotions (for resolving the output type) may\n depend in some cases on the target backend.\n\n TODO: how will overflows be handled? Can we provide anything useful in\n Ibis to help the user avoid them?\n\n :param left:\n :param right:\n ' super().__init__(*self._maybe_cast_args(left, right))
4,501,114,707,235,070,000
Casting rules for type promotions (for resolving the output type) may depend in some cases on the target backend. TODO: how will overflows be handled? Can we provide anything useful in Ibis to help the user avoid them? :param left: :param right:
ibis/expr/operations.py
__init__
odidev/ibis
python
def __init__(self, left, right): '\n Casting rules for type promotions (for resolving the output type) may\n depend in some cases on the target backend.\n\n TODO: how will overflows be handled? Can we provide anything useful in\n Ibis to help the user avoid them?\n\n :param left:\n :param right:\n ' super().__init__(*self._maybe_cast_args(left, right))
def __hash__(self) -> int: "Return the hash of a literal value.\n\n We override this method to make sure that we can handle things that\n aren't eminently hashable like an ``array<array<int64>>``.\n\n " return hash(self.dtype._literal_value_hash_key(self.value))
-8,880,341,266,466,899,000
Return the hash of a literal value. We override this method to make sure that we can handle things that aren't eminently hashable like an ``array<array<int64>>``.
ibis/expr/operations.py
__hash__
odidev/ibis
python
def __hash__(self) -> int: "Return the hash of a literal value.\n\n We override this method to make sure that we can handle things that\n aren't eminently hashable like an ``array<array<int64>>``.\n\n " return hash(self.dtype._literal_value_hash_key(self.value))
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]]=None, hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]]=None, lab_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, notification_settings: Optional[pulumi.Input[pulumi.InputType['NotificationSettingsArgs']]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[Union[(str, 'EnableStatus')]]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, target_resource_id: Optional[pulumi.Input[str]]=None, task_type: Optional[pulumi.Input[str]]=None, time_zone_id: Optional[pulumi.Input[str]]=None, weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]]=None, __props__=None, __name__=None, __opts__=None): "\n A schedule.\n API Version: 2018-09-15.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: If the schedule will occur once each day of the week, specify the daily recurrence.\n :param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: If the schedule will occur multiple times a day, specify the hourly recurrence.\n :param pulumi.Input[str] lab_name: The name of the lab.\n :param pulumi.Input[str] location: The location of the resource.\n :param pulumi.Input[str] name: The name of the schedule.\n :param pulumi.Input[pulumi.InputType['NotificationSettingsArgs']] notification_settings: Notification settings.\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule (i.e. Enabled, Disabled)\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.\n :param pulumi.Input[str] target_resource_id: The resource ID to which the schedule belongs\n :param pulumi.Input[str] task_type: The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).\n :param pulumi.Input[str] time_zone_id: The time zone ID (e.g. Pacific Standard time).\n :param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: If the schedule will occur only some days of the week, specify the weekly recurrence.\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['daily_recurrence'] = daily_recurrence __props__['hourly_recurrence'] = hourly_recurrence if ((lab_name is None) and (not opts.urn)): raise TypeError("Missing required property 'lab_name'") __props__['lab_name'] = lab_name __props__['location'] = location __props__['name'] = name __props__['notification_settings'] = notification_settings if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['status'] = status __props__['tags'] = tags __props__['target_resource_id'] = target_resource_id __props__['task_type'] = task_type __props__['time_zone_id'] = time_zone_id __props__['weekly_recurrence'] = weekly_recurrence __props__['created_date'] = None __props__['provisioning_state'] = None __props__['type'] = None __props__['unique_identifier'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:devtestlab/latest:Schedule'), pulumi.Alias(type_='azure-nextgen:devtestlab/v20150521preview:Schedule'), pulumi.Alias(type_='azure-nextgen:devtestlab/v20160515:Schedule'), pulumi.Alias(type_='azure-nextgen:devtestlab/v20180915:Schedule')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Schedule, __self__).__init__('azure-nextgen:devtestlab:Schedule', resource_name, __props__, opts)
6,186,674,530,000,249,000
A schedule. API Version: 2018-09-15. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: If the schedule will occur once each day of the week, specify the daily recurrence. :param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: If the schedule will occur multiple times a day, specify the hourly recurrence. :param pulumi.Input[str] lab_name: The name of the lab. :param pulumi.Input[str] location: The location of the resource. :param pulumi.Input[str] name: The name of the schedule. :param pulumi.Input[pulumi.InputType['NotificationSettingsArgs']] notification_settings: Notification settings. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule (i.e. Enabled, Disabled) :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource. :param pulumi.Input[str] target_resource_id: The resource ID to which the schedule belongs :param pulumi.Input[str] task_type: The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart). :param pulumi.Input[str] time_zone_id: The time zone ID (e.g. Pacific Standard time). :param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: If the schedule will occur only some days of the week, specify the weekly recurrence.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
__init__
pulumi/pulumi-azure-nextgen
python
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]]=None, hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]]=None, lab_name: Optional[pulumi.Input[str]]=None, location: Optional[pulumi.Input[str]]=None, name: Optional[pulumi.Input[str]]=None, notification_settings: Optional[pulumi.Input[pulumi.InputType['NotificationSettingsArgs']]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[Union[(str, 'EnableStatus')]]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, target_resource_id: Optional[pulumi.Input[str]]=None, task_type: Optional[pulumi.Input[str]]=None, time_zone_id: Optional[pulumi.Input[str]]=None, weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]]=None, __props__=None, __name__=None, __opts__=None): "\n A schedule.\n API Version: 2018-09-15.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: If the schedule will occur once each day of the week, specify the daily recurrence.\n :param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: If the schedule will occur multiple times a day, specify the hourly recurrence.\n :param pulumi.Input[str] lab_name: The name of the lab.\n :param pulumi.Input[str] location: The location of the resource.\n :param pulumi.Input[str] name: The name of the schedule.\n :param pulumi.Input[pulumi.InputType['NotificationSettingsArgs']] notification_settings: Notification settings.\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule (i.e. Enabled, Disabled)\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.\n :param pulumi.Input[str] target_resource_id: The resource ID to which the schedule belongs\n :param pulumi.Input[str] task_type: The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).\n :param pulumi.Input[str] time_zone_id: The time zone ID (e.g. Pacific Standard time).\n :param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: If the schedule will occur only some days of the week, specify the weekly recurrence.\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['daily_recurrence'] = daily_recurrence __props__['hourly_recurrence'] = hourly_recurrence if ((lab_name is None) and (not opts.urn)): raise TypeError("Missing required property 'lab_name'") __props__['lab_name'] = lab_name __props__['location'] = location __props__['name'] = name __props__['notification_settings'] = notification_settings if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['status'] = status __props__['tags'] = tags __props__['target_resource_id'] = target_resource_id __props__['task_type'] = task_type __props__['time_zone_id'] = time_zone_id __props__['weekly_recurrence'] = weekly_recurrence __props__['created_date'] = None __props__['provisioning_state'] = None __props__['type'] = None __props__['unique_identifier'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:devtestlab/latest:Schedule'), pulumi.Alias(type_='azure-nextgen:devtestlab/v20150521preview:Schedule'), pulumi.Alias(type_='azure-nextgen:devtestlab/v20160515:Schedule'), pulumi.Alias(type_='azure-nextgen:devtestlab/v20180915:Schedule')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Schedule, __self__).__init__('azure-nextgen:devtestlab:Schedule', resource_name, __props__, opts)
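A hedged usage sketch of the Schedule resource, based only on the constructor signature and parameter docs above. It assumes the class is re-exported from the devtestlab package, as is usual for these generated SDKs; the resource group, lab name, and tags are placeholders, and the task type and time zone values are taken from the docstring examples.

import pulumi
import pulumi_azure_nextgen.devtestlab as devtestlab

auto_shutdown = devtestlab.Schedule(
    'labVmsShutdown',
    resource_group_name='my-rg',           # required
    lab_name='my-devtest-lab',             # required
    name='LabVmsShutdown',
    task_type='LabVmsShutdownTask',        # example value from the docstring
    time_zone_id='Pacific Standard time',  # example value from the docstring
    status='Enabled',
    tags={'env': 'dev'},
)

pulumi.export('schedule_name', auto_shutdown.name)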
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'Schedule': "\n Get an existing Schedule resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return Schedule(resource_name, opts=opts, __props__=__props__)
8,867,794,031,495,096,000
Get an existing Schedule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
get
pulumi/pulumi-azure-nextgen
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'Schedule': "\n Get an existing Schedule resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return Schedule(resource_name, opts=opts, __props__=__props__)
@property @pulumi.getter(name='createdDate') def created_date(self) -> pulumi.Output[str]: '\n The creation date of the schedule.\n ' return pulumi.get(self, 'created_date')
-4,870,187,973,321,437,000
The creation date of the schedule.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
created_date
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='createdDate') def created_date(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'created_date')
@property @pulumi.getter(name='dailyRecurrence') def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]: '\n If the schedule will occur once each day of the week, specify the daily recurrence.\n ' return pulumi.get(self, 'daily_recurrence')
6,480,351,104,012,898,000
If the schedule will occur once each day of the week, specify the daily recurrence.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
daily_recurrence
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='dailyRecurrence') def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]: '\n \n ' return pulumi.get(self, 'daily_recurrence')
@property @pulumi.getter(name='hourlyRecurrence') def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]: '\n If the schedule will occur multiple times a day, specify the hourly recurrence.\n ' return pulumi.get(self, 'hourly_recurrence')
-210,472,516,599,062,460
If the schedule will occur multiple times a day, specify the hourly recurrence.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
hourly_recurrence
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='hourlyRecurrence') def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]: '\n \n ' return pulumi.get(self, 'hourly_recurrence')
@property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: '\n The location of the resource.\n ' return pulumi.get(self, 'location')
-6,989,812,945,498,137,000
The location of the resource.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
location
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'location')
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n The name of the resource.\n ' return pulumi.get(self, 'name')
7,945,008,266,317,837,000
The name of the resource.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
name
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter(name='notificationSettings') def notification_settings(self) -> pulumi.Output[Optional['outputs.NotificationSettingsResponse']]: '\n Notification settings.\n ' return pulumi.get(self, 'notification_settings')
873,310,138,035,010,600
Notification settings.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
notification_settings
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='notificationSettings') def notification_settings(self) -> pulumi.Output[Optional['outputs.NotificationSettingsResponse']]: '\n \n ' return pulumi.get(self, 'notification_settings')
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> pulumi.Output[str]: '\n The provisioning status of the resource.\n ' return pulumi.get(self, 'provisioning_state')
-5,777,047,059,194,198,000
The provisioning status of the resource.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
provisioning_state
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'provisioning_state')
@property @pulumi.getter def status(self) -> pulumi.Output[Optional[str]]: '\n The status of the schedule (i.e. Enabled, Disabled)\n ' return pulumi.get(self, 'status')
1,623,179,802,714,244,400
The status of the schedule (i.e. Enabled, Disabled)
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
status
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter def status(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'status')
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n The tags of the resource.\n ' return pulumi.get(self, 'tags')
4,713,149,495,578,682,000
The tags of the resource.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
tags
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n \n ' return pulumi.get(self, 'tags')
@property @pulumi.getter(name='targetResourceId') def target_resource_id(self) -> pulumi.Output[Optional[str]]: '\n The resource ID to which the schedule belongs\n ' return pulumi.get(self, 'target_resource_id')
1,420,396,896,255,958,500
The resource ID to which the schedule belongs
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
target_resource_id
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='targetResourceId') def target_resource_id(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'target_resource_id')
@property @pulumi.getter(name='taskType') def task_type(self) -> pulumi.Output[Optional[str]]: '\n The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).\n ' return pulumi.get(self, 'task_type')
5,791,849,811,834,436,000
The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
task_type
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='taskType') def task_type(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'task_type')
@property @pulumi.getter(name='timeZoneId') def time_zone_id(self) -> pulumi.Output[Optional[str]]: '\n The time zone ID (e.g. Pacific Standard time).\n ' return pulumi.get(self, 'time_zone_id')
4,756,117,501,304,452,000
The time zone ID (e.g. Pacific Standard time).
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
time_zone_id
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='timeZoneId') def time_zone_id(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'time_zone_id')
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n The type of the resource.\n ' return pulumi.get(self, 'type')
3,589,901,220,239,403,500
The type of the resource.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
type
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'type')
@property @pulumi.getter(name='uniqueIdentifier') def unique_identifier(self) -> pulumi.Output[str]: '\n The unique immutable identifier of a resource (Guid).\n ' return pulumi.get(self, 'unique_identifier')
2,468,897,841,730,923,500
The unique immutable identifier of a resource (Guid).
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
unique_identifier
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='uniqueIdentifier') def unique_identifier(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'unique_identifier')
@property @pulumi.getter(name='weeklyRecurrence') def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]: '\n If the schedule will occur only some days of the week, specify the weekly recurrence.\n ' return pulumi.get(self, 'weekly_recurrence')
-1,530,063,684,734,173,200
If the schedule will occur only some days of the week, specify the weekly recurrence.
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
weekly_recurrence
pulumi/pulumi-azure-nextgen
python
@property @pulumi.getter(name='weeklyRecurrence') def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]: '\n \n ' return pulumi.get(self, 'weekly_recurrence')
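Because each of these getters returns a pulumi.Output rather than a plain value, several of them are best combined through pulumi.Output.all(...).apply(...); a short sketch under the same placeholder assumptions as the earlier lookup example:

import pulumi
from pulumi_azure_nextgen.devtestlab import Schedule

# Placeholder lookup, as in the earlier sketch.
sched = Schedule.get(
    "observed-schedule",
    id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg"
       "/providers/Microsoft.DevTestLab/labs/example-lab/schedules/LabVmsShutdown",
)

# Outputs resolve asynchronously, so format them inside apply() instead of calling str() on them.
summary = pulumi.Output.all(sched.task_type, sched.status, sched.time_zone_id).apply(
    lambda args: f"{args[0]} is {args[1]} (time zone: {args[2]})"
)
pulumi.export("scheduleSummary", summary)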
def run(self, document, number_sentences): '\n :param: number_sentences, starts with 0 for the first sentence\n ' boundaries = (document.sentences_boundaries[0][0], document.sentences_boundaries[:(number_sentences + 1)][(- 1)][1]) document.text = document.text[boundaries[0]:boundaries[1]] document.sentences_boundaries = self._limitSenteceBoundaries(document.sentences_boundaries, boundaries[1]) document.words_boundaries = self._limitWordBoundaries(document.words_boundaries, boundaries[1]) document.entities = self._limitEntities(document.entities, boundaries[1]) document.triples = self._limitTriples(document.triples, boundaries[1]) return document
3,765,172,281,749,577,000
:param: number_sentences, starts with 0 for the first sentence
pipeline/filter.py
run
hadyelsahar/RE-NLG-Dataset
python
def run(self, document, number_sentences): '\n \n ' boundaries = (document.sentences_boundaries[0][0], document.sentences_boundaries[:(number_sentences + 1)][(- 1)][1]) document.text = document.text[boundaries[0]:boundaries[1]] document.sentences_boundaries = self._limitSenteceBoundaries(document.sentences_boundaries, boundaries[1]) document.words_boundaries = self._limitWordBoundaries(document.words_boundaries, boundaries[1]) document.entities = self._limitEntities(document.entities, boundaries[1]) document.triples = self._limitTriples(document.triples, boundaries[1]) return document
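The slice arithmetic inside run can be illustrated on its own; the sentence offsets below are invented, and the document class with its _limit* helpers is not reproduced here.

# Made-up (start, end) character offsets for three sentences.
sentences_boundaries = [(0, 40), (41, 90), (91, 150)]
number_sentences = 1  # keep sentences 0 and 1 (indexing starts at 0)

# Same expression as in run(): start of the first sentence up to the end of
# sentence number_sentences.
boundaries = (
    sentences_boundaries[0][0],
    sentences_boundaries[:number_sentences + 1][-1][1],
)
print(boundaries)  # (0, 90) -> document.text would be cut to text[0:90]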
def __init__(self, all_triples, entities): '\n :param: input TripleReaderTriples object\n :param: a list of entities that should be filtered\n ' self.wikidata_triples = all_triples self.entities = entities
-2,811,689,684,874,151,400
:param: input TripleReaderTriples object :param: a list of entities that should be filtered
pipeline/filter.py
__init__
hadyelsahar/RE-NLG-Dataset
python
def __init__(self, all_triples, entities): '\n :param: input TripleReaderTriples object\n :param: a list of entities that should be filtered\n ' self.wikidata_triples = all_triples self.entities = entities
def grabArtifactFromJenkins(**context): "\n Grab an artifact from the previous job.\n The python-jenkins library doesn't expose a method for that,\n but it is entirely possible to build the request manually.\n " import logging hook = JenkinsHook('jenkins_nqa') jenkins_server = hook.get_jenkins_server() url = context['task_instance'].xcom_pull(task_ids='trigger_job') url = (url + 'artifact/myartifact.xml') logging.info('url : %s', url) request = Request(url) response = jenkins_server.jenkins_open(request) logging.info('response: %s', response) return response
2,763,114,726,950,187,500
Grab an artifact from the previous job. The python-jenkins library doesn't expose a method for that, but it is entirely possible to build the request manually.
dags/jenkins_dag.py
grabArtifactFromJenkins
shameerb/incubator-airflow
python
def grabArtifactFromJenkins(**context): "\n Grab an artifact from the previous job.\n The python-jenkins library doesn't expose a method for that,\n but it is entirely possible to build the request manually.\n " import logging hook = JenkinsHook('jenkins_nqa') jenkins_server = hook.get_jenkins_server() url = context['task_instance'].xcom_pull(task_ids='trigger_job') url = (url + 'artifact/myartifact.xml') logging.info('url : %s', url) request = Request(url) response = jenkins_server.jenkins_open(request) logging.info('response: %s', response) return response
def api23_link_aggregation_groups_delete_with_http_info(self, ids=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'DELETE link-aggregation-groups\n\n Remove a link aggregation group to unbind the ports.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_delete_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.\n :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' if (ids is not None): if (not isinstance(ids, list)): ids = [ids] if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if ('ids' in params): query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
-3,378,384,162,495,183,400
DELETE link-aggregation-groups Remove a link aggregation group to unbind the ports. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api23_link_aggregation_groups_delete_with_http_info(async_req=True) >>> result = thread.get() :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: None If the method is called asynchronously, returns the request thread.
pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py
api23_link_aggregation_groups_delete_with_http_info
Flav-STOR-WL/py-pure-client
python
def api23_link_aggregation_groups_delete_with_http_info(self, ids=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'DELETE link-aggregation-groups\n\n Remove a link aggregation group to unbind the ports.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_delete_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.\n :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' if (ids is not None): if (not isinstance(ids, list)): ids = [ids] if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if ('ids' in params): query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def api23_link_aggregation_groups_get_with_http_info(self, continuation_token=None, filter=None, ids=None, limit=None, names=None, offset=None, sort=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): "GET link-aggregation-groups\n\n List the status and attributes of the Ethernet ports in the configured link aggregation groups.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.\n :param str filter: Exclude resources that don't match the specified criteria.\n :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.\n :param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.\n :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.\n :param int offset: The offset of the first resource to return from a collection.\n :param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: LinkAggregationGroupGetResponse\n If the method is called asynchronously,\n returns the request thread.\n " if (ids is not None): if (not isinstance(ids, list)): ids = [ids] if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('continuation_token' in params): query_params.append(('continuation_token', params['continuation_token'])) if ('filter' in params): query_params.append(('filter', params['filter'])) if ('ids' in params): query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='LinkAggregationGroupGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
-7,130,267,852,678,943,000
GET link-aggregation-groups List the status and attributes of the Ethernet ports in the configured link aggregation groups. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api23_link_aggregation_groups_get_with_http_info(async_req=True) >>> result = thread.get() :param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result. :param str filter: Exclude resources that don't match the specified criteria. :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. :param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request. :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. :param int offset: The offset of the first resource to return from a collection. :param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: LinkAggregationGroupGetResponse If the method is called asynchronously, returns the request thread.
pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py
api23_link_aggregation_groups_get_with_http_info
Flav-STOR-WL/py-pure-client
python
def api23_link_aggregation_groups_get_with_http_info(self, continuation_token=None, filter=None, ids=None, limit=None, names=None, offset=None, sort=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): "GET link-aggregation-groups\n\n List the status and attributes of the Ethernet ports in the configured link aggregation groups.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_get_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.\n :param str filter: Exclude resources that don't match the specified criteria.\n :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.\n :param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.\n :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.\n :param int offset: The offset of the first resource to return from a collection.\n :param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: LinkAggregationGroupGetResponse\n If the method is called asynchronously,\n returns the request thread.\n " if (ids is not None): if (not isinstance(ids, list)): ids = [ids] if (names is not None): if (not isinstance(names, list)): names = [names] if (sort is not None): if (not isinstance(sort, list)): sort = [sort] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (('limit' in params) and (params['limit'] < 1)): raise ValueError('Invalid value for parameter `limit` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `1`') if (('offset' in params) and (params['offset'] < 0)): raise ValueError('Invalid value for parameter `offset` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `0`') collection_formats = {} path_params = {} query_params = [] if ('continuation_token' in params): query_params.append(('continuation_token', params['continuation_token'])) if ('filter' in params): query_params.append(('filter', params['filter'])) if ('ids' in params): query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if ('limit' in params): query_params.append(('limit', params['limit'])) if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if ('offset' in params): query_params.append(('offset', params['offset'])) if ('sort' in params): query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='LinkAggregationGroupGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
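A brief sketch of how the GET and DELETE methods above (and, with a request body, the POST/PATCH variants that follow) are typically driven; lag_api stands for an already-authenticated instance of the generated API class in this module, the group name 'uplink-lag' is a placeholder, and only parameters documented in the docstrings above are used.

# lag_api: an authenticated instance of the generated link-aggregation-groups API class (assumed).

# Synchronous GET limited to one named group and a small page size.
response = lag_api.api23_link_aggregation_groups_get_with_http_info(names=['uplink-lag'], limit=5)

# Asynchronous variant, exactly as shown in the docstring: an ApplyResult-like
# handle is returned and .get() blocks until the call completes.
thread = lag_api.api23_link_aggregation_groups_get_with_http_info(async_req=True)
result = thread.get()

# Remove the group again by name, using the DELETE method above.
lag_api.api23_link_aggregation_groups_delete_with_http_info(names=['uplink-lag'])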
def api23_link_aggregation_groups_patch_with_http_info(self, link_aggregation_group=None, ids=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'PATCH link-aggregation-groups\n\n Modify link aggregation groups by adding and removing Ethernet ports.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_patch_with_http_info(link_aggregation_group, async_req=True)\n >>> result = thread.get()\n\n :param Linkaggregationgroup link_aggregation_group: (required)\n :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.\n :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: LinkAggregationGroupResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (ids is not None): if (not isinstance(ids, list)): ids = [ids] if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (link_aggregation_group is None): raise TypeError('Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_patch`') collection_formats = {} path_params = {} query_params = [] if ('ids' in params): query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None if ('link_aggregation_group' in params): body_params = params['link_aggregation_group'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='LinkAggregationGroupResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
916,694,224,354,934,500
PATCH link-aggregation-groups Modify link aggregation groups by adding and removing Ethernet ports. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api23_link_aggregation_groups_patch_with_http_info(link_aggregation_group, async_req=True) >>> result = thread.get() :param Linkaggregationgroup link_aggregation_group: (required) :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: LinkAggregationGroupResponse If the method is called asynchronously, returns the request thread.
pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py
api23_link_aggregation_groups_patch_with_http_info
Flav-STOR-WL/py-pure-client
python
def api23_link_aggregation_groups_patch_with_http_info(self, link_aggregation_group=None, ids=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'PATCH link-aggregation-groups\n\n Modify link aggregation groups by adding and removing Ethernet ports.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_patch_with_http_info(link_aggregation_group, async_req=True)\n >>> result = thread.get()\n\n :param Linkaggregationgroup link_aggregation_group: (required)\n :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.\n :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: LinkAggregationGroupResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (ids is not None): if (not isinstance(ids, list)): ids = [ids] if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (link_aggregation_group is None): raise TypeError('Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_patch`') collection_formats = {} path_params = {} query_params = [] if ('ids' in params): query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None if ('link_aggregation_group' in params): body_params = params['link_aggregation_group'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='LinkAggregationGroupResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def api23_link_aggregation_groups_post_with_http_info(self, link_aggregation_group=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'POST link-aggregation-groups\n\n Create a link aggregation group of Ethernet ports on the array.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_post_with_http_info(link_aggregation_group, names, async_req=True)\n >>> result = thread.get()\n\n :param LinkAggregationGroup link_aggregation_group: (required)\n :param list[str] names: A comma-separated list of resource names. (required)\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: LinkAggregationGroupResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (link_aggregation_group is None): raise TypeError('Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_post`') if (names is None): raise TypeError('Missing the required parameter `names` when calling `api23_link_aggregation_groups_post`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None if ('link_aggregation_group' in params): body_params = params['link_aggregation_group'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='LinkAggregationGroupResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
5,937,333,888,890,664,000
POST link-aggregation-groups Create a link aggregation group of Ethernet ports on the array. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api23_link_aggregation_groups_post_with_http_info(link_aggregation_group, names, async_req=True) >>> result = thread.get() :param LinkAggregationGroup link_aggregation_group: (required) :param list[str] names: A comma-separated list of resource names. (required) :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: LinkAggregationGroupResponse If the method is called asynchronously, returns the request thread.
pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py
api23_link_aggregation_groups_post_with_http_info
Flav-STOR-WL/py-pure-client
python
def api23_link_aggregation_groups_post_with_http_info(self, link_aggregation_group=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None): 'POST link-aggregation-groups\n\n Create a link aggregation group of Ethernet ports on the array.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.api23_link_aggregation_groups_post_with_http_info(link_aggregation_group, names, async_req=True)\n >>> result = thread.get()\n\n :param LinkAggregationGroup link_aggregation_group: (required)\n :param list[str] names: A comma-separated list of resource names. (required)\n :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.\n :param bool _return_http_data_only: Returns only data field.\n :param bool _preload_content: Response is converted into objects.\n :param int _request_timeout: Total request timeout in seconds.\n It can also be a tuple of (connection time, read time) timeouts.\n :return: LinkAggregationGroupResponse\n If the method is called asynchronously,\n returns the request thread.\n ' if (names is not None): if (not isinstance(names, list)): names = [names] params = {k: v for (k, v) in six.iteritems(locals()) if (v is not None)} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if (link_aggregation_group is None): raise TypeError('Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_post`') if (names is None): raise TypeError('Missing the required parameter `names` when calling `api23_link_aggregation_groups_post`') collection_formats = {} path_params = {} query_params = [] if ('names' in params): query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None if ('link_aggregation_group' in params): body_params = params['link_aggregation_group'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api('/api/2.3/link-aggregation-groups', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='LinkAggregationGroupResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats)
def onnxifi_caffe2_net(pred_net, input_shapes, max_batch_size=1, max_seq_size=1, debug=False, use_onnx=True, merge_fp32_inputs_into_fp16=False, adjust_batch=True, black_list=None, weight_names=None): '\n Transform the caffe2_net by collapsing ONNXIFI-runnable nodes into Onnxifi c2 ops\n ' shape_hints = {} for (k, v) in input_shapes.items(): shape_hints[k] = v pred_net_str = C.onnxifi(pred_net.SerializeToString(), shape_hints, (black_list if black_list else []), (weight_names if (weight_names is not None) else []), max_batch_size, max_seq_size, adjust_batch, debug, merge_fp32_inputs_into_fp16, use_onnx) pred_net_cut = caffe2_pb2.NetDef() pred_net_cut.ParseFromString(pred_net_str) return pred_net_cut
383,177,673,734,143,000
Transform the caffe2_net by collapsing ONNXIFI-runnable nodes into Onnxifi c2 ops
detectron/lib/python3.6/site-packages/caffe2/python/onnx/onnxifi.py
onnxifi_caffe2_net
JustinBear99/Mask_RCNN
python
def onnxifi_caffe2_net(pred_net, input_shapes, max_batch_size=1, max_seq_size=1, debug=False, use_onnx=True, merge_fp32_inputs_into_fp16=False, adjust_batch=True, black_list=None, weight_names=None): '\n \n ' shape_hints = {} for (k, v) in input_shapes.items(): shape_hints[k] = v pred_net_str = C.onnxifi(pred_net.SerializeToString(), shape_hints, (black_list if black_list else []), (weight_names if (weight_names is not None) else []), max_batch_size, max_seq_size, adjust_batch, debug, merge_fp32_inputs_into_fp16, use_onnx) pred_net_cut = caffe2_pb2.NetDef() pred_net_cut.ParseFromString(pred_net_str) return pred_net_cut
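A minimal usage sketch for onnxifi_caffe2_net, assuming a serialized predictor NetDef on disk; the file name, input blob name, and shape are placeholders and must match the actual net.

from caffe2.proto import caffe2_pb2
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net

# Load a previously exported predictor net (placeholder file name).
pred_net = caffe2_pb2.NetDef()
with open('predict_net.pb', 'rb') as f:
    pred_net.ParseFromString(f.read())

# Shape hints map input blob names to shapes; 'data' and its shape are assumptions.
input_shapes = {'data': [1, 3, 224, 224]}

# Collapse the ONNXIFI-runnable subgraphs into Onnxifi C2 ops.
lowered_net = onnxifi_caffe2_net(pred_net, input_shapes, max_batch_size=1, debug=True)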