Column schema for the rows below:

column                    dtype    values
body                      string   length 26 to 98.2k
body_hash                 int64    -9,222,864,604,528,158,000 to 9,221,803,474B
docstring                 string   length 1 to 16.8k
path                      string   length 5 to 230
name                      string   length 1 to 96
repository_name           string   length 7 to 89
lang                      string   1 class (python)
body_without_docstring    string   length 20 to 98.2k
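The rows that follow are easiest to browse programmatically. A minimal sketch, assuming the data ships as a Hugging Face dataset with the columns above; the dataset path used here is a hypothetical placeholder, not the real name:

from datasets import load_dataset

# Hypothetical dataset path; substitute the real one.
ds = load_dataset('org/python-functions-with-docstrings', split='train')

row = ds[0]
print(row['repository_name'], row['path'], row['name'])  # provenance
print(row['docstring'])                                  # extracted docstring
print(row['body_without_docstring'])                     # code with docstring stripped

# Example: keep only short, documented functions.
short = ds.filter(lambda r: len(r['body']) < 2000 and len(r['docstring']) > 10)
print(len(short))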
def test_start_stop_gateway(self):
    'Test start and stop of MQTT gateway.'
    self.assertFalse(self.gateway.is_alive())
    sensor = self._add_sensor(1)
    sensor.children[1] = ChildSensor(1, self.gateway.const.Presentation.S_HUM)
    sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
    self.gateway.recv('/1/1/2/0/1', '', 0)
    self.gateway.recv('/1/1/1/0/1', '30', 0)
    self.gateway.recv('/1/1/2/0/1', '', 0)
    self.gateway.start()
    self.assertTrue(self.gateway.is_alive())
    calls = [mock.call('/+/+/0/+/+', self.gateway.recv, 0),
             mock.call('/+/+/3/+/+', self.gateway.recv, 0)]
    self.mock_sub.assert_has_calls(calls)
    time.sleep(0.05)
    calls = [mock.call('/1/1/1/0/1', '20', 0, True),
             mock.call('/1/1/1/0/1', '30', 0, True)]
    self.mock_pub.assert_has_calls(calls)
    self.gateway.stop()
    self.gateway.join(timeout=0.5)
    self.assertFalse(self.gateway.is_alive())
6,097,585,171,282,758,000
Test start and stop of MQTT gateway.
tests/test_gateway_mqtt.py
test_start_stop_gateway
jslove/pymysensors
python
def test_start_stop_gateway(self):
    self.assertFalse(self.gateway.is_alive())
    sensor = self._add_sensor(1)
    sensor.children[1] = ChildSensor(1, self.gateway.const.Presentation.S_HUM)
    sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
    self.gateway.recv('/1/1/2/0/1', '', 0)
    self.gateway.recv('/1/1/1/0/1', '30', 0)
    self.gateway.recv('/1/1/2/0/1', '', 0)
    self.gateway.start()
    self.assertTrue(self.gateway.is_alive())
    calls = [mock.call('/+/+/0/+/+', self.gateway.recv, 0),
             mock.call('/+/+/3/+/+', self.gateway.recv, 0)]
    self.mock_sub.assert_has_calls(calls)
    time.sleep(0.05)
    calls = [mock.call('/1/1/1/0/1', '20', 0, True),
             mock.call('/1/1/1/0/1', '30', 0, True)]
    self.mock_pub.assert_has_calls(calls)
    self.gateway.stop()
    self.gateway.join(timeout=0.5)
    self.assertFalse(self.gateway.is_alive())
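The topics in this test follow the MySensors MQTT layout, in_prefix/node-id/child-sensor-id/command/ack/type, where a command 2 (req) message asks the gateway to publish the stored value back on the matching command 1 (set) topic. A small illustrative parser, assuming that layout; parse_topic is not part of pymysensors:

def parse_topic(topic, prefix=''):
    # Split 'prefix/node/child/command/ack/type' into protocol fields.
    # Command ids in MySensors: 0=presentation, 1=set, 2=req, 3=internal.
    parts = topic[len(prefix):].strip('/').split('/')
    node_id, child_id, command, ack, msg_type = (int(p) for p in parts)
    return node_id, child_id, command, ack, msg_type

# '/1/1/2/0/1' above is node 1, child 1, req (2), no ack, type V_HUM (1).
assert parse_topic('/1/1/2/0/1') == (1, 1, 2, 0, 1)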
def test_mqtt_load_persistence(self):
    'Test load persistence file for MQTTGateway.'
    sensor = self._add_sensor(1)
    sensor.children[1] = ChildSensor(1, self.gateway.const.Presentation.S_HUM)
    sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
    with tempfile.TemporaryDirectory() as temp_dir:
        self.gateway.persistence_file = os.path.join(temp_dir, 'file.json')
        self.gateway._save_sensors()
        del self.gateway.sensors[1]
        self.assertNotIn(1, self.gateway.sensors)
        self.gateway._safe_load_sensors()
    self.assertEqual(self.gateway.sensors[1].children[1].id,
                     sensor.children[1].id)
    self.assertEqual(self.gateway.sensors[1].children[1].type,
                     sensor.children[1].type)
    self.assertEqual(self.gateway.sensors[1].children[1].values,
                     sensor.children[1].values)
    calls = [mock.call('/1/1/1/+/+', self.gateway.recv, 0),
             mock.call('/1/1/2/+/+', self.gateway.recv, 0),
             mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
    self.mock_sub.assert_has_calls(calls)
6,610,524,837,255,244,000
Test load persistence file for MQTTGateway.
tests/test_gateway_mqtt.py
test_mqtt_load_persistence
jslove/pymysensors
python
def test_mqtt_load_persistence(self):
    sensor = self._add_sensor(1)
    sensor.children[1] = ChildSensor(1, self.gateway.const.Presentation.S_HUM)
    sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
    with tempfile.TemporaryDirectory() as temp_dir:
        self.gateway.persistence_file = os.path.join(temp_dir, 'file.json')
        self.gateway._save_sensors()
        del self.gateway.sensors[1]
        self.assertNotIn(1, self.gateway.sensors)
        self.gateway._safe_load_sensors()
    self.assertEqual(self.gateway.sensors[1].children[1].id,
                     sensor.children[1].id)
    self.assertEqual(self.gateway.sensors[1].children[1].type,
                     sensor.children[1].type)
    self.assertEqual(self.gateway.sensors[1].children[1].values,
                     sensor.children[1].values)
    calls = [mock.call('/1/1/1/+/+', self.gateway.recv, 0),
             mock.call('/1/1/2/+/+', self.gateway.recv, 0),
             mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
    self.mock_sub.assert_has_calls(calls)
def setUp(self):
    'Set up test.'
    self.mock_pub = mock.Mock()
    self.mock_sub = mock.Mock()
    self.gateway = None
4,819,394,504,927,349,000
Set up test.
tests/test_gateway_mqtt.py
setUp
jslove/pymysensors
python
def setUp(self):
    self.mock_pub = mock.Mock()
    self.mock_sub = mock.Mock()
    self.gateway = None
def _setup(self, in_prefix, out_prefix):
    'Set up gateway.'
    self.gateway = MQTTGateway(self.mock_pub, self.mock_sub,
                               in_prefix=in_prefix, out_prefix=out_prefix)
4,104,366,467,906,981,400
Set up gateway.
tests/test_gateway_mqtt.py
_setup
jslove/pymysensors
python
def _setup(self, in_prefix, out_prefix):
    self.gateway = MQTTGateway(self.mock_pub, self.mock_sub,
                               in_prefix=in_prefix, out_prefix=out_prefix)
def _add_sensor(self, sensorid):
    'Add sensor node. Return sensor node instance.'
    self.gateway.sensors[sensorid] = Sensor(sensorid)
    return self.gateway.sensors[sensorid]
8,655,351,742,864,993,000
Add sensor node. Return sensor node instance.
tests/test_gateway_mqtt.py
_add_sensor
jslove/pymysensors
python
def _add_sensor(self, sensorid):
    self.gateway.sensors[sensorid] = Sensor(sensorid)
    return self.gateway.sensors[sensorid]
def test_nested_prefix(self):
    'Test recv method with nested topic prefix.'
    self._setup('test/test-in', 'test/test-out')
    sensor = self._add_sensor(1)
    sensor.children[1] = ChildSensor(1, self.gateway.const.Presentation.S_HUM)
    sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
    self.gateway.recv('test/test-in/1/1/2/0/1', '', 0)
    ret = self.gateway.handle_queue()
    self.assertEqual(ret, '1;1;1;0;1;20\n')
    self.gateway.recv('test/test-in/1/1/2/0/1', '', 1)
    ret = self.gateway.handle_queue()
    self.assertEqual(ret, '1;1;1;1;1;20\n')
5,369,771,523,001,334,000
Test recv method with nested topic prefix.
tests/test_gateway_mqtt.py
test_nested_prefix
jslove/pymysensors
python
def test_nested_prefix(self):
    self._setup('test/test-in', 'test/test-out')
    sensor = self._add_sensor(1)
    sensor.children[1] = ChildSensor(1, self.gateway.const.Presentation.S_HUM)
    sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
    self.gateway.recv('test/test-in/1/1/2/0/1', '', 0)
    ret = self.gateway.handle_queue()
    self.assertEqual(ret, '1;1;1;0;1;20\n')
    self.gateway.recv('test/test-in/1/1/2/0/1', '', 1)
    ret = self.gateway.handle_queue()
    self.assertEqual(ret, '1;1;1;1;1;20\n')
def __init__(self, key=None, name=None, local_vars_configuration=None):
    'IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef - a model defined in OpenAPI'
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    self._key = None
    self._name = None
    self.discriminator = None
    if key is not None:
        self.key = key
    self.name = name
4,513,740,388,721,282,600
IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef - a model defined in OpenAPI
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
__init__
mariusgheorghies/python
python
def __init__(self, key=None, name=None, local_vars_configuration=None):
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    self._key = None
    self._name = None
    self.discriminator = None
    if key is not None:
        self.key = key
    self.name = name
@property
def key(self):
    """Gets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

    The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

    :return: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :rtype: str
    """
    return self._key
8,283,897,539,841,171,000
Gets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

:return: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
:rtype: str
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
key
mariusgheorghies/python
python
@property
def key(self):
    """Gets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

    The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

    :return: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :rtype: str
    """
    return self._key
@key.setter
def key(self, key):
    """Sets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

    The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

    :param key: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :type: str
    """
    self._key = key
705,367,206,117,233,400
Sets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

:param key: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
:type: str
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
key
mariusgheorghies/python
python
@key.setter
def key(self, key):
    """Sets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

    The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.  # noqa: E501

    :param key: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :type: str
    """
    self._key = key
@property
def name(self):
    """Gets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

    Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

    :return: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :rtype: str
    """
    return self._name
2,245,093,184,130,805,800
Gets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

:return: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
:rtype: str
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
name
mariusgheorghies/python
python
@property
def name(self):
    """Gets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501

    Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

    :return: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :rtype: str
    """
    return self._name
@name.setter
def name(self, name):
    """Sets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

    Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

    :param name: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :type: str
    """
    if self.local_vars_configuration.client_side_validation and name is None:
        raise ValueError('Invalid value for `name`, must not be `None`')
    self._name = name
4,718,289,652,447,560,000
Sets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

:param name: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
:type: str
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
name
mariusgheorghies/python
python
@name.setter
def name(self, name):
    """Sets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.

    Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501

    :param name: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.  # noqa: E501
    :type: str
    """
    if self.local_vars_configuration.client_side_validation and name is None:
        raise ValueError('Invalid value for `name`, must not be `None`')
    self._name = name
def to_dict(self):
    'Returns the model properties as a dict'
    result = {}
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, 'to_dict') else x,
                value))
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], 'to_dict') else item,
                value.items()))
        else:
            result[attr] = value
    return result
8,442,519,487,048,767,000
Returns the model properties as a dict
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
to_dict
mariusgheorghies/python
python
def to_dict(self):
    result = {}
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, 'to_dict') else x,
                value))
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], 'to_dict') else item,
                value.items()))
        else:
            result[attr] = value
    return result
def to_str(self):
    'Returns the string representation of the model'
    return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
to_str
mariusgheorghies/python
python
def to_str(self):
    return pprint.pformat(self.to_dict())
def __repr__(self):
    'For `print` and `pprint`'
    return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
__repr__
mariusgheorghies/python
python
def __repr__(self):
    return self.to_str()
def __eq__(self, other):
    'Returns true if both objects are equal'
    if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
        return False
    return self.to_dict() == other.to_dict()
3,029,111,975,764,075,000
Returns true if both objects are equal
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
__eq__
mariusgheorghies/python
python
def __eq__(self, other):
    if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
        return False
    return self.to_dict() == other.to_dict()
def __ne__(self, other):
    'Returns true if both objects are not equal'
    if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
        return True
    return self.to_dict() != other.to_dict()
8,941,883,050,080,361,000
Returns true if both objects are not equal
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
__ne__
mariusgheorghies/python
python
def __ne__(self, other):
    if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
        return True
    return self.to_dict() != other.to_dict()
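The records above together form one generated OpenAPI model class. A short usage sketch, assuming the standard openapi-generator client layout; the import path comes from the `path` field of these records:

from kubernetes.client.models.io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref import (
    IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef as SecretRef,
)

ref = SecretRef(key='api-token', name='cloudflare-credentials')
print(ref.to_dict())  # plain dict of the model's fields
print(ref)            # __repr__ -> to_str() -> pprint of that dict

# Equality is structural: two refs with the same field values compare equal.
assert ref == SecretRef(key='api-token', name='cloudflare-credentials')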
@abc.abstractmethod
def create_api_object(self, *args, **kwargs):
    'Create API object'
89,623,001,943,350,850
Create API object
bioconda_utils/githubhandler.py
create_api_object
erictleung/bioconda-utils
python
@abc.abstractmethod
def create_api_object(self, *args, **kwargs):
    ...
def get_file_relurl(self, path: str, branch_name: str = 'master') -> str:
    'Format domain relative url for **path** on **branch_name**'
    return '/{user}/{repo}/tree/{branch_name}/{path}'.format(
        branch_name=branch_name, path=path, **self.var_default)
6,098,138,925,639,406,000
Format domain relative url for **path** on **branch_name**
bioconda_utils/githubhandler.py
get_file_relurl
erictleung/bioconda-utils
python
def get_file_relurl(self, path: str, branch_name: str = 'master') -> str:
    return '/{user}/{repo}/tree/{branch_name}/{path}'.format(
        branch_name=branch_name, path=path, **self.var_default)
async def login(self, *args, **kwargs):
    'Log into API (fills `self.username`)'
    self.create_api_object(*args, **kwargs)
    if not self.token:
        self.username = 'UNKNOWN [no token]'
    else:
        user = await self.api.getitem('/user')
        self.username = user['login']
4,357,351,523,083,547,000
Log into API (fills `self.username`)
bioconda_utils/githubhandler.py
login
erictleung/bioconda-utils
python
async def login(self, *args, **kwargs):
    self.create_api_object(*args, **kwargs)
    if not self.token:
        self.username = 'UNKNOWN [no token]'
    else:
        user = await self.api.getitem('/user')
        self.username = user['login']
async def is_member(self, username) -> bool:
    'Check if **username** is member of current org'
    if not username:
        return False
    var_data = copy(self.var_default)
    var_data['username'] = username
    try:
        await self.api.getitem(self.ORG_MEMBERS, var_data)
    except gidgethub.BadRequest:
        logger.debug('User %s is not a member of %s', username, var_data['user'])
        return False
    logger.debug('User %s IS a member of %s', username, var_data['user'])
    return True
-9,075,334,939,153,031,000
Check if **username** is member of current org
bioconda_utils/githubhandler.py
is_member
erictleung/bioconda-utils
python
async def is_member(self, username) -> bool:
    if not username:
        return False
    var_data = copy(self.var_default)
    var_data['username'] = username
    try:
        await self.api.getitem(self.ORG_MEMBERS, var_data)
    except gidgethub.BadRequest:
        logger.debug('User %s is not a member of %s', username, var_data['user'])
        return False
    logger.debug('User %s IS a member of %s', username, var_data['user'])
    return True
async def get_prs(self,
                  from_branch: Optional[str] = None,
                  from_user: Optional[str] = None,
                  to_branch: Optional[str] = None,
                  number: Optional[int] = None,
                  state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
    """Retrieve list of PRs matching parameters

    Arguments:
      from_branch: Name of branch from which PR asks to pull
      from_user: Name of user/org from which to pull (default: from auth)
      to_branch: Name of branch into which to pull (default: master)
      number: PR number
    """
    var_data = copy(self.var_default)
    if not from_user:
        from_user = self.username
    if from_branch:
        if from_user:
            var_data['head'] = f'{from_user}:{from_branch}'
        else:
            var_data['head'] = from_branch
    if to_branch:
        var_data['base'] = to_branch
    if number:
        var_data['number'] = str(number)
    if state:
        var_data['state'] = state.name.lower()
    return await self.api.getitem(self.PULLS, var_data)
-222,368,685,826,390,300
Retrieve list of PRs matching parameters

Arguments:
  from_branch: Name of branch from which PR asks to pull
  from_user: Name of user/org from which to pull (default: from auth)
  to_branch: Name of branch into which to pull (default: master)
  number: PR number
bioconda_utils/githubhandler.py
get_prs
erictleung/bioconda-utils
python
async def get_prs(self,
                  from_branch: Optional[str] = None,
                  from_user: Optional[str] = None,
                  to_branch: Optional[str] = None,
                  number: Optional[int] = None,
                  state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
    """Retrieve list of PRs matching parameters

    Arguments:
      from_branch: Name of branch from which PR asks to pull
      from_user: Name of user/org from which to pull (default: from auth)
      to_branch: Name of branch into which to pull (default: master)
      number: PR number
    """
    var_data = copy(self.var_default)
    if not from_user:
        from_user = self.username
    if from_branch:
        if from_user:
            var_data['head'] = f'{from_user}:{from_branch}'
        else:
            var_data['head'] = from_branch
    if to_branch:
        var_data['base'] = to_branch
    if number:
        var_data['number'] = str(number)
    if state:
        var_data['state'] = state.name.lower()
    return await self.api.getitem(self.PULLS, var_data)
async def create_pr(self,
                    title: str,
                    from_branch: Optional[str] = None,
                    from_user: Optional[str] = None,
                    to_branch: Optional[str] = 'master',
                    body: Optional[str] = None,
                    maintainer_can_modify: bool = True) -> Dict[Any, Any]:
    """Create new PR

    Arguments:
      title: Title of new PR
      from_branch: Name of branch from which PR asks to pull
      from_user: Name of user/org from which to pull
      to_branch: Name of branch into which to pull (default: master)
      body: Body text of PR
      maintainer_can_modify: Whether to allow maintainer to modify from_branch
    """
    var_data = copy(self.var_default)
    if not from_user:
        from_user = self.username
    data: Dict[str, Any] = {'title': title, 'body': '',
                            'maintainer_can_modify': maintainer_can_modify}
    if body:
        data['body'] += body
    if from_branch:
        if from_user and from_user != self.username:
            data['head'] = f'{from_user}:{from_branch}'
        else:
            data['head'] = from_branch
    if to_branch:
        data['base'] = to_branch
    logger.debug('PR data %s', data)
    if self.dry_run:
        logger.info("Would create PR '%s'", title)
        return {'number': -1}
    logger.info("Creating PR '%s'", title)
    return await self.api.post(self.PULLS, var_data, data=data)
-591,203,747,423,478,500
Create new PR

Arguments:
  title: Title of new PR
  from_branch: Name of branch from which PR asks to pull
  from_user: Name of user/org from which to pull
  to_branch: Name of branch into which to pull (default: master)
  body: Body text of PR
  maintainer_can_modify: Whether to allow maintainer to modify from_branch
bioconda_utils/githubhandler.py
create_pr
erictleung/bioconda-utils
python
async def create_pr(self,
                    title: str,
                    from_branch: Optional[str] = None,
                    from_user: Optional[str] = None,
                    to_branch: Optional[str] = 'master',
                    body: Optional[str] = None,
                    maintainer_can_modify: bool = True) -> Dict[Any, Any]:
    """Create new PR

    Arguments:
      title: Title of new PR
      from_branch: Name of branch from which PR asks to pull
      from_user: Name of user/org from which to pull
      to_branch: Name of branch into which to pull (default: master)
      body: Body text of PR
      maintainer_can_modify: Whether to allow maintainer to modify from_branch
    """
    var_data = copy(self.var_default)
    if not from_user:
        from_user = self.username
    data: Dict[str, Any] = {'title': title, 'body': '',
                            'maintainer_can_modify': maintainer_can_modify}
    if body:
        data['body'] += body
    if from_branch:
        if from_user and from_user != self.username:
            data['head'] = f'{from_user}:{from_branch}'
        else:
            data['head'] = from_branch
    if to_branch:
        data['base'] = to_branch
    logger.debug('PR data %s', data)
    if self.dry_run:
        logger.info("Would create PR '%s'", title)
        return {'number': -1}
    logger.info("Creating PR '%s'", title)
    return await self.api.post(self.PULLS, var_data, data=data)
async def modify_issue(self,
                       number: int,
                       labels: Optional[List[str]] = None,
                       title: Optional[str] = None,
                       body: Optional[str] = None) -> Dict[Any, Any]:
    """Modify existing issue (PRs are issues)

    Arguments:
      labels: list of labels to assign to issue
      title: new title
      body: new body
    """
    var_data = copy(self.var_default)
    var_data['number'] = str(number)
    data: Dict[str, Any] = {}
    if labels:
        data['labels'] = labels
    if title:
        data['title'] = title
    if body:
        data['body'] = body
    if self.dry_run:
        logger.info('Would modify PR %s', number)
        if title:
            logger.info('New title: %s', title)
        if labels:
            logger.info('New labels: %s', labels)
        if body:
            logger.info('New Body:\n%s\n', body)
        return {'number': number}
    logger.info('Modifying PR %s', number)
    return await self.api.patch(self.ISSUES, var_data, data=data)
5,488,880,197,617,060,000
Modify existing issue (PRs are issues)

Arguments:
  labels: list of labels to assign to issue
  title: new title
  body: new body
bioconda_utils/githubhandler.py
modify_issue
erictleung/bioconda-utils
python
async def modify_issue(self,
                       number: int,
                       labels: Optional[List[str]] = None,
                       title: Optional[str] = None,
                       body: Optional[str] = None) -> Dict[Any, Any]:
    """Modify existing issue (PRs are issues)

    Arguments:
      labels: list of labels to assign to issue
      title: new title
      body: new body
    """
    var_data = copy(self.var_default)
    var_data['number'] = str(number)
    data: Dict[str, Any] = {}
    if labels:
        data['labels'] = labels
    if title:
        data['title'] = title
    if body:
        data['body'] = body
    if self.dry_run:
        logger.info('Would modify PR %s', number)
        if title:
            logger.info('New title: %s', title)
        if labels:
            logger.info('New labels: %s', labels)
        if body:
            logger.info('New Body:\n%s\n', body)
        return {'number': number}
    logger.info('Modifying PR %s', number)
    return await self.api.patch(self.ISSUES, var_data, data=data)
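These coroutines live on a handler class wrapping a gidgethub client. A hypothetical driver, assuming a concrete subclass that implements create_api_object(); the call pattern below is a sketch, not the project's documented API:

async def demo(handler):
    # With no token, login() still succeeds and marks the user as unknown.
    await handler.login()
    # dry_run handlers short-circuit: create_pr returns {'number': -1}
    # without touching the GitHub API.
    pr = await handler.create_pr(title='Update recipe',
                                 from_branch='bump-recipe',
                                 body='Automated version bump')
    print('created PR', pr['number'])
    # PRs are issues, so label/title/body edits go through modify_issue().
    await handler.modify_issue(number=pr['number'], labels=['autobump'])

# asyncio.run(demo(make_handler())) would drive this; construction of the
# concrete handler is omitted here.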
@staticmethod
def video(keywords: str):
    """
    :return: < video object > regarding the query
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
    video_ids = re.findall('\\"videoId\\":\\"(.*?)\\"', raw)
    return Video(video_ids[0]) if video_ids else None
7,022,613,803,821,999,000
:return: < video object > regarding the query
src/_query.py
video
SlumberDemon/AioTube
python
@staticmethod
def video(keywords: str):
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
    video_ids = re.findall('\\"videoId\\":\\"(.*?)\\"', raw)
    return Video(video_ids[0]) if video_ids else None
@staticmethod
def channel(keywords: str):
    """
    :return: < channel object > regarding the query
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
    channel_ids = re.findall('{\\"channelId\\":\\"(.*?)\\"', raw)
    return Channel(channel_ids[0]) if channel_ids else None
-3,794,072,335,066,502,000
:return: < channel object > regarding the query
src/_query.py
channel
SlumberDemon/AioTube
python
@staticmethod
def channel(keywords: str):
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
    channel_ids = re.findall('{\\"channelId\\":\\"(.*?)\\"', raw)
    return Channel(channel_ids[0]) if channel_ids else None
@staticmethod
def videos(keywords: str, limit: int):
    """
    :param str keywords: query to be searched on YouTube
    :param int limit: total number of videos to be searched
    :return: list of < video object > of each video regarding the query (consider limit)
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
    raw_ids = re.findall('\\"videoId\\":\\"(.*?)\\"', raw)
    pureList = _filter(limit=limit, iterable=raw_ids)
    return _VideoBulk(pureList) if pureList else None
-2,091,650,791,094,011,600
:param str keywords: query to be searched on YouTube
:param int limit: total number of videos to be searched
:return: list of < video object > of each video regarding the query (consider limit)
src/_query.py
videos
SlumberDemon/AioTube
python
@staticmethod
def videos(keywords: str, limit: int):
    """
    :param str keywords: query to be searched on YouTube
    :param int limit: total number of videos to be searched
    :return: list of < video object > of each video regarding the query (consider limit)
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
    raw_ids = re.findall('\\"videoId\\":\\"(.*?)\\"', raw)
    pureList = _filter(limit=limit, iterable=raw_ids)
    return _VideoBulk(pureList) if pureList else None
@staticmethod
def channels(keywords: str, limit: int):
    """
    :param str keywords: query to be searched on YouTube
    :param int limit: total number of channels to be searched
    :return: list of < channel object > of each video regarding the query (consider limit)
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
    raw_ids = re.findall('{\\"channelId\\":\\"(.*?)\\"', raw)
    pureList = _filter(limit=limit, iterable=raw_ids)
    return _ChannelBulk(pureList) if pureList else None
-4,197,474,112,352,651,300
:param str keywords: query to be searched on YouTube
:param int limit: total number of channels to be searched
:return: list of < channel object > of each video regarding the query (consider limit)
src/_query.py
channels
SlumberDemon/AioTube
python
@staticmethod
def channels(keywords: str, limit: int):
    """
    :param str keywords: query to be searched on YouTube
    :param int limit: total number of channels to be searched
    :return: list of < channel object > of each video regarding the query (consider limit)
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
    raw_ids = re.findall('{\\"channelId\\":\\"(.*?)\\"', raw)
    pureList = _filter(limit=limit, iterable=raw_ids)
    return _ChannelBulk(pureList) if pureList else None
@staticmethod
def playlist(keywords: str):
    """
    :return: < playlist object > regarding the query
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
    found = re.findall('playlistId\\":\\"(.*?)\\"', raw)
    return Playlist(found[0]) if found else None
-310,729,293,607,817,340
:return: < playlist object > regarding the query
src/_query.py
playlist
SlumberDemon/AioTube
python
@staticmethod
def playlist(keywords: str):
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
    found = re.findall('playlistId\\":\\"(.*?)\\"', raw)
    return Playlist(found[0]) if found else None
@staticmethod
def playlists(keywords: str, limit: int):
    """
    :param str keywords: query to be searched on YouTube
    :param int limit: total playlists be searched
    :return: list of < playlist object > of each playlist regarding the query (consider limit)
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
    found = re.findall('playlistId\\":\\"(.*?)\\"', raw)
    pure = _filter(limit=limit, iterable=found)
    return _PlaylistBulk(pure) if pure else None
1,905,786,711,633,011,500
:param str keywords: query to be searched on YouTube
:param int limit: total playlists be searched
:return: list of < playlist object > of each playlist regarding the query (consider limit)
src/_query.py
playlists
SlumberDemon/AioTube
python
@staticmethod
def playlists(keywords: str, limit: int):
    """
    :param str keywords: query to be searched on YouTube
    :param int limit: total playlists be searched
    :return: list of < playlist object > of each playlist regarding the query (consider limit)
    """
    raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
    found = re.findall('playlistId\\":\\"(.*?)\\"', raw)
    pure = _filter(limit=limit, iterable=found)
    return _PlaylistBulk(pure) if pure else None
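All of these helpers share one pattern: fetch the search-results page, scrape IDs with a regex, then trim the list with _filter. A plausible stand-in for _filter, assuming it deduplicates IDs in page order and applies the limit; the real implementation in AioTube may differ:

def _filter(limit: int, iterable: list) -> list:
    # dict.fromkeys deduplicates while preserving first-seen order.
    seen = dict.fromkeys(iterable)
    return list(seen)[:limit]

assert _filter(2, ['a', 'b', 'a', 'c']) == ['a', 'b']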
def create_world(self, **kwargs):
    """Create World  # noqa: E501

    Create a new world. This endpoint requires `assetUrl` to be a valid File object with `.vrcw` file extension, and `imageUrl` to be a valid File object with an image file extension.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_world(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        create_world_request (CreateWorldRequest): [optional]
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        World
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    return self.create_world_endpoint.call_with_http_info(**kwargs)
1,839,116,180,590,506,500
Create World  # noqa: E501

Create a new world. This endpoint requires `assetUrl` to be a valid File object with `.vrcw` file extension, and `imageUrl` to be a valid File object with an image file extension.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.create_world(async_req=True)
>>> result = thread.get()

Keyword Args:
    create_world_request (CreateWorldRequest): [optional]
    _return_http_data_only (bool): response data without head status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
    _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
    _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
    _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    World
    If the method is called asynchronously, returns the request thread.
vrchatapi/api/worlds_api.py
create_world
vrchatapi/vrchatapi-python
python
def create_world(self, **kwargs):
    """Create World  # noqa: E501

    Create a new world. This endpoint requires `assetUrl` to be a valid File object with `.vrcw` file extension, and `imageUrl` to be a valid File object with an image file extension.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_world(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        create_world_request (CreateWorldRequest): [optional]
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        World
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    return self.create_world_endpoint.call_with_http_info(**kwargs)
def delete_world(self, world_id, **kwargs):
    """Delete World  # noqa: E501

    Delete a world. Notice a world is never fully "deleted", only its ReleaseStatus is set to "hidden" and the linked Files are deleted. The WorldID is permanently reserved.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_world(world_id, async_req=True)
    >>> result = thread.get()

    Args:
        world_id (str):

    Keyword Args:
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        None
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['world_id'] = world_id
    return self.delete_world_endpoint.call_with_http_info(**kwargs)
3,682,070,476,055,268,400
Delete World  # noqa: E501

Delete a world. Notice a world is never fully "deleted", only its ReleaseStatus is set to "hidden" and the linked Files are deleted. The WorldID is permanently reserved.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.delete_world(world_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):

Keyword Args:
    _return_http_data_only (bool): response data without head status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
    _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
    _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
    _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    None
    If the method is called asynchronously, returns the request thread.
vrchatapi/api/worlds_api.py
delete_world
vrchatapi/vrchatapi-python
python
def delete_world(self, world_id, **kwargs):
    """Delete World  # noqa: E501

    Delete a world. Notice a world is never fully "deleted", only its ReleaseStatus is set to "hidden" and the linked Files are deleted. The WorldID is permanently reserved.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_world(world_id, async_req=True)
    >>> result = thread.get()

    Args:
        world_id (str):

    Keyword Args:
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        None
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['world_id'] = world_id
    return self.delete_world_endpoint.call_with_http_info(**kwargs)
def get_active_worlds(self, **kwargs):
    """List Active Worlds  # noqa: E501

    Search and list currently Active worlds by query filters.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_active_worlds(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        featured (str): Filters on featured results.. [optional]
        sort (str): [optional] if omitted the server will use the default value of "popularity"
        n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
        order (str): [optional] if omitted the server will use the default value of "descending"
        offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
        search (str): Filters by world name.. [optional]
        tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
        notag (str): Tags to exclude (comma-separated).. [optional]
        release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
        max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
        min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
        platform (str): The platform the asset supports.. [optional]
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [LimitedWorld]
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    return self.get_active_worlds_endpoint.call_with_http_info(**kwargs)
1,943,135,176,156,038,400
List Active Worlds  # noqa: E501

Search and list currently Active worlds by query filters.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_active_worlds(async_req=True)
>>> result = thread.get()

Keyword Args:
    featured (str): Filters on featured results.. [optional]
    sort (str): [optional] if omitted the server will use the default value of "popularity"
    n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
    order (str): [optional] if omitted the server will use the default value of "descending"
    offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
    search (str): Filters by world name.. [optional]
    tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
    notag (str): Tags to exclude (comma-separated).. [optional]
    release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
    max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
    min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
    platform (str): The platform the asset supports.. [optional]
    _return_http_data_only (bool): response data without head status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
    _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
    _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
    _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    [LimitedWorld]
    If the method is called asynchronously, returns the request thread.
vrchatapi/api/worlds_api.py
get_active_worlds
vrchatapi/vrchatapi-python
python
def get_active_worlds(self, **kwargs):
    """List Active Worlds  # noqa: E501

    Search and list currently Active worlds by query filters.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_active_worlds(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        featured (str): Filters on featured results.. [optional]
        sort (str): [optional] if omitted the server will use the default value of "popularity"
        n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
        order (str): [optional] if omitted the server will use the default value of "descending"
        offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
        search (str): Filters by world name.. [optional]
        tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
        notag (str): Tags to exclude (comma-separated).. [optional]
        release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
        max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
        min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
        platform (str): The platform the asset supports.. [optional]
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [LimitedWorld]
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    return self.get_active_worlds_endpoint.call_with_http_info(**kwargs)
def get_favorited_worlds(self, **kwargs):
    """List Favorited Worlds  # noqa: E501

    Search and list favorited worlds by query filters.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_favorited_worlds(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        featured (str): Filters on featured results.. [optional]
        sort (str): [optional] if omitted the server will use the default value of "popularity"
        n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
        order (str): [optional] if omitted the server will use the default value of "descending"
        offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
        search (str): Filters by world name.. [optional]
        tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
        notag (str): Tags to exclude (comma-separated).. [optional]
        release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
        max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
        min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
        platform (str): The platform the asset supports.. [optional]
        user_id (str): Target user to see information on, admin-only.. [optional]
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [LimitedWorld]
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    return self.get_favorited_worlds_endpoint.call_with_http_info(**kwargs)
-2,288,150,651,427,718,700
List Favorited Worlds  # noqa: E501

Search and list favorited worlds by query filters.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_favorited_worlds(async_req=True)
>>> result = thread.get()

Keyword Args:
    featured (str): Filters on featured results.. [optional]
    sort (str): [optional] if omitted the server will use the default value of "popularity"
    n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
    order (str): [optional] if omitted the server will use the default value of "descending"
    offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
    search (str): Filters by world name.. [optional]
    tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
    notag (str): Tags to exclude (comma-separated).. [optional]
    release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
    max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
    min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
    platform (str): The platform the asset supports.. [optional]
    user_id (str): Target user to see information on, admin-only.. [optional]
    _return_http_data_only (bool): response data without head status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
    _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
    _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
    _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    [LimitedWorld]
    If the method is called asynchronously, returns the request thread.
vrchatapi/api/worlds_api.py
get_favorited_worlds
vrchatapi/vrchatapi-python
python
def get_favorited_worlds(self, **kwargs):
    """List Favorited Worlds  # noqa: E501

    Search and list favorited worlds by query filters.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_favorited_worlds(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        featured (str): Filters on featured results.. [optional]
        sort (str): [optional] if omitted the server will use the default value of "popularity"
        n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60
        order (str): [optional] if omitted the server will use the default value of "descending"
        offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]
        search (str): Filters by world name.. [optional]
        tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]
        notag (str): Tags to exclude (comma-separated).. [optional]
        release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"
        max_unity_version (str): The maximum Unity version supported by the asset.. [optional]
        min_unity_version (str): The minimum Unity version supported by the asset.. [optional]
        platform (str): The platform the asset supports.. [optional]
        user_id (str): Target user to see information on, admin-only.. [optional]
        _return_http_data_only (bool): response data without head status code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None.
        _check_input_type (bool): specifies if type checking should be done on the data sent to the server. Default is True.
        _check_return_type (bool): specifies if type checking should be done on the data received from the server. Default is True.
        _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [LimitedWorld]
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    return self.get_favorited_worlds_endpoint.call_with_http_info(**kwargs)
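A short synchronous usage sketch for the worlds API above; the Configuration/ApiClient setup follows the usual openapi-generator client pattern and is an assumption here (credential handling, including VRChat's 2FA step, is omitted):

import vrchatapi
from vrchatapi.api import worlds_api

# Assumed client construction; see the vrchatapi-python docs for the full
# authentication flow.
configuration = vrchatapi.Configuration(username='user', password='hunter2')
with vrchatapi.ApiClient(configuration) as api_client:
    api = worlds_api.WorldsApi(api_client)
    # Documented kwargs from the docstrings above: n, sort, order, tag, ...
    worlds = api.get_active_worlds(n=10, sort='popularity')
    for world in worlds:
        print(world.name)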
def get_recent_worlds(self, **kwargs): 'List Recent Worlds # noqa: E501\n\n Search and list recently visited worlds by query filters. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_recent_worlds(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n featured (str): Filters on featured results.. [optional]\n sort (str): [optional] if omitted the server will use the default value of "popularity"\n n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60\n order (str): [optional] if omitted the server will use the default value of "descending"\n offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]\n search (str): Filters by world name.. [optional]\n tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]\n notag (str): Tags to exclude (comma-separated).. [optional]\n release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"\n max_unity_version (str): The maximum Unity version supported by the asset.. [optional]\n min_unity_version (str): The minimum Unity version supported by the asset.. [optional]\n platform (str): The platform the asset supports.. [optional]\n user_id (str): Target user to see information on, admin-only.. [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [LimitedWorld]\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') return self.get_recent_worlds_endpoint.call_with_http_info(**kwargs)
8,313,723,588,409,878,000
List Recent Worlds # noqa: E501

Search and list recently visited worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_recent_worlds(async_req=True)
>>> result = thread.get()

Keyword Args:
    featured (str): Filters on featured results. [optional]
    sort (str): [optional] if omitted the server will use the default value of "popularity"
    n (int): The number of objects to return. [optional] if omitted the server will use the default value of 60
    order (str): [optional] if omitted the server will use the default value of "descending"
    offset (int): A zero-based offset from the default object sorting from where search results start. [optional]
    search (str): Filters by world name. [optional]
    tag (str): Tags to include (comma-separated). Any of the tags needs to be present. [optional]
    notag (str): Tags to exclude (comma-separated). [optional]
    release_status (str): Filter by ReleaseStatus. [optional] if omitted the server will use the default value of "public"
    max_unity_version (str): The maximum Unity version supported by the asset. [optional]
    min_unity_version (str): The minimum Unity version supported by the asset. [optional]
    platform (str): The platform the asset supports. [optional]
    user_id (str): Target user to see information on, admin-only. [optional]
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    [LimitedWorld]
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
get_recent_worlds
vrchatapi/vrchatapi-python
python
def get_recent_worlds(self, **kwargs): 'List Recent Worlds # noqa: E501\n\n Search and list recently visited worlds by query filters. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_recent_worlds(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n featured (str): Filters on featured results.. [optional]\n sort (str): [optional] if omitted the server will use the default value of "popularity"\n n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60\n order (str): [optional] if omitted the server will use the default value of "descending"\n offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]\n search (str): Filters by world name.. [optional]\n tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]\n notag (str): Tags to exclude (comma-separated).. [optional]\n release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"\n max_unity_version (str): The maximum Unity version supported by the asset.. [optional]\n min_unity_version (str): The minimum Unity version supported by the asset.. [optional]\n platform (str): The platform the asset supports.. [optional]\n user_id (str): Target user to see information on, admin-only.. [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [LimitedWorld]\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') return self.get_recent_worlds_endpoint.call_with_http_info(**kwargs)
def get_world(self, world_id, **kwargs): 'Get World by ID # noqa: E501\n\n Get information about a specific World. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n World\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.get_world_endpoint.call_with_http_info(**kwargs)
397,403,147,349,226,750
Get World by ID # noqa: E501

Get information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_world(world_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):

Keyword Args:
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    World
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
get_world
vrchatapi/vrchatapi-python
python
def get_world(self, world_id, **kwargs): 'Get World by ID # noqa: E501\n\n Get information about a specific World. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n World\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.get_world_endpoint.call_with_http_info(**kwargs)
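Fetching a single world with the method above, reusing the `api` object from the earlier sketch; the world ID is a hypothetical placeholder, and the printed attribute name is an assumption about the generated World model:

    world_id = "wrld_00000000-0000-0000-0000-000000000000"  # hypothetical placeholder
    world = api.get_world(world_id)
    print(world.name)  # attribute name assumes the generated World model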
def get_world_instance(self, world_id, instance_id, **kwargs): 'Get World Instance # noqa: E501\n\n Returns a worlds instance. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world_instance(world_id, instance_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n instance_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Instance\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id kwargs['instance_id'] = instance_id return self.get_world_instance_endpoint.call_with_http_info(**kwargs)
-4,952,419,516,927,023,000
Get World Instance # noqa: E501

Returns a world's instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_world_instance(world_id, instance_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):
    instance_id (str):

Keyword Args:
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    Instance
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
get_world_instance
vrchatapi/vrchatapi-python
python
def get_world_instance(self, world_id, instance_id, **kwargs): 'Get World Instance # noqa: E501\n\n Returns a worlds instance. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world_instance(world_id, instance_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n instance_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Instance\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id kwargs['instance_id'] = instance_id return self.get_world_instance_endpoint.call_with_http_info(**kwargs)
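Resolving a specific instance of a world with the method above; both IDs are hypothetical placeholders, and the instance-ID format shown is illustrative only:

    # Both IDs are hypothetical placeholders; the instance-ID format is illustrative.
    instance = api.get_world_instance(
        "wrld_00000000-0000-0000-0000-000000000000",
        "12345~region(us)",
    )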
def get_world_metadata(self, world_id, **kwargs): 'Get World Metadata # noqa: E501\n\n Return a worlds custom metadata. This is currently believed to be unused. Metadata can be set with `updateWorld` and can be any arbitrary object. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world_metadata(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n WorldMetadata\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.get_world_metadata_endpoint.call_with_http_info(**kwargs)
-4,620,318,039,577,507,000
Get World Metadata # noqa: E501

Return a world's custom metadata. This is currently believed to be unused. Metadata can be set with `updateWorld` and can be any arbitrary object. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_world_metadata(world_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):

Keyword Args:
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    WorldMetadata
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
get_world_metadata
vrchatapi/vrchatapi-python
python
def get_world_metadata(self, world_id, **kwargs): 'Get World Metadata # noqa: E501\n\n Return a worlds custom metadata. This is currently believed to be unused. Metadata can be set with `updateWorld` and can be any arbitrary object. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world_metadata(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n WorldMetadata\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.get_world_metadata_endpoint.call_with_http_info(**kwargs)
def get_world_publish_status(self, world_id, **kwargs): 'Get World Publish Status # noqa: E501\n\n Returns a worlds publish status. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world_publish_status(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n WorldPublishStatus\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.get_world_publish_status_endpoint.call_with_http_info(**kwargs)
-7,381,891,028,071,097,000
Get World Publish Status # noqa: E501

Returns a world's publish status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_world_publish_status(world_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):

Keyword Args:
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    WorldPublishStatus
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
get_world_publish_status
vrchatapi/vrchatapi-python
python
def get_world_publish_status(self, world_id, **kwargs): 'Get World Publish Status # noqa: E501\n\n Returns a worlds publish status. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_world_publish_status(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n WorldPublishStatus\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.get_world_publish_status_endpoint.call_with_http_info(**kwargs)
def publish_world(self, world_id, **kwargs): 'Publish World # noqa: E501\n\n Publish a world. You can only publish one world per week. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.publish_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.publish_world_endpoint.call_with_http_info(**kwargs)
1,127,032,521,614,357,200
Publish World # noqa: E501

Publish a world. You can only publish one world per week. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.publish_world(world_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):

Keyword Args:
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    None
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
publish_world
vrchatapi/vrchatapi-python
python
def publish_world(self, world_id, **kwargs): 'Publish World # noqa: E501\n\n Publish a world. You can only publish one world per week. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.publish_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.publish_world_endpoint.call_with_http_info(**kwargs)
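A hedged sketch tying the two publish-related methods together: check the world's publish status, then publish, respecting the documented one-publish-per-week limit. The can_publish attribute name is an assumption about the generated WorldPublishStatus model:

    status = api.get_world_publish_status(world_id)
    if status.can_publish:  # attribute name assumed from the WorldPublishStatus model
        api.publish_world(world_id)  # returns None; limited to one publish per week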
def search_worlds(self, **kwargs): 'Search All Worlds # noqa: E501\n\n Search and list any worlds by query filters. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.search_worlds(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n featured (str): Filters on featured results.. [optional]\n sort (str): [optional] if omitted the server will use the default value of "popularity"\n user (str): Set to `me` for searching own worlds.. [optional] if omitted the server will use the default value of "me"\n user_id (str): Filter by UserID.. [optional]\n n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60\n order (str): [optional] if omitted the server will use the default value of "descending"\n offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]\n search (str): Filters by world name.. [optional]\n tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]\n notag (str): Tags to exclude (comma-separated).. [optional]\n release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"\n max_unity_version (str): The maximum Unity version supported by the asset.. [optional]\n min_unity_version (str): The minimum Unity version supported by the asset.. [optional]\n platform (str): The platform the asset supports.. [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [LimitedWorld]\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') return self.search_worlds_endpoint.call_with_http_info(**kwargs)
-2,585,619,684,312,409,000
Search All Worlds # noqa: E501

Search and list any worlds by query filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.search_worlds(async_req=True)
>>> result = thread.get()

Keyword Args:
    featured (str): Filters on featured results. [optional]
    sort (str): [optional] if omitted the server will use the default value of "popularity"
    user (str): Set to `me` for searching own worlds. [optional] if omitted the server will use the default value of "me"
    user_id (str): Filter by UserID. [optional]
    n (int): The number of objects to return. [optional] if omitted the server will use the default value of 60
    order (str): [optional] if omitted the server will use the default value of "descending"
    offset (int): A zero-based offset from the default object sorting from where search results start. [optional]
    search (str): Filters by world name. [optional]
    tag (str): Tags to include (comma-separated). Any of the tags needs to be present. [optional]
    notag (str): Tags to exclude (comma-separated). [optional]
    release_status (str): Filter by ReleaseStatus. [optional] if omitted the server will use the default value of "public"
    max_unity_version (str): The maximum Unity version supported by the asset. [optional]
    min_unity_version (str): The minimum Unity version supported by the asset. [optional]
    platform (str): The platform the asset supports. [optional]
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    [LimitedWorld]
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
search_worlds
vrchatapi/vrchatapi-python
python
def search_worlds(self, **kwargs): 'Search All Worlds # noqa: E501\n\n Search and list any worlds by query filters. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.search_worlds(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n featured (str): Filters on featured results.. [optional]\n sort (str): [optional] if omitted the server will use the default value of "popularity"\n user (str): Set to `me` for searching own worlds.. [optional] if omitted the server will use the default value of "me"\n user_id (str): Filter by UserID.. [optional]\n n (int): The number of objects to return.. [optional] if omitted the server will use the default value of 60\n order (str): [optional] if omitted the server will use the default value of "descending"\n offset (int): A zero-based offset from the default object sorting from where search results start.. [optional]\n search (str): Filters by world name.. [optional]\n tag (str): Tags to include (comma-separated). Any of the tags needs to be present.. [optional]\n notag (str): Tags to exclude (comma-separated).. [optional]\n release_status (str): Filter by ReleaseStatus.. [optional] if omitted the server will use the default value of "public"\n max_unity_version (str): The maximum Unity version supported by the asset.. [optional]\n min_unity_version (str): The minimum Unity version supported by the asset.. [optional]\n platform (str): The platform the asset supports.. [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [LimitedWorld]\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') return self.search_worlds_endpoint.call_with_http_info(**kwargs)
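Combining several of the documented filters in one search, again reusing the `api` object from the first sketch; the tag value and the printed attribute names are illustrative assumptions:

    results = api.search_worlds(
        search="club",           # filter by world name
        tag="system_approved",   # illustrative tag value
        n=25,
        offset=0,
        order="descending",
    )
    for w in results:            # each entry is a LimitedWorld
        print(w.id, w.name)      # attribute names assume the generated model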
def unpublish_world(self, world_id, **kwargs): 'Unpublish World # noqa: E501\n\n Unpublish a world. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.unpublish_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.unpublish_world_endpoint.call_with_http_info(**kwargs)
9,081,196,738,494,386,000
Unpublish World # noqa: E501

Unpublish a world. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.unpublish_world(world_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):

Keyword Args:
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    None
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
unpublish_world
vrchatapi/vrchatapi-python
python
def unpublish_world(self, world_id, **kwargs): 'Unpublish World # noqa: E501\n\n Unpublish a world. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.unpublish_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.unpublish_world_endpoint.call_with_http_info(**kwargs)
def update_world(self, world_id, **kwargs): 'Update World # noqa: E501\n\n Update information about a specific World. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n update_world_request (UpdateWorldRequest): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n World\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.update_world_endpoint.call_with_http_info(**kwargs)
8,267,214,315,005,162,000
Update World # noqa: E501

Update information about a specific World. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.update_world(world_id, async_req=True)
>>> result = thread.get()

Args:
    world_id (str):

Keyword Args:
    update_world_request (UpdateWorldRequest): [optional]
    _return_http_data_only (bool): return the response data only, without
        the HTTP status code and headers. Default is True.
    _preload_content (bool): if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    _request_timeout (int/float/tuple): timeout setting for this request. If
        one number is provided, it will be the total request timeout. It can
        also be a pair (tuple) of (connection, read) timeouts.
        Default is None.
    _check_input_type (bool): specifies if type checking
        should be done on the data sent to the server.
        Default is True.
    _check_return_type (bool): specifies if type checking
        should be done on the data received from the server.
        Default is True.
    _host_index (int/None): specifies the index of the server
        that we want to use.
        Default is read from the configuration.
    async_req (bool): execute request asynchronously

Returns:
    World
    If the method is called asynchronously, returns the request
    thread.
vrchatapi/api/worlds_api.py
update_world
vrchatapi/vrchatapi-python
python
def update_world(self, world_id, **kwargs): 'Update World # noqa: E501\n\n Update information about a specific World. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_world(world_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n world_id (str):\n\n Keyword Args:\n update_world_request (UpdateWorldRequest): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n World\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['world_id'] = world_id return self.update_world_endpoint.call_with_http_info(**kwargs)
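Updating a world's description with the method above; the model import path and field name are assumptions about this generator version and may differ:

    # Import path and field name are assumptions about this generator version.
    from vrchatapi.model.update_world_request import UpdateWorldRequest

    req = UpdateWorldRequest(description="A new description")
    world = api.update_world(world_id, update_world_request=req)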
def validate(self, lint_task_report, expected_messages, failed_count): 'Assert linter output messages with expected messages.' for stdout in lint_task_report: if stdout.failed: for message in expected_messages: self.assert_same_list_elements([message], stdout.trimmed_messages) self.assert_failed_messages_count(stdout.get_report(), failed_count) else: continue
9,054,549,909,367,451,000
Assert that the linter output messages match the expected messages.
scripts/linters/js_ts_linter_test.py
validate
Aryan-Abhishek/oppia
python
def validate(self, lint_task_report, expected_messages, failed_count): for stdout in lint_task_report: if stdout.failed: for message in expected_messages: self.assert_same_list_elements([message], stdout.trimmed_messages) self.assert_failed_messages_count(stdout.get_report(), failed_count) else: continue
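The validate helper duck-types its input: each element of lint_task_report only needs a failed flag, a trimmed_messages list, and a get_report() method. A minimal stand-in satisfying that contract (names illustrative), handy for unit-testing the helper itself:

    class FakeLintReport:
        """Minimal stand-in matching the interface validate() relies on."""

        def __init__(self, failed, trimmed_messages, report):
            self.failed = failed
            self.trimmed_messages = trimmed_messages
            self._report = report

        def get_report(self):
            return self._report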
def tokenize(tokenizer, text_array, max_seq_len=64, pad_to_max_length=True, add_special_tokens=True): ' Returns tokenized IDs and attention mask\n The transformers encode_plus method returns the following:\n {\n input_ids: list[int],\n token_type_ids: list[int] if return_token_type_ids is True (default)\n attention_mask: list[int] if return_attention_mask is True (default)\n overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True\n num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True\n special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True\n }' all_tokens = [] all_attention_mask = [] for (i, text) in enumerate(tqdm(text_array)): encoded = tokenizer.encode_plus(text, add_special_tokens=add_special_tokens, max_length=max_seq_len, pad_to_max_length=pad_to_max_length) tokens = torch.tensor(encoded['input_ids']) attention_mask = torch.tensor(encoded['attention_mask']) all_tokens.append(tokens) all_attention_mask.append(attention_mask) return (all_tokens, all_attention_mask)
-6,831,231,487,204,000,000
Returns tokenized IDs and attention mask
The transformers encode_plus method returns the following:
{
    input_ids: list[int],
    token_type_ids: list[int] if return_token_type_ids is True (default)
    attention_mask: list[int] if return_attention_mask is True (default)
    overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
    num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
    special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
}
utils/utils.py
tokenize
suhasgupta791/mids-w251-final-project
python
def tokenize(tokenizer, text_array, max_seq_len=64, pad_to_max_length=True, add_special_tokens=True): ' Returns tokenized IDs and attention mask\n The transformers encode_plus method returns the following:\n {\n input_ids: list[int],\n token_type_ids: list[int] if return_token_type_ids is True (default)\n attention_mask: list[int] if return_attention_mask is True (default)\n overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True\n num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True\n special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True\n }' all_tokens = [] all_attention_mask = [] for (i, text) in enumerate(tqdm(text_array)): encoded = tokenizer.encode_plus(text, add_special_tokens=add_special_tokens, max_length=max_seq_len, pad_to_max_length=pad_to_max_length) tokens = torch.tensor(encoded['input_ids']) attention_mask = torch.tensor(encoded['attention_mask']) all_tokens.append(tokens) all_attention_mask.append(attention_mask) return (all_tokens, all_attention_mask)
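A minimal usage sketch for the tokenize helper above, assuming it is in scope and that the torch and transformers packages are installed; the model name is an illustrative common default, not one mandated by the source.

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')  # illustrative choice
texts = ['a short example', 'another sentence to encode']
tokens, masks = tokenize(tokenizer, texts, max_seq_len=16)
batch = torch.stack(tokens)      # (2, 16) tensor of input ids, padded to max_seq_len
attention = torch.stack(masks)   # (2, 16) attention mask, 0 over the padding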
def buildinfo_file_name(prefix): '\n Filename of the binary package meta-data file\n ' name = os.path.join(prefix, '.spack/binary_distribution') return name
3,146,870,843,183,292,000
Filename of the binary package meta-data file
lib/spack/spack/binary_distribution.py
buildinfo_file_name
AndrewGaspar/spack
python
def buildinfo_file_name(prefix): '\n \n ' name = os.path.join(prefix, '.spack/binary_distribution') return name
def read_buildinfo_file(prefix): '\n Read buildinfo file\n ' filename = buildinfo_file_name(prefix) with open(filename, 'r') as inputfile: content = inputfile.read() buildinfo = yaml.load(content) return buildinfo
7,406,886,191,899,131,000
Read buildinfo file
lib/spack/spack/binary_distribution.py
read_buildinfo_file
AndrewGaspar/spack
python
def read_buildinfo_file(prefix): '\n \n ' filename = buildinfo_file_name(prefix) with open(filename, 'r') as inputfile: content = inputfile.read() buildinfo = yaml.load(content) return buildinfo
def write_buildinfo_file(spec, workdir, rel=False): '\n Create a cache file containing information\n required for the relocation\n ' prefix = spec.prefix text_to_relocate = [] binary_to_relocate = [] link_to_relocate = [] blacklist = ('.spack', 'man') prefix_to_hash = dict() prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash() deps = spack.build_environment.get_rpath_deps(spec.package) for d in deps: prefix_to_hash[str(d.prefix)] = d.dag_hash() for (root, dirs, files) in os.walk(prefix, topdown=True): dirs[:] = [d for d in dirs if (d not in blacklist)] for filename in files: path_name = os.path.join(root, filename) (m_type, m_subtype) = relocate.mime_type(path_name) if os.path.islink(path_name): link = os.readlink(path_name) if os.path.isabs(link): if link.startswith(spack.store.layout.root): rel_path_name = os.path.relpath(path_name, prefix) link_to_relocate.append(rel_path_name) else: msg = ('Absolute link %s to %s ' % (path_name, link)) msg += ('outside of prefix %s ' % prefix) msg += 'should not be relocated.' tty.warn(msg) if relocate.needs_binary_relocation(m_type, m_subtype): if (not filename.endswith('.o')): rel_path_name = os.path.relpath(path_name, prefix) binary_to_relocate.append(rel_path_name) if relocate.needs_text_relocation(m_type, m_subtype): rel_path_name = os.path.relpath(path_name, prefix) text_to_relocate.append(rel_path_name) buildinfo = {} buildinfo['relative_rpaths'] = rel buildinfo['buildpath'] = spack.store.layout.root buildinfo['spackprefix'] = spack.paths.prefix buildinfo['relative_prefix'] = os.path.relpath(prefix, spack.store.layout.root) buildinfo['relocate_textfiles'] = text_to_relocate buildinfo['relocate_binaries'] = binary_to_relocate buildinfo['relocate_links'] = link_to_relocate buildinfo['prefix_to_hash'] = prefix_to_hash filename = buildinfo_file_name(workdir) with open(filename, 'w') as outfile: outfile.write(syaml.dump(buildinfo, default_flow_style=True))
-354,899,277,047,931,700
Create a cache file containing information required for the relocation
lib/spack/spack/binary_distribution.py
write_buildinfo_file
AndrewGaspar/spack
python
def write_buildinfo_file(spec, workdir, rel=False): '\n Create a cache file containing information\n required for the relocation\n ' prefix = spec.prefix text_to_relocate = [] binary_to_relocate = [] link_to_relocate = [] blacklist = ('.spack', 'man') prefix_to_hash = dict() prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash() deps = spack.build_environment.get_rpath_deps(spec.package) for d in deps: prefix_to_hash[str(d.prefix)] = d.dag_hash() for (root, dirs, files) in os.walk(prefix, topdown=True): dirs[:] = [d for d in dirs if (d not in blacklist)] for filename in files: path_name = os.path.join(root, filename) (m_type, m_subtype) = relocate.mime_type(path_name) if os.path.islink(path_name): link = os.readlink(path_name) if os.path.isabs(link): if link.startswith(spack.store.layout.root): rel_path_name = os.path.relpath(path_name, prefix) link_to_relocate.append(rel_path_name) else: msg = ('Absolute link %s to %s ' % (path_name, link)) msg += ('outside of prefix %s ' % prefix) msg += 'should not be relocated.' tty.warn(msg) if relocate.needs_binary_relocation(m_type, m_subtype): if (not filename.endswith('.o')): rel_path_name = os.path.relpath(path_name, prefix) binary_to_relocate.append(rel_path_name) if relocate.needs_text_relocation(m_type, m_subtype): rel_path_name = os.path.relpath(path_name, prefix) text_to_relocate.append(rel_path_name) buildinfo = {} buildinfo['relative_rpaths'] = rel buildinfo['buildpath'] = spack.store.layout.root buildinfo['spackprefix'] = spack.paths.prefix buildinfo['relative_prefix'] = os.path.relpath(prefix, spack.store.layout.root) buildinfo['relocate_textfiles'] = text_to_relocate buildinfo['relocate_binaries'] = binary_to_relocate buildinfo['relocate_links'] = link_to_relocate buildinfo['prefix_to_hash'] = prefix_to_hash filename = buildinfo_file_name(workdir) with open(filename, 'w') as outfile: outfile.write(syaml.dump(buildinfo, default_flow_style=True))
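For orientation, a sketch of the dictionary write_buildinfo_file() serializes to <workdir>/.spack/binary_distribution; every path and hash below is an illustrative stand-in, not real install data.

buildinfo = {
    'relative_rpaths': False,
    'buildpath': '/opt/spack/opt/spack',   # spack.store.layout.root
    'spackprefix': '/opt/spack',           # spack.paths.prefix
    'relative_prefix': 'linux-ubuntu18.04-x86_64/gcc-7.4.0/zlib-1.2.11-h1',
    'relocate_textfiles': ['bin/zlib-config'],
    'relocate_binaries': ['lib/libz.so'],
    'relocate_links': ['lib/libz.so.1'],
    'prefix_to_hash': {'/opt/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/zlib-1.2.11-h1': 'h1'},
}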
def tarball_directory_name(spec): '\n Return name of the tarball directory according to the convention\n <os>-<architecture>/<compiler>/<package>-<version>/\n ' return ('%s/%s/%s-%s' % (spec.architecture, str(spec.compiler).replace('@', '-'), spec.name, spec.version))
-9,080,255,287,714,440,000
Return name of the tarball directory according to the convention <os>-<architecture>/<compiler>/<package>-<version>/
lib/spack/spack/binary_distribution.py
tarball_directory_name
AndrewGaspar/spack
python
def tarball_directory_name(spec): '\n Return name of the tarball directory according to the convention\n <os>-<architecture>/<compiler>/<package>-<version>/\n ' return ('%s/%s/%s-%s' % (spec.architecture, str(spec.compiler).replace('@', '-'), spec.name, spec.version))
def tarball_name(spec, ext): '\n Return the name of the tarfile according to the convention\n <os>-<architecture>-<package>-<dag_hash><ext>\n ' return ('%s-%s-%s-%s-%s%s' % (spec.architecture, str(spec.compiler).replace('@', '-'), spec.name, spec.version, spec.dag_hash(), ext))
851,457,283,425,775,400
Return the name of the tarfile according to the convention <os>-<architecture>-<package>-<dag_hash><ext>
lib/spack/spack/binary_distribution.py
tarball_name
AndrewGaspar/spack
python
def tarball_name(spec, ext): '\n Return the name of the tarfile according to the convention\n <os>-<architecture>-<package>-<dag_hash><ext>\n ' return ('%s-%s-%s-%s-%s%s' % (spec.architecture, str(spec.compiler).replace('@', '-'), spec.name, spec.version, spec.dag_hash(), ext))
def tarball_path_name(spec, ext): '\n Return the full path+name for a given spec according to the convention\n <tarball_directory_name>/<tarball_name>\n ' return os.path.join(tarball_directory_name(spec), tarball_name(spec, ext))
-774,360,739,942,398,700
Return the full path+name for a given spec according to the convention <tarball_directory_name>/<tarball_name>
lib/spack/spack/binary_distribution.py
tarball_path_name
AndrewGaspar/spack
python
def tarball_path_name(spec, ext): '\n Return the full path+name for a given spec according to the convention\n <tarball_directory_name>/<tarball_name>\n ' return os.path.join(tarball_directory_name(spec), tarball_name(spec, ext))
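The three naming helpers above compose as follows; this sketch substitutes made-up values for a concrete spec's fields, so the resulting strings are illustrative only.

import os

arch, compiler = 'linux-ubuntu18.04-x86_64', 'gcc@7.4.0'   # hypothetical spec fields
name, version, dag_hash = 'zlib', '1.2.11', 'h1'

directory = '%s/%s/%s-%s' % (arch, compiler.replace('@', '-'), name, version)
filename = '%s-%s-%s-%s-%s%s' % (arch, compiler.replace('@', '-'), name, version, dag_hash, '.spack')
print(os.path.join(directory, filename))
# linux-ubuntu18.04-x86_64/gcc-7.4.0/zlib-1.2.11/linux-ubuntu18.04-x86_64-gcc-7.4.0-zlib-1.2.11-h1.spack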
def generate_package_index(cache_prefix): 'Create the build cache index page.\n\n Creates (or replaces) the "index.html" page at the location given in\n cache_prefix. This page contains a link for each binary package (*.yaml)\n and public key (*.key) under cache_prefix.\n ' tmpdir = tempfile.mkdtemp() try: index_html_path = os.path.join(tmpdir, 'index.html') file_list = (entry for entry in web_util.list_url(cache_prefix) if (entry.endswith('.yaml') or entry.endswith('.key'))) with open(index_html_path, 'w') as f: f.write(BUILD_CACHE_INDEX_TEMPLATE.format(title='Spack Package Index', path_list='\n'.join((BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path) for path in file_list)))) web_util.push_to_url(index_html_path, url_util.join(cache_prefix, 'index.html'), keep_original=False, extra_args={'ContentType': 'text/html'}) finally: shutil.rmtree(tmpdir)
2,301,271,199,688,875,000
Create the build cache index page. Creates (or replaces) the "index.html" page at the location given in cache_prefix. This page contains a link for each binary package (*.yaml) and public key (*.key) under cache_prefix.
lib/spack/spack/binary_distribution.py
generate_package_index
AndrewGaspar/spack
python
def generate_package_index(cache_prefix): 'Create the build cache index page.\n\n Creates (or replaces) the "index.html" page at the location given in\n cache_prefix. This page contains a link for each binary package (*.yaml)\n and public key (*.key) under cache_prefix.\n ' tmpdir = tempfile.mkdtemp() try: index_html_path = os.path.join(tmpdir, 'index.html') file_list = (entry for entry in web_util.list_url(cache_prefix) if (entry.endswith('.yaml') or entry.endswith('.key'))) with open(index_html_path, 'w') as f: f.write(BUILD_CACHE_INDEX_TEMPLATE.format(title='Spack Package Index', path_list='\n'.join((BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path) for path in file_list)))) web_util.push_to_url(index_html_path, url_util.join(cache_prefix, 'index.html'), keep_original=False, extra_args={'ContentType': 'text/html'}) finally: shutil.rmtree(tmpdir)
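A reduced sketch of how the index body is assembled; the two template strings are invented stand-ins, since Spack's actual BUILD_CACHE_INDEX_TEMPLATE constants do not appear in this record.

INDEX_TEMPLATE = '<html><body><h1>{title}</h1>\n{path_list}\n</body></html>'  # stand-in
ENTRY_TEMPLATE = '<a href="{path}">{path}</a>'                               # stand-in

paths = ['pkg-a.spec.yaml', 'signing.key']
html = INDEX_TEMPLATE.format(
    title='Spack Package Index',
    path_list='\n'.join(ENTRY_TEMPLATE.format(path=p) for p in paths))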
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False, allow_root=False, key=None, regenerate_index=False): '\n Build a tarball from given spec and put it into the directory structure\n used at the mirror (following <tarball_directory_name>).\n ' if (not spec.concrete): raise ValueError('spec must be concrete to build tarball') tmpdir = tempfile.mkdtemp() cache_prefix = build_cache_prefix(tmpdir) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec)) tarfile_path = os.path.join(tarfile_dir, tarfile_name) spackfile_path = os.path.join(cache_prefix, tarball_path_name(spec, '.spack')) remote_spackfile_path = url_util.join(outdir, os.path.relpath(spackfile_path, tmpdir)) mkdirp(tarfile_dir) if web_util.url_exists(remote_spackfile_path): if force: web_util.remove_url(remote_spackfile_path) else: raise NoOverwriteException(url_util.format(remote_spackfile_path)) spec_file = os.path.join(spec.prefix, '.spack', 'spec.yaml') specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name)) remote_specfile_path = url_util.join(outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir))) if web_util.url_exists(remote_specfile_path): if force: web_util.remove_url(remote_specfile_path) else: raise NoOverwriteException(url_util.format(remote_specfile_path)) workdir = os.path.join(tmpdir, os.path.basename(spec.prefix)) temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name=('%s' % spec.prefix), arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(workdir) os.remove(temp_tarfile_path) write_buildinfo_file(spec, workdir, rel) if rel: try: make_package_relative(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) else: try: check_package_relocatable(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) with closing(tarfile.open(tarfile_path, 'w:gz')) as tar: tar.add(name=('%s' % workdir), arcname=('%s' % os.path.basename(spec.prefix))) shutil.rmtree(workdir) checksum = checksum_tarball(tarfile_path) with open(spec_file, 'r') as inputfile: content = inputfile.read() spec_dict = yaml.load(content) bchecksum = {} bchecksum['hash_algorithm'] = 'sha256' bchecksum['hash'] = checksum spec_dict['binary_cache_checksum'] = bchecksum buildinfo = {} buildinfo['relative_prefix'] = os.path.relpath(spec.prefix, spack.store.layout.root) buildinfo['relative_rpaths'] = rel spec_dict['buildinfo'] = buildinfo spec_dict['full_hash'] = spec.full_hash() tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(spec_dict['full_hash'], spec.name, url_util.format(remote_specfile_path))) tty.debug(spec.tree()) with open(specfile_path, 'w') as outfile: outfile.write(syaml.dump(spec_dict)) if (not unsigned): sign_tarball(key, force, specfile_path) with closing(tarfile.open(spackfile_path, 'w')) as tar: tar.add(name=tarfile_path, arcname=('%s' % tarfile_name)) tar.add(name=specfile_path, arcname=('%s' % specfile_name)) if (not unsigned): tar.add(name=('%s.asc' % specfile_path), arcname=('%s.asc' % specfile_name)) os.remove(tarfile_path) if (not unsigned): os.remove(('%s.asc' % specfile_path)) web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False) web_util.push_to_url(specfile_path, remote_specfile_path, keep_original=False) tty.msg(('Buildcache for "%s" written to \n %s' % (spec, remote_spackfile_path))) try: if regenerate_index: generate_package_index(url_util.join(outdir, os.path.relpath(cache_prefix, tmpdir))) finally: shutil.rmtree(tmpdir) return None
7,027,555,427,446,063,000
Build a tarball from given spec and put it into the directory structure used at the mirror (following <tarball_directory_name>).
lib/spack/spack/binary_distribution.py
build_tarball
AndrewGaspar/spack
python
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False, allow_root=False, key=None, regenerate_index=False): '\n Build a tarball from given spec and put it into the directory structure\n used at the mirror (following <tarball_directory_name>).\n ' if (not spec.concrete): raise ValueError('spec must be concrete to build tarball') tmpdir = tempfile.mkdtemp() cache_prefix = build_cache_prefix(tmpdir) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec)) tarfile_path = os.path.join(tarfile_dir, tarfile_name) spackfile_path = os.path.join(cache_prefix, tarball_path_name(spec, '.spack')) remote_spackfile_path = url_util.join(outdir, os.path.relpath(spackfile_path, tmpdir)) mkdirp(tarfile_dir) if web_util.url_exists(remote_spackfile_path): if force: web_util.remove_url(remote_spackfile_path) else: raise NoOverwriteException(url_util.format(remote_spackfile_path)) spec_file = os.path.join(spec.prefix, '.spack', 'spec.yaml') specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name)) remote_specfile_path = url_util.join(outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir))) if web_util.url_exists(remote_specfile_path): if force: web_util.remove_url(remote_specfile_path) else: raise NoOverwriteException(url_util.format(remote_specfile_path)) workdir = os.path.join(tmpdir, os.path.basename(spec.prefix)) temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name=('%s' % spec.prefix), arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(workdir) os.remove(temp_tarfile_path) write_buildinfo_file(spec, workdir, rel) if rel: try: make_package_relative(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) else: try: check_package_relocatable(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) with closing(tarfile.open(tarfile_path, 'w:gz')) as tar: tar.add(name=('%s' % workdir), arcname=('%s' % os.path.basename(spec.prefix))) shutil.rmtree(workdir) checksum = checksum_tarball(tarfile_path) with open(spec_file, 'r') as inputfile: content = inputfile.read() spec_dict = yaml.load(content) bchecksum = {} bchecksum['hash_algorithm'] = 'sha256' bchecksum['hash'] = checksum spec_dict['binary_cache_checksum'] = bchecksum buildinfo = {} buildinfo['relative_prefix'] = os.path.relpath(spec.prefix, spack.store.layout.root) buildinfo['relative_rpaths'] = rel spec_dict['buildinfo'] = buildinfo spec_dict['full_hash'] = spec.full_hash() tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(spec_dict['full_hash'], spec.name, url_util.format(remote_specfile_path))) tty.debug(spec.tree()) with open(specfile_path, 'w') as outfile: outfile.write(syaml.dump(spec_dict)) if (not unsigned): sign_tarball(key, force, specfile_path) with closing(tarfile.open(spackfile_path, 'w')) as tar: tar.add(name=tarfile_path, arcname=('%s' % tarfile_name)) tar.add(name=specfile_path, arcname=('%s' % specfile_name)) if (not unsigned): tar.add(name=('%s.asc' % specfile_path), arcname=('%s.asc' % specfile_name)) os.remove(tarfile_path) if (not unsigned): os.remove(('%s.asc' % specfile_path)) web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False) web_util.push_to_url(specfile_path, remote_specfile_path, keep_original=False) tty.msg(('Buildcache for "%s" written to \n %s' % (spec, remote_spackfile_path))) try: if regenerate_index: generate_package_index(url_util.join(outdir, os.path.relpath(cache_prefix, tmpdir))) finally: shutil.rmtree(tmpdir) return None
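The fields build_tarball() injects into the package's spec.yaml before upload, sketched with placeholder data; the digest is a real sha256 (of the empty string) used only as a stand-in for checksum_tarball()'s output.

import hashlib

checksum = hashlib.sha256(b'').hexdigest()   # stand-in for checksum_tarball(tarfile_path)
spec_dict = {}                               # normally loaded from the install's spec.yaml
spec_dict['binary_cache_checksum'] = {'hash_algorithm': 'sha256', 'hash': checksum}
spec_dict['buildinfo'] = {'relative_prefix': 'linux-ubuntu18.04-x86_64/gcc-7.4.0/zlib-1.2.11-h1',
                          'relative_rpaths': False}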
def download_tarball(spec): '\n Download binary tarball for given package into stage area\n Return True if successful\n ' if (not spack.mirror.MirrorCollection()): tty.die(('Please add a spack mirror to allow ' + 'download of pre-compiled packages.')) tarball = tarball_path_name(spec, '.spack') for mirror in spack.mirror.MirrorCollection().values(): url = url_util.join(mirror.fetch_url, _build_cache_relative_path, tarball) stage = Stage(url, name='build_cache', keep=True) try: stage.fetch() return stage.save_filename except fs.FetchError: continue return None
-2,976,945,472,007,021,000
Download binary tarball for given package into stage area Return True if successful
lib/spack/spack/binary_distribution.py
download_tarball
AndrewGaspar/spack
python
def download_tarball(spec): '\n Download binary tarball for given package into stage area\n Return True if successful\n ' if (not spack.mirror.MirrorCollection()): tty.die(('Please add a spack mirror to allow ' + 'download of pre-compiled packages.')) tarball = tarball_path_name(spec, '.spack') for mirror in spack.mirror.MirrorCollection().values(): url = url_util.join(mirror.fetch_url, _build_cache_relative_path, tarball) stage = Stage(url, name='build_cache', keep=True) try: stage.fetch() return stage.save_filename except fs.FetchError: continue return None
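The mirror loop above returns the first fetch that succeeds and falls through on FetchError; a generic reduction of that first-hit pattern, with OSError standing in for fs.FetchError:

def first_successful(fetchers):
    # Return the result of the first callable that does not raise; else None.
    for fetch in fetchers:
        try:
            return fetch()
        except OSError:
            continue
    return None

first_successful([lambda: open('/nonexistent'), lambda: 'hit'])   # -> 'hit'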
def make_package_relative(workdir, spec, allow_root): '\n Change paths in binaries to relative paths. Change absolute symlinks\n to relative symlinks.\n ' prefix = spec.prefix buildinfo = read_buildinfo_file(workdir) old_layout_root = buildinfo['buildpath'] orig_path_names = list() cur_path_names = list() for filename in buildinfo['relocate_binaries']: orig_path_names.append(os.path.join(prefix, filename)) cur_path_names.append(os.path.join(workdir, filename)) if ((spec.architecture.platform == 'darwin') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'darwin'))): relocate.make_macho_binaries_relative(cur_path_names, orig_path_names, old_layout_root) if ((spec.architecture.platform == 'linux') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'linux'))): relocate.make_elf_binaries_relative(cur_path_names, orig_path_names, old_layout_root) relocate.check_files_relocatable(cur_path_names, allow_root) orig_path_names = list() cur_path_names = list() for linkname in buildinfo.get('relocate_links', []): orig_path_names.append(os.path.join(prefix, linkname)) cur_path_names.append(os.path.join(workdir, linkname)) relocate.make_link_relative(cur_path_names, orig_path_names)
2,882,771,149,939,471,000
Change paths in binaries to relative paths. Change absolute symlinks to relative symlinks.
lib/spack/spack/binary_distribution.py
make_package_relative
AndrewGaspar/spack
python
def make_package_relative(workdir, spec, allow_root): '\n Change paths in binaries to relative paths. Change absolute symlinks\n to relative symlinks.\n ' prefix = spec.prefix buildinfo = read_buildinfo_file(workdir) old_layout_root = buildinfo['buildpath'] orig_path_names = list() cur_path_names = list() for filename in buildinfo['relocate_binaries']: orig_path_names.append(os.path.join(prefix, filename)) cur_path_names.append(os.path.join(workdir, filename)) if ((spec.architecture.platform == 'darwin') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'darwin'))): relocate.make_macho_binaries_relative(cur_path_names, orig_path_names, old_layout_root) if ((spec.architecture.platform == 'linux') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'linux'))): relocate.make_elf_binaries_relative(cur_path_names, orig_path_names, old_layout_root) relocate.check_files_relocatable(cur_path_names, allow_root) orig_path_names = list() cur_path_names = list() for linkname in buildinfo.get('relocate_links', []): orig_path_names.append(os.path.join(prefix, linkname)) cur_path_names.append(os.path.join(workdir, linkname)) relocate.make_link_relative(cur_path_names, orig_path_names)
def check_package_relocatable(workdir, spec, allow_root): '\n Check if package binaries are relocatable.\n Change links to placeholder links.\n ' buildinfo = read_buildinfo_file(workdir) cur_path_names = list() for filename in buildinfo['relocate_binaries']: cur_path_names.append(os.path.join(workdir, filename)) relocate.check_files_relocatable(cur_path_names, allow_root)
1,812,976,719,999,216,600
Check if package binaries are relocatable. Change links to placeholder links.
lib/spack/spack/binary_distribution.py
check_package_relocatable
AndrewGaspar/spack
python
def check_package_relocatable(workdir, spec, allow_root): '\n Check if package binaries are relocatable.\n Change links to placeholder links.\n ' buildinfo = read_buildinfo_file(workdir) cur_path_names = list() for filename in buildinfo['relocate_binaries']: cur_path_names.append(os.path.join(workdir, filename)) relocate.check_files_relocatable(cur_path_names, allow_root)
def relocate_package(spec, allow_root): '\n Relocate the given package\n ' workdir = str(spec.prefix) buildinfo = read_buildinfo_file(workdir) new_layout_root = str(spack.store.layout.root) new_prefix = str(spec.prefix) new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root)) new_spack_prefix = str(spack.paths.prefix) old_layout_root = str(buildinfo['buildpath']) old_spack_prefix = str(buildinfo.get('spackprefix')) old_rel_prefix = buildinfo.get('relative_prefix') old_prefix = os.path.join(old_layout_root, old_rel_prefix) rel = buildinfo.get('relative_rpaths') prefix_to_hash = buildinfo.get('prefix_to_hash', None) if ((old_rel_prefix != new_rel_prefix) and (not prefix_to_hash)): msg = 'Package tarball was created from an install ' msg += 'prefix with a different directory layout and an older ' msg += 'buildcache create implementation. It cannot be relocated.' raise NewLayoutException(msg) if (not prefix_to_hash): prefix_to_hash = dict() hash_to_prefix = dict() hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix) new_deps = spack.build_environment.get_rpath_deps(spec.package) for d in new_deps: hash_to_prefix[d.format('{hash}')] = str(d.prefix) prefix_to_prefix = dict() for (orig_prefix, hash) in prefix_to_hash.items(): prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None) prefix_to_prefix[old_prefix] = new_prefix prefix_to_prefix[old_layout_root] = new_layout_root tty.debug('Relocating package from', ('%s to %s.' % (old_layout_root, new_layout_root))) def is_backup_file(file): return file.endswith('~') text_names = list() for filename in buildinfo['relocate_textfiles']: text_name = os.path.join(workdir, filename) if (not is_backup_file(text_name)): text_names.append(text_name) if (old_layout_root != new_layout_root): paths_to_relocate = [old_spack_prefix, old_layout_root] paths_to_relocate.extend(prefix_to_hash.keys()) files_to_relocate = list(filter((lambda pathname: (not relocate.file_is_relocatable(pathname, paths_to_relocate=paths_to_relocate))), map((lambda filename: os.path.join(workdir, filename)), buildinfo['relocate_binaries']))) if ((spec.architecture.platform == 'darwin') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'darwin'))): relocate.relocate_macho_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) if ((spec.architecture.platform == 'linux') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'linux'))): relocate.relocate_elf_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) link_names = [linkname for linkname in buildinfo.get('relocate_links', [])] relocate.relocate_links(link_names, old_layout_root, new_layout_root, old_prefix, new_prefix, prefix_to_prefix) relocate.relocate_text(text_names, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix) relocate.relocate_text_bin(files_to_relocate, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix)
-1,651,716,833,346,948,600
Relocate the given package
lib/spack/spack/binary_distribution.py
relocate_package
AndrewGaspar/spack
python
def relocate_package(spec, allow_root): '\n \n ' workdir = str(spec.prefix) buildinfo = read_buildinfo_file(workdir) new_layout_root = str(spack.store.layout.root) new_prefix = str(spec.prefix) new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root)) new_spack_prefix = str(spack.paths.prefix) old_layout_root = str(buildinfo['buildpath']) old_spack_prefix = str(buildinfo.get('spackprefix')) old_rel_prefix = buildinfo.get('relative_prefix') old_prefix = os.path.join(old_layout_root, old_rel_prefix) rel = buildinfo.get('relative_rpaths') prefix_to_hash = buildinfo.get('prefix_to_hash', None) if ((old_rel_prefix != new_rel_prefix) and (not prefix_to_hash)): msg = 'Package tarball was created from an install ' msg += 'prefix with a different directory layout and an older ' msg += 'buildcache create implementation. It cannot be relocated.' raise NewLayoutException(msg) if (not prefix_to_hash): prefix_to_hash = dict() hash_to_prefix = dict() hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix) new_deps = spack.build_environment.get_rpath_deps(spec.package) for d in new_deps: hash_to_prefix[d.format('{hash}')] = str(d.prefix) prefix_to_prefix = dict() for (orig_prefix, hash) in prefix_to_hash.items(): prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None) prefix_to_prefix[old_prefix] = new_prefix prefix_to_prefix[old_layout_root] = new_layout_root tty.debug('Relocating package from', ('%s to %s.' % (old_layout_root, new_layout_root))) def is_backup_file(file): return file.endswith('~') text_names = list() for filename in buildinfo['relocate_textfiles']: text_name = os.path.join(workdir, filename) if (not is_backup_file(text_name)): text_names.append(text_name) if (old_layout_root != new_layout_root): paths_to_relocate = [old_spack_prefix, old_layout_root] paths_to_relocate.extend(prefix_to_hash.keys()) files_to_relocate = list(filter((lambda pathname: (not relocate.file_is_relocatable(pathname, paths_to_relocate=paths_to_relocate))), map((lambda filename: os.path.join(workdir, filename)), buildinfo['relocate_binaries']))) if ((spec.architecture.platform == 'darwin') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'darwin'))): relocate.relocate_macho_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) if ((spec.architecture.platform == 'linux') or ((spec.architecture.platform == 'test') and (platform.system().lower() == 'linux'))): relocate.relocate_elf_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) link_names = [linkname for linkname in buildinfo.get('relocate_links', [])] relocate.relocate_links(link_names, old_layout_root, new_layout_root, old_prefix, new_prefix, prefix_to_prefix) relocate.relocate_text(text_names, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix) relocate.relocate_text_bin(files_to_relocate, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix)
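The heart of relocate_package() is the old-prefix to new-prefix mapping; a sketch of its construction, with fabricated hashes and paths:

prefix_to_hash = {'/old/store/gcc-7.4.0/zlib-1.2.11-h1': 'h1'}    # from buildinfo
hash_to_prefix = {'h1': '/new/store/gcc-7.4.0/zlib-1.2.11-h1'}    # from the new install

prefix_to_prefix = {old: hash_to_prefix.get(h) for old, h in prefix_to_hash.items()}
prefix_to_prefix['/old/store'] = '/new/store'   # old_layout_root -> new_layout_root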
def extract_tarball(spec, filename, allow_root=False, unsigned=False, force=False): '\n extract binary tarball for given package into install area\n ' if os.path.exists(spec.prefix): if force: shutil.rmtree(spec.prefix) else: raise NoOverwriteException(str(spec.prefix)) tmpdir = tempfile.mkdtemp() stagepath = os.path.dirname(filename) spackfile_name = tarball_name(spec, '.spack') spackfile_path = os.path.join(stagepath, spackfile_name) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_path = os.path.join(tmpdir, tarfile_name) specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.join(tmpdir, specfile_name) with closing(tarfile.open(spackfile_path, 'r')) as tar: tar.extractall(tmpdir) if (not os.path.exists(tarfile_path)): tarfile_name = tarball_name(spec, '.tar.bz2') tarfile_path = os.path.join(tmpdir, tarfile_name) if (not unsigned): if os.path.exists(('%s.asc' % specfile_path)): try: suppress = config.get('config:suppress_gpg_warnings', False) Gpg.verify(('%s.asc' % specfile_path), specfile_path, suppress) except Exception as e: shutil.rmtree(tmpdir) raise e else: shutil.rmtree(tmpdir) raise NoVerifyException('Package spec file failed signature verification.\nUse spack buildcache keys to download and install a key for verification from the mirror.') checksum = checksum_tarball(tarfile_path) spec_dict = {} with open(specfile_path, 'r') as inputfile: content = inputfile.read() spec_dict = syaml.load(content) bchecksum = spec_dict['binary_cache_checksum'] if (bchecksum['hash'] != checksum): shutil.rmtree(tmpdir) raise NoChecksumException('Package tarball failed checksum verification.\nIt cannot be installed.') new_relative_prefix = str(os.path.relpath(spec.prefix, spack.store.layout.root)) buildinfo = spec_dict.get('buildinfo', {}) old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix) rel = buildinfo.get('relative_rpaths') info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s' tty.debug((info % (old_relative_prefix, new_relative_prefix, rel))) with closing(tarfile.open(tarfile_path, 'r')) as tar: tar.extractall(path=tmpdir) bindist_file = glob.glob(('%s/*/.spack/binary_distribution' % tmpdir))[0] workdir = re.sub('/.spack/binary_distribution$', '', bindist_file) tty.debug(('workdir %s' % workdir)) temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name=('%s' % workdir), arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(spec.prefix) os.remove(temp_tarfile_path) os.remove(tarfile_path) os.remove(specfile_path) try: relocate_package(spec, allow_root) except Exception as e: shutil.rmtree(spec.prefix) raise e else: manifest_file = os.path.join(spec.prefix, spack.store.layout.metadata_dir, spack.store.layout.manifest_file_name) if (not os.path.exists(manifest_file)): spec_id = spec.format('{name}/{hash:7}') tty.warn(('No manifest file in tarball for spec %s' % spec_id)) finally: shutil.rmtree(tmpdir) if os.path.exists(filename): os.remove(filename)
-5,361,161,429,013,797,000
extract binary tarball for given package into install area
lib/spack/spack/binary_distribution.py
extract_tarball
AndrewGaspar/spack
python
def extract_tarball(spec, filename, allow_root=False, unsigned=False, force=False): '\n \n ' if os.path.exists(spec.prefix): if force: shutil.rmtree(spec.prefix) else: raise NoOverwriteException(str(spec.prefix)) tmpdir = tempfile.mkdtemp() stagepath = os.path.dirname(filename) spackfile_name = tarball_name(spec, '.spack') spackfile_path = os.path.join(stagepath, spackfile_name) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_path = os.path.join(tmpdir, tarfile_name) specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.join(tmpdir, specfile_name) with closing(tarfile.open(spackfile_path, 'r')) as tar: tar.extractall(tmpdir) if (not os.path.exists(tarfile_path)): tarfile_name = tarball_name(spec, '.tar.bz2') tarfile_path = os.path.join(tmpdir, tarfile_name) if (not unsigned): if os.path.exists(('%s.asc' % specfile_path)): try: suppress = config.get('config:suppress_gpg_warnings', False) Gpg.verify(('%s.asc' % specfile_path), specfile_path, suppress) except Exception as e: shutil.rmtree(tmpdir) raise e else: shutil.rmtree(tmpdir) raise NoVerifyException('Package spec file failed signature verification.\nUse spack buildcache keys to download and install a key for verification from the mirror.') checksum = checksum_tarball(tarfile_path) spec_dict = {} with open(specfile_path, 'r') as inputfile: content = inputfile.read() spec_dict = syaml.load(content) bchecksum = spec_dict['binary_cache_checksum'] if (bchecksum['hash'] != checksum): shutil.rmtree(tmpdir) raise NoChecksumException('Package tarball failed checksum verification.\nIt cannot be installed.') new_relative_prefix = str(os.path.relpath(spec.prefix, spack.store.layout.root)) buildinfo = spec_dict.get('buildinfo', {}) old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix) rel = buildinfo.get('relative_rpaths') info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s' tty.debug((info % (old_relative_prefix, new_relative_prefix, rel))) with closing(tarfile.open(tarfile_path, 'r')) as tar: tar.extractall(path=tmpdir) bindist_file = glob.glob(('%s/*/.spack/binary_distribution' % tmpdir))[0] workdir = re.sub('/.spack/binary_distribution$', '', bindist_file) tty.debug(('workdir %s' % workdir)) temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name=('%s' % workdir), arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(spec.prefix) os.remove(temp_tarfile_path) os.remove(tarfile_path) os.remove(specfile_path) try: relocate_package(spec, allow_root) except Exception as e: shutil.rmtree(spec.prefix) raise e else: manifest_file = os.path.join(spec.prefix, spack.store.layout.metadata_dir, spack.store.layout.manifest_file_name) if (not os.path.exists(manifest_file)): spec_id = spec.format('{name}/{hash:7}') tty.warn(('No manifest file in tarball for spec %s' % spec_id)) finally: shutil.rmtree(tmpdir) if os.path.exists(filename): os.remove(filename)
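extract_tarball() aborts unless the tarball's sha256 matches the value recorded in spec.yaml; a self-contained sketch of that comparison (the real helper is checksum_tarball(), whose internals are not shown in this record):

import hashlib

def checksum_ok(path, expected):
    # Stream in chunks so large tarballs need not fit in memory.
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest() == expected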
def try_download_specs(urls=None, force=False): '\n Try to download the urls and cache them\n ' global _cached_specs if (urls is None): return {} for link in urls: with Stage(link, name='build_cache', keep=True) as stage: if (force and os.path.exists(stage.save_filename)): os.remove(stage.save_filename) if (not os.path.exists(stage.save_filename)): try: stage.fetch() except fs.FetchError: continue with open(stage.save_filename, 'r') as f: spec = Spec.from_yaml(f) spec._mark_concrete() _cached_specs.add(spec) return _cached_specs
-4,851,680,199,834,589,000
Try to download the urls and cache them
lib/spack/spack/binary_distribution.py
try_download_specs
AndrewGaspar/spack
python
def try_download_specs(urls=None, force=False): '\n \n ' global _cached_specs if (urls is None): return {} for link in urls: with Stage(link, name='build_cache', keep=True) as stage: if (force and os.path.exists(stage.save_filename)): os.remove(stage.save_filename) if (not os.path.exists(stage.save_filename)): try: stage.fetch() except fs.FetchError: continue with open(stage.save_filename, 'r') as f: spec = Spec.from_yaml(f) spec._mark_concrete() _cached_specs.add(spec) return _cached_specs
def get_spec(spec=None, force=False): '\n Check if spec.yaml exists on mirrors and return it if it does\n ' global _cached_specs urls = set() if (spec is None): return {} specfile_name = tarball_name(spec, '.spec.yaml') if (not spack.mirror.MirrorCollection()): tty.debug('No Spack mirrors are currently configured') return {} if (_cached_specs and (spec in _cached_specs)): return _cached_specs for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join(mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg(('Finding buildcaches in %s' % mirror_dir)) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) else: tty.msg(('Finding buildcaches at %s' % url_util.format(fetch_url_build_cache))) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) return try_download_specs(urls=urls, force=force)
-2,687,261,658,887,346,700
Check if spec.yaml exists on mirrors and return it if it does
lib/spack/spack/binary_distribution.py
get_spec
AndrewGaspar/spack
python
def get_spec(spec=None, force=False): '\n \n ' global _cached_specs urls = set() if (spec is None): return {} specfile_name = tarball_name(spec, '.spec.yaml') if (not spack.mirror.MirrorCollection()): tty.debug('No Spack mirrors are currently configured') return {} if (_cached_specs and (spec in _cached_specs)): return _cached_specs for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join(mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg(('Finding buildcaches in %s' % mirror_dir)) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) else: tty.msg(('Finding buildcaches at %s' % url_util.format(fetch_url_build_cache))) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) return try_download_specs(urls=urls, force=force)
def get_specs(force=False, allarch=False): "\n Get spec.yaml's for build caches available on mirror\n " arch = architecture.Arch(architecture.platform(), 'default_os', 'default_target') arch_pattern = '([^-]*-[^-]*-[^-]*)' if (not allarch): arch_pattern = ('(%s-%s-[^-]*)' % (arch.platform, arch.os)) regex_pattern = ('%s(.*)(spec.yaml$)' % arch_pattern) arch_re = re.compile(regex_pattern) if (not spack.mirror.MirrorCollection()): tty.debug('No Spack mirrors are currently configured') return {} urls = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join(mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg(('Finding buildcaches in %s' % mirror_dir)) if os.path.exists(mirror_dir): files = os.listdir(mirror_dir) for file in files: m = arch_re.search(file) if m: link = url_util.join(fetch_url_build_cache, file) urls.add(link) else: tty.msg(('Finding buildcaches at %s' % url_util.format(fetch_url_build_cache))) (p, links) = web_util.spider(url_util.join(fetch_url_build_cache, 'index.html')) for link in links: m = arch_re.search(link) if m: urls.add(link) return try_download_specs(urls=urls, force=force)
-211,493,935,287,844,900
Get spec.yaml's for build caches available on mirror
lib/spack/spack/binary_distribution.py
get_specs
AndrewGaspar/spack
python
def get_specs(force=False, allarch=False): "\n \n " arch = architecture.Arch(architecture.platform(), 'default_os', 'default_target') arch_pattern = '([^-]*-[^-]*-[^-]*)' if (not allarch): arch_pattern = ('(%s-%s-[^-]*)' % (arch.platform, arch.os)) regex_pattern = ('%s(.*)(spec.yaml$)' % arch_pattern) arch_re = re.compile(regex_pattern) if (not spack.mirror.MirrorCollection()): tty.debug('No Spack mirrors are currently configured') return {} urls = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join(mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg(('Finding buildcaches in %s' % mirror_dir)) if os.path.exists(mirror_dir): files = os.listdir(mirror_dir) for file in files: m = arch_re.search(file) if m: link = url_util.join(fetch_url_build_cache, file) urls.add(link) else: tty.msg(('Finding buildcaches at %s' % url_util.format(fetch_url_build_cache))) (p, links) = web_util.spider(url_util.join(fetch_url_build_cache, 'index.html')) for link in links: m = arch_re.search(link) if m: urls.add(link) return try_download_specs(urls=urls, force=force)
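The architecture filter built by get_specs(), demonstrated on sample file names; the platform and os values are illustrative:

import re

arch_re = re.compile('(linux-ubuntu18.04-[^-]*)(.*)(spec.yaml$)')
names = ['linux-ubuntu18.04-x86_64-gcc-7.4.0-zlib-1.2.11-h1.spec.yaml',
         'darwin-mojave-x86_64-clang-10.0.0-zlib-1.2.11-h2.spec.yaml']
matches = [n for n in names if arch_re.search(n)]   # keeps only the linux entry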
def get_keys(install=False, trust=False, force=False): '\n Get pgp public keys available on mirror\n with suffix .key or .pub\n ' if (not spack.mirror.MirrorCollection()): tty.die(('Please add a spack mirror to allow ' + 'download of build caches.')) keys = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join(mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg(('Finding public keys in %s' % mirror_dir)) files = os.listdir(str(mirror_dir)) for file in files: if (re.search('\\.key', file) or re.search('\\.pub', file)): link = url_util.join(fetch_url_build_cache, file) keys.add(link) else: tty.msg(('Finding public keys at %s' % url_util.format(fetch_url_build_cache))) (p, links) = web_util.spider(url_util.join(fetch_url_build_cache, 'index.html'), depth=1) for link in links: if (re.search('\\.key', link) or re.search('\\.pub', link)): keys.add(link) for link in keys: with Stage(link, name='build_cache', keep=True) as stage: if (os.path.exists(stage.save_filename) and force): os.remove(stage.save_filename) if (not os.path.exists(stage.save_filename)): try: stage.fetch() except fs.FetchError: continue tty.msg(('Found key %s' % link)) if install: if trust: Gpg.trust(stage.save_filename) tty.msg('Added this key to trusted keys.') else: tty.msg('Will not add this key to trusted keys.Use -t to install all downloaded keys')
386,161,436,998,631,300
Get pgp public keys available on mirror with suffix .key or .pub
lib/spack/spack/binary_distribution.py
get_keys
AndrewGaspar/spack
python
def get_keys(install=False, trust=False, force=False): '\n Get pgp public keys available on mirror\n with suffix .key or .pub\n ' if (not spack.mirror.MirrorCollection()): tty.die(('Please add a spack mirror to allow ' + 'download of build caches.')) keys = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join(mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg(('Finding public keys in %s' % mirror_dir)) files = os.listdir(str(mirror_dir)) for file in files: if (re.search('\\.key', file) or re.search('\\.pub', file)): link = url_util.join(fetch_url_build_cache, file) keys.add(link) else: tty.msg(('Finding public keys at %s' % url_util.format(fetch_url_build_cache))) (p, links) = web_util.spider(url_util.join(fetch_url_build_cache, 'index.html'), depth=1) for link in links: if (re.search('\\.key', link) or re.search('\\.pub', link)): keys.add(link) for link in keys: with Stage(link, name='build_cache', keep=True) as stage: if (os.path.exists(stage.save_filename) and force): os.remove(stage.save_filename) if (not os.path.exists(stage.save_filename)): try: stage.fetch() except fs.FetchError: continue tty.msg(('Found key %s' % link)) if install: if trust: Gpg.trust(stage.save_filename) tty.msg('Added this key to trusted keys.') else: tty.msg('Will not add this key to trusted keys. Use -t to install all downloaded keys')
def check_specs_against_mirrors(mirrors, specs, output_file=None, rebuild_on_errors=False): "Check all the given specs against buildcaches on the given mirrors and\n determine if any of the specs need to be rebuilt. Reasons for needing to\n rebuild include binary cache for spec isn't present on a mirror, or it is\n present but the full_hash has changed since last time spec was built.\n\n Arguments:\n mirrors (dict): Mirrors to check against\n specs (iterable): Specs to check against mirrors\n output_file (string): Path to output file to be written. If provided,\n mirrors with missing or out-of-date specs will be formatted as a\n JSON object and written to this file.\n rebuild_on_errors (boolean): Treat any errors encountered while\n checking specs as a signal to rebuild package.\n\n Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.\n\n " rebuilds = {} for mirror in spack.mirror.MirrorCollection(mirrors).values(): tty.msg(('Checking for built specs at %s' % mirror.fetch_url)) rebuild_list = [] for spec in specs: if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors): rebuild_list.append({'short_spec': spec.short_spec, 'hash': spec.dag_hash()}) if rebuild_list: rebuilds[mirror.fetch_url] = {'mirrorName': mirror.name, 'mirrorUrl': mirror.fetch_url, 'rebuildSpecs': rebuild_list} if output_file: with open(output_file, 'w') as outf: outf.write(json.dumps(rebuilds)) return (1 if rebuilds else 0)
-2,557,679,248,075,223,600
Check all the given specs against buildcaches on the given mirrors and determine if any of the specs need to be rebuilt. Reasons for needing to rebuild include binary cache for spec isn't present on a mirror, or it is present but the full_hash has changed since last time spec was built. Arguments: mirrors (dict): Mirrors to check against specs (iterable): Specs to check against mirrors output_file (string): Path to output file to be written. If provided, mirrors with missing or out-of-date specs will be formatted as a JSON object and written to this file. rebuild_on_errors (boolean): Treat any errors encountered while checking specs as a signal to rebuild package. Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
lib/spack/spack/binary_distribution.py
check_specs_against_mirrors
AndrewGaspar/spack
python
def check_specs_against_mirrors(mirrors, specs, output_file=None, rebuild_on_errors=False): "Check all the given specs against buildcaches on the given mirrors and\n determine if any of the specs need to be rebuilt. Reasons for needing to\n rebuild include binary cache for spec isn't present on a mirror, or it is\n present but the full_hash has changed since last time spec was built.\n\n Arguments:\n mirrors (dict): Mirrors to check against\n specs (iterable): Specs to check against mirrors\n output_file (string): Path to output file to be written. If provided,\n mirrors with missing or out-of-date specs will be formatted as a\n JSON object and written to this file.\n rebuild_on_errors (boolean): Treat any errors encountered while\n checking specs as a signal to rebuild package.\n\n Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.\n\n " rebuilds = {} for mirror in spack.mirror.MirrorCollection(mirrors).values(): tty.msg(('Checking for built specs at %s' % mirror.fetch_url)) rebuild_list = [] for spec in specs: if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors): rebuild_list.append({'short_spec': spec.short_spec, 'hash': spec.dag_hash()}) if rebuild_list: rebuilds[mirror.fetch_url] = {'mirrorName': mirror.name, 'mirrorUrl': mirror.fetch_url, 'rebuildSpecs': rebuild_list} if output_file: with open(output_file, 'w') as outf: outf.write(json.dumps(rebuilds)) return (1 if rebuilds else 0)
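The JSON structure written to output_file, reconstructed from the code above with fabricated mirror and spec values:

import json

rebuilds = {
    'https://mirror.example.com/buildcache': {
        'mirrorName': 'example',
        'mirrorUrl': 'https://mirror.example.com/buildcache',
        'rebuildSpecs': [{'short_spec': 'zlib@1.2.11%gcc@7.4.0', 'hash': 'h1'}],
    },
}
print(json.dumps(rebuilds, indent=2))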
def get_model(name, **kwargs): "Returns a pre-defined model by name\n\n Parameters\n ----------\n name : str\n Name of the model.\n pretrained : bool\n Whether to load the pretrained weights for model.\n classes : int\n Number of classes for the output layer.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n\n Returns\n -------\n HybridBlock\n The model.\n " models = {'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc, 'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco, 'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc, 'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco, 'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc, 'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc, 'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco, 'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc, 'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc, 'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc, 'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco, 'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc, 'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco, 'cifar_resnet20_v1': cifar_resnet20_v1, 'cifar_resnet56_v1': cifar_resnet56_v1, 'cifar_resnet110_v1': cifar_resnet110_v1, 'cifar_resnet20_v2': cifar_resnet20_v2, 'cifar_resnet56_v2': cifar_resnet56_v2, 'cifar_resnet110_v2': cifar_resnet110_v2, 'cifar_wideresnet16_10': cifar_wideresnet16_10, 'cifar_wideresnet28_10': cifar_wideresnet28_10, 'cifar_wideresnet40_8': cifar_wideresnet40_8, 'cifar_resnext29_32x4d': cifar_resnext29_32x4d, 'cifar_resnext29_16x64d': cifar_resnext29_16x64d, 'fcn_resnet50_voc': get_fcn_voc_resnet50, 'fcn_resnet101_voc': get_fcn_voc_resnet101, 'fcn_resnet50_ade': get_fcn_ade_resnet50, 'psp_resnet50_ade': get_psp_ade_resnet50, 'resnet18_v1b': resnet18_v1b, 'resnet34_v1b': resnet34_v1b, 'resnet50_v1b': resnet50_v1b, 'resnet101_v1b': resnet101_v1b, 'resnet152_v1b': resnet152_v1b, 'resnet50_v2a': resnet50_v2a, 'resnext50_32x4d': resnext50_32x4d, 'resnext101_32x4d': resnext101_32x4d, 'resnext101_64x4d': resnext101_64x4d, 'se_resnext50_32x4d': se_resnext50_32x4d, 'se_resnext101_32x4d': se_resnext101_32x4d, 'se_resnext101_64x4d': se_resnext101_64x4d, 'senet_52': senet_52, 'senet_103': senet_103, 'senet_154': senet_154, 'se_resnet18_v1': se_resnet18_v1, 'se_resnet34_v1': se_resnet34_v1, 'se_resnet50_v1': se_resnet50_v1, 'se_resnet101_v1': se_resnet101_v1, 'se_resnet152_v1': se_resnet152_v1, 'se_resnet18_v2': se_resnet18_v2, 'se_resnet34_v2': se_resnet34_v2, 'se_resnet50_v2': se_resnet50_v2, 'se_resnet101_v2': se_resnet101_v2, 'se_resnet152_v2': se_resnet152_v2, 'darknet53': darknet53, 'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc, 'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco} try: net = gluon.model_zoo.vision.get_model(name, **kwargs) return net except ValueError as e: upstream_supported = str(e) name = name.lower() if (name not in models): raise ValueError(('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))) net = models[name](**kwargs) return net
-3,677,196,965,192,728,600
Returns a pre-defined model by name Parameters ---------- name : str Name of the model. pretrained : bool Whether to load the pretrained weights for model. classes : int Number of classes for the output layer. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. Returns ------- HybridBlock The model.
gluoncv/model_zoo/model_zoo.py
get_model
Ellinier/gluon-cv
python
def get_model(name, **kwargs): "Returns a pre-defined model by name\n\n Parameters\n ----------\n name : str\n Name of the model.\n pretrained : bool\n Whether to load the pretrained weights for model.\n classes : int\n Number of classes for the output layer.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n\n Returns\n -------\n HybridBlock\n The model.\n " models = {'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc, 'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco, 'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc, 'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco, 'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc, 'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc, 'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco, 'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc, 'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc, 'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc, 'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco, 'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc, 'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco, 'cifar_resnet20_v1': cifar_resnet20_v1, 'cifar_resnet56_v1': cifar_resnet56_v1, 'cifar_resnet110_v1': cifar_resnet110_v1, 'cifar_resnet20_v2': cifar_resnet20_v2, 'cifar_resnet56_v2': cifar_resnet56_v2, 'cifar_resnet110_v2': cifar_resnet110_v2, 'cifar_wideresnet16_10': cifar_wideresnet16_10, 'cifar_wideresnet28_10': cifar_wideresnet28_10, 'cifar_wideresnet40_8': cifar_wideresnet40_8, 'cifar_resnext29_32x4d': cifar_resnext29_32x4d, 'cifar_resnext29_16x64d': cifar_resnext29_16x64d, 'fcn_resnet50_voc': get_fcn_voc_resnet50, 'fcn_resnet101_voc': get_fcn_voc_resnet101, 'fcn_resnet50_ade': get_fcn_ade_resnet50, 'psp_resnet50_ade': get_psp_ade_resnet50, 'resnet18_v1b': resnet18_v1b, 'resnet34_v1b': resnet34_v1b, 'resnet50_v1b': resnet50_v1b, 'resnet101_v1b': resnet101_v1b, 'resnet152_v1b': resnet152_v1b, 'resnet50_v2a': resnet50_v2a, 'resnext50_32x4d': resnext50_32x4d, 'resnext101_32x4d': resnext101_32x4d, 'resnext101_64x4d': resnext101_64x4d, 'se_resnext50_32x4d': se_resnext50_32x4d, 'se_resnext101_32x4d': se_resnext101_32x4d, 'se_resnext101_64x4d': se_resnext101_64x4d, 'senet_52': senet_52, 'senet_103': senet_103, 'senet_154': senet_154, 'se_resnet18_v1': se_resnet18_v1, 'se_resnet34_v1': se_resnet34_v1, 'se_resnet50_v1': se_resnet50_v1, 'se_resnet101_v1': se_resnet101_v1, 'se_resnet152_v1': se_resnet152_v1, 'se_resnet18_v2': se_resnet18_v2, 'se_resnet34_v2': se_resnet34_v2, 'se_resnet50_v2': se_resnet50_v2, 'se_resnet101_v2': se_resnet101_v2, 'se_resnet152_v2': se_resnet152_v2, 'darknet53': darknet53, 'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc, 'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco} try: net = gluon.model_zoo.vision.get_model(name, **kwargs) return net except ValueError as e: upstream_supported = str(e) name = name.lower() if (name not in models): raise ValueError(('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))) net = models[name](**kwargs) return net
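A hedged usage sketch for the lookup above; it assumes mxnet and gluoncv are installed, and passing pretrained=True would additionally download weights:

net = get_model('cifar_resnet20_v1', classes=10)

try:                               # unknown names raise ValueError,
    get_model('no_such_model')     # listing every supported model name
except ValueError as err:
    print(err)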
def do_import(self, timestamp): 'Call one key import RPC.' rescan = (self.rescan == Rescan.yes) assert_equal(self.address['solvable'], True) assert_equal(self.address['isscript'], (self.address_type == AddressType.p2sh_segwit)) assert_equal(self.address['iswitness'], (self.address_type == AddressType.bech32)) if self.address['isscript']: assert_equal(self.address['embedded']['isscript'], False) assert_equal(self.address['embedded']['iswitness'], True) if (self.call == Call.single): if (self.data == Data.address): response = self.node.importaddress(address=self.address['address'], label=self.label, rescan=rescan) elif (self.data == Data.pub): response = self.node.importpubkey(pubkey=self.address['pubkey'], label=self.label, rescan=rescan) elif (self.data == Data.priv): response = self.node.importprivkey(privkey=self.key, label=self.label, rescan=rescan) assert_equal(response, None) elif (self.call in (Call.multiaddress, Call.multiscript)): request = {'scriptPubKey': ({'address': self.address['address']} if (self.call == Call.multiaddress) else self.address['scriptPubKey']), 'timestamp': ((timestamp + TIMESTAMP_WINDOW) + (1 if (self.rescan == Rescan.late_timestamp) else 0)), 'pubkeys': ([self.address['pubkey']] if (self.data == Data.pub) else []), 'keys': ([self.key] if (self.data == Data.priv) else []), 'label': self.label, 'watchonly': (self.data != Data.priv)} if ((self.address_type == AddressType.p2sh_segwit) and (self.data != Data.address)): request.update({'redeemscript': self.address['embedded']['scriptPubKey']}) response = self.node.importmulti(requests=[request], options={'rescan': (self.rescan in (Rescan.yes, Rescan.late_timestamp))}) assert_equal(response, [{'success': True}])
6,954,097,585,418,948,000
Call one key import RPC.
test/functional/wallet_import_rescan.py
do_import
124327288/bitcoin
python
def do_import(self, timestamp): rescan = (self.rescan == Rescan.yes) assert_equal(self.address['solvable'], True) assert_equal(self.address['isscript'], (self.address_type == AddressType.p2sh_segwit)) assert_equal(self.address['iswitness'], (self.address_type == AddressType.bech32)) if self.address['isscript']: assert_equal(self.address['embedded']['isscript'], False) assert_equal(self.address['embedded']['iswitness'], True) if (self.call == Call.single): if (self.data == Data.address): response = self.node.importaddress(address=self.address['address'], label=self.label, rescan=rescan) elif (self.data == Data.pub): response = self.node.importpubkey(pubkey=self.address['pubkey'], label=self.label, rescan=rescan) elif (self.data == Data.priv): response = self.node.importprivkey(privkey=self.key, label=self.label, rescan=rescan) assert_equal(response, None) elif (self.call in (Call.multiaddress, Call.multiscript)): request = {'scriptPubKey': ({'address': self.address['address']} if (self.call == Call.multiaddress) else self.address['scriptPubKey']), 'timestamp': ((timestamp + TIMESTAMP_WINDOW) + (1 if (self.rescan == Rescan.late_timestamp) else 0)), 'pubkeys': ([self.address['pubkey']] if (self.data == Data.pub) else []), 'keys': ([self.key] if (self.data == Data.priv) else []), 'label': self.label, 'watchonly': (self.data != Data.priv)} if ((self.address_type == AddressType.p2sh_segwit) and (self.data != Data.address)): request.update({'redeemscript': self.address['embedded']['scriptPubKey']}) response = self.node.importmulti(requests=[request], options={'rescan': (self.rescan in (Rescan.yes, Rescan.late_timestamp))}) assert_equal(response, [{'success': True}])
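The `importmulti` branch above reduces to one request object per key. The sketch below shows that request shape with placeholder values ('now' is a documented timestamp shortcut), where `node` is assumed to be an authproxy-style RPC handle to a running bitcoind:

request = {
    'scriptPubKey': {'address': '<watch-only address>'},  # placeholder address
    'timestamp': 'now',  # or a UNIX time bounding the rescan window
    'pubkeys': [],
    'keys': [],
    'label': 'import-test',
    'watchonly': True,
}
# result = node.importmulti(requests=[request], options={'rescan': True})
# assert result == [{'success': True}]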
def check(self, txid=None, amount=None, confirmation_height=None): 'Verify that listtransactions/listreceivedbyaddress return expected values.' txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True) current_height = self.node.getblockcount() assert_equal(len(txs), self.expected_txs) addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address']) if self.expected_txs: assert_equal(len(addresses[0]['txids']), self.expected_txs) if (txid is not None): (tx,) = [tx for tx in txs if (tx['txid'] == txid)] assert_equal(tx['label'], self.label) assert_equal(tx['address'], self.address['address']) assert_equal(tx['amount'], amount) assert_equal(tx['category'], 'receive') assert_equal(tx['label'], self.label) assert_equal(tx['txid'], txid) assert_equal(tx['confirmations'], ((1 + current_height) - confirmation_height)) assert_equal(('trusted' not in tx), True) (address,) = [ad for ad in addresses if (txid in ad['txids'])] assert_equal(address['address'], self.address['address']) assert_equal(address['amount'], self.expected_balance) assert_equal(address['confirmations'], ((1 + current_height) - confirmation_height)) if (self.data != Data.priv): assert_equal(address['involvesWatchonly'], True) else: assert_equal(('involvesWatchonly' not in address), True)
-7,781,182,308,574,544,000
Verify that listtransactions/listreceivedbyaddress return expected values.
test/functional/wallet_import_rescan.py
check
124327288/bitcoin
python
def check(self, txid=None, amount=None, confirmation_height=None): txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True) current_height = self.node.getblockcount() assert_equal(len(txs), self.expected_txs) addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address']) if self.expected_txs: assert_equal(len(addresses[0]['txids']), self.expected_txs) if (txid is not None): (tx,) = [tx for tx in txs if (tx['txid'] == txid)] assert_equal(tx['label'], self.label) assert_equal(tx['address'], self.address['address']) assert_equal(tx['amount'], amount) assert_equal(tx['category'], 'receive') assert_equal(tx['label'], self.label) assert_equal(tx['txid'], txid) assert_equal(tx['confirmations'], ((1 + current_height) - confirmation_height)) assert_equal(('trusted' not in tx), True) (address,) = [ad for ad in addresses if (txid in ad['txids'])] assert_equal(address['address'], self.address['address']) assert_equal(address['amount'], self.expected_balance) assert_equal(address['confirmations'], ((1 + current_height) - confirmation_height)) if (self.data != Data.priv): assert_equal(address['involvesWatchonly'], True) else: assert_equal(('involvesWatchonly' not in address), True)
def _maybe_to_categorical(array): '\n Coerce to a categorical if a series is given.\n\n Internal use ONLY.\n ' if isinstance(array, (ABCSeries, ABCCategoricalIndex)): return array._values elif isinstance(array, np.ndarray): return Categorical(array) return array
2,237,787,623,372,904,200
Coerce to a categorical if a series is given. Internal use ONLY.
pandas/core/arrays/categorical.py
_maybe_to_categorical
Adirio/pandas
python
def _maybe_to_categorical(array): '\n Coerce to a categorical if a series is given.\n\n Internal use ONLY.\n ' if isinstance(array, (ABCSeries, ABCCategoricalIndex)): return array._values elif isinstance(array, np.ndarray): return Categorical(array) return array
def contains(cat, key, container): '\n Helper for membership check for ``key`` in ``cat``.\n\n This is a helper method for :method:`__contains__`\n and :class:`CategoricalIndex.__contains__`.\n\n Returns True if ``key`` is in ``cat.categories`` and the\n location of ``key`` in ``categories`` is in ``container``.\n\n Parameters\n ----------\n cat : :class:`Categorical`or :class:`categoricalIndex`\n key : a hashable object\n The key to check membership for.\n container : Container (e.g. list-like or mapping)\n The container to check for membership in.\n\n Returns\n -------\n is_in : bool\n True if ``key`` is in ``self.categories`` and location of\n ``key`` in ``categories`` is in ``container``, else False.\n\n Notes\n -----\n This method does not check for NaN values. Do that separately\n before calling this method.\n ' hash(key) try: loc = cat.categories.get_loc(key) except KeyError: return False if is_scalar(loc): return (loc in container) else: return any(((loc_ in container) for loc_ in loc))
-3,630,981,666,279,502,300
Helper for membership check for ``key`` in ``cat``. This is a helper method for :meth:`Categorical.__contains__` and :meth:`CategoricalIndex.__contains__`. Returns True if ``key`` is in ``cat.categories`` and the location of ``key`` in ``categories`` is in ``container``. Parameters ---------- cat : :class:`Categorical` or :class:`CategoricalIndex` key : a hashable object The key to check membership for. container : Container (e.g. list-like or mapping) The container to check for membership in. Returns ------- is_in : bool True if ``key`` is in ``cat.categories`` and the location of ``key`` in ``categories`` is in ``container``, else False. Notes ----- This method does not check for NaN values. Do that separately before calling this method.
pandas/core/arrays/categorical.py
contains
Adirio/pandas
python
def contains(cat, key, container): '\n Helper for membership check for ``key`` in ``cat``.\n\n This is a helper method for :method:`__contains__`\n and :class:`CategoricalIndex.__contains__`.\n\n Returns True if ``key`` is in ``cat.categories`` and the\n location of ``key`` in ``categories`` is in ``container``.\n\n Parameters\n ----------\n cat : :class:`Categorical`or :class:`categoricalIndex`\n key : a hashable object\n The key to check membership for.\n container : Container (e.g. list-like or mapping)\n The container to check for membership in.\n\n Returns\n -------\n is_in : bool\n True if ``key`` is in ``self.categories`` and location of\n ``key`` in ``categories`` is in ``container``, else False.\n\n Notes\n -----\n This method does not check for NaN values. Do that separately\n before calling this method.\n ' hash(key) try: loc = cat.categories.get_loc(key) except KeyError: return False if is_scalar(loc): return (loc in container) else: return any(((loc_ in container) for loc_ in loc))
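This helper backs the public ``in`` operator on categoricals: membership requires both that the key is a category and that its code actually occurs in the stored codes. A small sketch of the resulting behaviour (output assumes the pandas version this source comes from):

import pandas as pd

c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
print('a' in c)  # True: 'a' is a category and its code occurs in the data
print('c' in c)  # False: 'c' is a declared but unused category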
def _get_codes_for_values(values, categories): '\n utility routine to turn values into codes given the specified categories\n ' from pandas.core.algorithms import _get_data_algo, _hashtables if is_dtype_equal(values.dtype, categories.dtype): values = getattr(values, 'values', values) categories = getattr(categories, 'values', categories) else: values = ensure_object(values) categories = ensure_object(categories) ((hash_klass, vec_klass), vals) = _get_data_algo(values, _hashtables) ((_, _), cats) = _get_data_algo(categories, _hashtables) t = hash_klass(len(cats)) t.map_locations(cats) return coerce_indexer_dtype(t.lookup(vals), cats)
-9,121,178,497,724,465,000
Utility routine to turn values into codes given the specified categories.
pandas/core/arrays/categorical.py
_get_codes_for_values
Adirio/pandas
python
def _get_codes_for_values(values, categories): '\n \n ' from pandas.core.algorithms import _get_data_algo, _hashtables if is_dtype_equal(values.dtype, categories.dtype): values = getattr(values, 'values', values) categories = getattr(categories, 'values', categories) else: values = ensure_object(values) categories = ensure_object(categories) ((hash_klass, vec_klass), vals) = _get_data_algo(values, _hashtables) ((_, _), cats) = _get_data_algo(categories, _hashtables) t = hash_klass(len(cats)) t.map_locations(cats) return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories): "\n Convert a set of codes for to a new set of categories\n\n Parameters\n ----------\n codes : array\n old_categories, new_categories : Index\n\n Returns\n -------\n new_codes : array\n\n Examples\n --------\n >>> old_cat = pd.Index(['b', 'a', 'c'])\n >>> new_cat = pd.Index(['a', 'b'])\n >>> codes = np.array([0, 1, 1, 2])\n >>> _recode_for_categories(codes, old_cat, new_cat)\n array([ 1, 0, 0, -1])\n " from pandas.core.algorithms import take_1d if (len(old_categories) == 0): return codes.copy() indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), new_categories) new_codes = take_1d(indexer, codes.copy(), fill_value=(- 1)) return new_codes
-1,359,974,398,921,035,300
Convert a set of codes to a new set of categories. Parameters ---------- codes : array old_categories, new_categories : Index Returns ------- new_codes : array Examples -------- >>> old_cat = pd.Index(['b', 'a', 'c']) >>> new_cat = pd.Index(['a', 'b']) >>> codes = np.array([0, 1, 1, 2]) >>> _recode_for_categories(codes, old_cat, new_cat) array([ 1, 0, 0, -1])
pandas/core/arrays/categorical.py
_recode_for_categories
Adirio/pandas
python
def _recode_for_categories(codes, old_categories, new_categories): "\n Convert a set of codes for to a new set of categories\n\n Parameters\n ----------\n codes : array\n old_categories, new_categories : Index\n\n Returns\n -------\n new_codes : array\n\n Examples\n --------\n >>> old_cat = pd.Index(['b', 'a', 'c'])\n >>> new_cat = pd.Index(['a', 'b'])\n >>> codes = np.array([0, 1, 1, 2])\n >>> _recode_for_categories(codes, old_cat, new_cat)\n array([ 1, 0, 0, -1])\n " from pandas.core.algorithms import take_1d if (len(old_categories) == 0): return codes.copy() indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), new_categories) new_codes = take_1d(indexer, codes.copy(), fill_value=(- 1)) return new_codes
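The same recoding is visible through the public API: dropping a category with `set_categories` sends its codes to -1, matching the docstring example above.

import pandas as pd

c = pd.Categorical(['b', 'a', 'a', 'c'])  # categories ['a', 'b', 'c'], codes [1, 0, 0, 2]
c2 = c.set_categories(['a', 'b'])         # 'c' is no longer a category
print(c2.codes)                           # [ 1  0  0 -1]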
def _factorize_from_iterable(values): '\n Factorize an input `values` into `categories` and `codes`. Preserves\n categorical dtype in `categories`.\n\n *This is an internal function*\n\n Parameters\n ----------\n values : list-like\n\n Returns\n -------\n codes : ndarray\n categories : Index\n If `values` has a categorical dtype, then `categories` is\n a CategoricalIndex keeping the categories and order of `values`.\n ' from pandas.core.indexes.category import CategoricalIndex if (not is_list_like(values)): raise TypeError('Input must be list-like') if is_categorical(values): if isinstance(values, (ABCCategoricalIndex, ABCSeries)): values = values._values categories = CategoricalIndex(values.categories, categories=values.categories, ordered=values.ordered) codes = values.codes else: cat = Categorical(values, ordered=False) categories = cat.categories codes = cat.codes return (codes, categories)
8,827,019,310,461,227,000
Factorize an input `values` into `categories` and `codes`. Preserves categorical dtype in `categories`. *This is an internal function* Parameters ---------- values : list-like Returns ------- codes : ndarray categories : Index If `values` has a categorical dtype, then `categories` is a CategoricalIndex keeping the categories and order of `values`.
pandas/core/arrays/categorical.py
_factorize_from_iterable
Adirio/pandas
python
def _factorize_from_iterable(values): '\n Factorize an input `values` into `categories` and `codes`. Preserves\n categorical dtype in `categories`.\n\n *This is an internal function*\n\n Parameters\n ----------\n values : list-like\n\n Returns\n -------\n codes : ndarray\n categories : Index\n If `values` has a categorical dtype, then `categories` is\n a CategoricalIndex keeping the categories and order of `values`.\n ' from pandas.core.indexes.category import CategoricalIndex if (not is_list_like(values)): raise TypeError('Input must be list-like') if is_categorical(values): if isinstance(values, (ABCCategoricalIndex, ABCSeries)): values = values._values categories = CategoricalIndex(values.categories, categories=values.categories, ordered=values.ordered) codes = values.codes else: cat = Categorical(values, ordered=False) categories = cat.categories codes = cat.codes return (codes, categories)
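The nearest public counterpart of this internal routine is `pandas.factorize`, which also splits a list-like into codes and uniques, though it orders uniques by first appearance rather than the sorted category order used here:

import pandas as pd

codes, uniques = pd.factorize(['b', 'a', 'b'])
print(codes)    # [0 1 0]
print(uniques)  # ['b' 'a']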
def _factorize_from_iterables(iterables): '\n A higher-level wrapper over `_factorize_from_iterable`.\n\n *This is an internal function*\n\n Parameters\n ----------\n iterables : list-like of list-likes\n\n Returns\n -------\n codes_list : list of ndarrays\n categories_list : list of Indexes\n\n Notes\n -----\n See `_factorize_from_iterable` for more info.\n ' if (len(iterables) == 0): return [[], []] return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
-2,717,126,921,247,107,600
A higher-level wrapper over `_factorize_from_iterable`. *This is an internal function* Parameters ---------- iterables : list-like of list-likes Returns ------- codes_list : list of ndarrays categories_list : list of Indexes Notes ----- See `_factorize_from_iterable` for more info.
pandas/core/arrays/categorical.py
_factorize_from_iterables
Adirio/pandas
python
def _factorize_from_iterables(iterables): '\n A higher-level wrapper over `_factorize_from_iterable`.\n\n *This is an internal function*\n\n Parameters\n ----------\n iterables : list-like of list-likes\n\n Returns\n -------\n codes_list : list of ndarrays\n categories_list : list of Indexes\n\n Notes\n -----\n See `_factorize_from_iterable` for more info.\n ' if (len(iterables) == 0): return [[], []] return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
@property def categories(self): 'The categories of this categorical.\n\n Setting assigns new values to each category (effectively a rename of\n each individual category).\n\n The assigned value has to be a list-like object. All items must be\n unique and the number of items in the new categories must be the same\n as the number of items in the old categories.\n\n Assigning to `categories` is a inplace operation!\n\n Raises\n ------\n ValueError\n If the new categories do not validate as categories or if the\n number of new categories is unequal the number of old categories\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n ' return self.dtype.categories
3,570,044,184,654,974,500
The categories of this categorical. Setting assigns new values to each category (effectively a rename of each individual category). The assigned value has to be a list-like object. All items must be unique and the number of items in the new categories must be the same as the number of items in the old categories. Assigning to `categories` is an inplace operation! Raises ------ ValueError If the new categories do not validate as categories or if the number of new categories is not equal to the number of old categories See also -------- rename_categories reorder_categories add_categories remove_categories remove_unused_categories set_categories
pandas/core/arrays/categorical.py
categories
Adirio/pandas
python
@property def categories(self): 'The categories of this categorical.\n\n Setting assigns new values to each category (effectively a rename of\n each individual category).\n\n The assigned value has to be a list-like object. All items must be\n unique and the number of items in the new categories must be the same\n as the number of items in the old categories.\n\n Assigning to `categories` is a inplace operation!\n\n Raises\n ------\n ValueError\n If the new categories do not validate as categories or if the\n number of new categories is unequal the number of old categories\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n ' return self.dtype.categories
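Because assignment to `categories` is an in-place rename, it only relabels categories and never changes which code each value holds. A sketch of the setter behaviour (valid for the pandas version this source comes from; later releases dropped the setter in favour of `rename_categories`):

import pandas as pd

c = pd.Categorical(['a', 'a', 'b'])
c.categories = ['x', 'y']  # in-place rename; must match the old length
print(list(c))             # ['x', 'x', 'y']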
@property def ordered(self): 'Whether the categories have an ordered relationship' return self.dtype.ordered
5,481,432,134,851,482,000
Whether the categories have an ordered relationship
pandas/core/arrays/categorical.py
ordered
Adirio/pandas
python
@property def ordered(self): return self.dtype.ordered
@property def dtype(self): 'The :class:`~pandas.api.types.CategoricalDtype` for this instance' return self._dtype
1,338,917,948,512,637,200
The :class:`~pandas.api.types.CategoricalDtype` for this instance
pandas/core/arrays/categorical.py
dtype
Adirio/pandas
python
@property def dtype(self): return self._dtype
def copy(self): ' Copy constructor. ' return self._constructor(values=self._codes.copy(), dtype=self.dtype, fastpath=True)
-2,809,920,600,592,299,500
Copy constructor.
pandas/core/arrays/categorical.py
copy
Adirio/pandas
python
def copy(self): ' ' return self._constructor(values=self._codes.copy(), dtype=self.dtype, fastpath=True)
def astype(self, dtype, copy=True): '\n Coerce this type to another dtype\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and dtype is categorical, the original\n object is returned.\n\n .. versionadded:: 0.19.0\n\n ' if is_categorical_dtype(dtype): dtype = self.dtype.update_dtype(dtype) self = (self.copy() if copy else self) if (dtype == self.dtype): return self return self._set_dtype(dtype) return np.array(self, dtype=dtype, copy=copy)
1,461,506,608,232,021,000
Coerce this type to another dtype Parameters ---------- dtype : numpy dtype or pandas type copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and dtype is categorical, the original object is returned. .. versionadded:: 0.19.0
pandas/core/arrays/categorical.py
astype
Adirio/pandas
python
def astype(self, dtype, copy=True): '\n Coerce this type to another dtype\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and dtype is categorical, the original\n object is returned.\n\n .. versionadded:: 0.19.0\n\n ' if is_categorical_dtype(dtype): dtype = self.dtype.update_dtype(dtype) self = (self.copy() if copy else self) if (dtype == self.dtype): return self return self._set_dtype(dtype) return np.array(self, dtype=dtype, copy=copy)
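`astype` thus takes one of two paths: a categorical target goes through `update_dtype`/`_set_dtype` and stays categorical, while any other target materializes a plain ndarray. A short sketch of both:

import pandas as pd

c = pd.Categorical(['a', 'b', 'a'])
arr = c.astype(object)                             # plain object ndarray
cat = c.astype(pd.CategoricalDtype(ordered=True))  # still categorical, now ordered
print(type(arr).__name__, cat.ordered)             # ndarray True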
@cache_readonly def ndim(self): 'Number of dimensions of the Categorical ' return self._codes.ndim
736,434,692,953,517,700
Number of dimensions of the Categorical
pandas/core/arrays/categorical.py
ndim
Adirio/pandas
python
@cache_readonly def ndim(self): ' ' return self._codes.ndim
@cache_readonly def size(self): ' return the len of myself ' return len(self)
4,024,009,921,628,190,700
Return the length of the Categorical.
pandas/core/arrays/categorical.py
size
Adirio/pandas
python
@cache_readonly def size(self): ' ' return len(self)
@cache_readonly def itemsize(self): ' return the size of a single category ' return self.categories.itemsize
4,548,285,990,129,604,000
return the size of a single category
pandas/core/arrays/categorical.py
itemsize
Adirio/pandas
python
@cache_readonly def itemsize(self): ' ' return self.categories.itemsize
def tolist(self): '\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n ' return list(self)
7,588,057,621,989,776,000
Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period)
pandas/core/arrays/categorical.py
tolist
Adirio/pandas
python
def tolist(self): '\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n ' return list(self)
@property def base(self): ' compat, we are always our own object ' return None
-4,100,637,562,349,485,600
compat, we are always our own object
pandas/core/arrays/categorical.py
base
Adirio/pandas
python
@property def base(self): ' ' return None
@classmethod def _from_inferred_categories(cls, inferred_categories, inferred_codes, dtype): "Construct a Categorical from inferred values\n\n For inferred categories (`dtype` is None) the categories are sorted.\n For explicit `dtype`, the `inferred_categories` are cast to the\n appropriate type.\n\n Parameters\n ----------\n\n inferred_categories : Index\n inferred_codes : Index\n dtype : CategoricalDtype or 'category'\n\n Returns\n -------\n Categorical\n " from pandas import Index, to_numeric, to_datetime, to_timedelta cats = Index(inferred_categories) known_categories = (isinstance(dtype, CategoricalDtype) and (dtype.categories is not None)) if known_categories: if dtype.categories.is_numeric(): cats = to_numeric(inferred_categories, errors='coerce') elif is_datetime64_dtype(dtype.categories): cats = to_datetime(inferred_categories, errors='coerce') elif is_timedelta64_dtype(dtype.categories): cats = to_timedelta(inferred_categories, errors='coerce') if known_categories: categories = dtype.categories codes = _recode_for_categories(inferred_codes, cats, categories) elif (not cats.is_monotonic_increasing): unsorted = cats.copy() categories = cats.sort_values() codes = _recode_for_categories(inferred_codes, unsorted, categories) dtype = CategoricalDtype(categories, ordered=False) else: dtype = CategoricalDtype(cats, ordered=False) codes = inferred_codes return cls(codes, dtype=dtype, fastpath=True)
6,075,375,981,098,576,000
Construct a Categorical from inferred values For inferred categories (`dtype` is None) the categories are sorted. For explicit `dtype`, the `inferred_categories` are cast to the appropriate type. Parameters ---------- inferred_categories : Index inferred_codes : Index dtype : CategoricalDtype or 'category' Returns ------- Categorical
pandas/core/arrays/categorical.py
_from_inferred_categories
Adirio/pandas
python
@classmethod def _from_inferred_categories(cls, inferred_categories, inferred_codes, dtype): "Construct a Categorical from inferred values\n\n For inferred categories (`dtype` is None) the categories are sorted.\n For explicit `dtype`, the `inferred_categories` are cast to the\n appropriate type.\n\n Parameters\n ----------\n\n inferred_categories : Index\n inferred_codes : Index\n dtype : CategoricalDtype or 'category'\n\n Returns\n -------\n Categorical\n " from pandas import Index, to_numeric, to_datetime, to_timedelta cats = Index(inferred_categories) known_categories = (isinstance(dtype, CategoricalDtype) and (dtype.categories is not None)) if known_categories: if dtype.categories.is_numeric(): cats = to_numeric(inferred_categories, errors='coerce') elif is_datetime64_dtype(dtype.categories): cats = to_datetime(inferred_categories, errors='coerce') elif is_timedelta64_dtype(dtype.categories): cats = to_timedelta(inferred_categories, errors='coerce') if known_categories: categories = dtype.categories codes = _recode_for_categories(inferred_codes, cats, categories) elif (not cats.is_monotonic_increasing): unsorted = cats.copy() categories = cats.sort_values() codes = _recode_for_categories(inferred_codes, unsorted, categories) dtype = CategoricalDtype(categories, ordered=False) else: dtype = CategoricalDtype(cats, ordered=False) codes = inferred_codes return cls(codes, dtype=dtype, fastpath=True)
@classmethod def from_codes(cls, codes, categories, ordered=False): '\n Make a Categorical type from codes and categories arrays.\n\n This constructor is useful if you already have codes and categories and\n so do not need the (computation intensive) factorization step, which is\n usually done on the constructor.\n\n If your data does not follow this convention, please use the normal\n constructor.\n\n Parameters\n ----------\n codes : array-like, integers\n An integer array, where each integer points to a category in\n categories or -1 for NaN\n categories : index-like\n The categories for the categorical. Items need to be unique.\n ordered : boolean, (default False)\n Whether or not this categorical is treated as a ordered\n categorical. If not given, the resulting categorical will be\n unordered.\n ' codes = np.asarray(codes) if (not is_integer_dtype(codes)): msg = 'codes need to be array-like integers' if is_float_dtype(codes): icodes = codes.astype('i8') if (icodes == codes).all(): msg = None codes = icodes warn('float codes will be disallowed in the future and raise a ValueError', FutureWarning, stacklevel=2) if msg: raise ValueError(msg) try: codes = coerce_indexer_dtype(codes, categories) except (ValueError, TypeError): raise ValueError('codes need to be convertible to an arrays of integers') categories = CategoricalDtype.validate_categories(categories) if (len(codes) and ((codes.max() >= len(categories)) or (codes.min() < (- 1)))): raise ValueError('codes need to be between -1 and len(categories)-1') return cls(codes, categories=categories, ordered=ordered, fastpath=True)
-2,980,025,623,766,717,000
Make a Categorical type from codes and categories arrays. This constructor is useful if you already have codes and categories and so do not need the (computationally intensive) factorization step, which is usually done in the constructor. If your data does not follow this convention, please use the normal constructor. Parameters ---------- codes : array-like, integers An integer array, where each integer points to a category in categories or -1 for NaN categories : index-like The categories for the categorical. Items need to be unique. ordered : boolean, (default False) Whether or not this categorical is treated as an ordered categorical. If not given, the resulting categorical will be unordered.
pandas/core/arrays/categorical.py
from_codes
Adirio/pandas
python
@classmethod def from_codes(cls, codes, categories, ordered=False): '\n Make a Categorical type from codes and categories arrays.\n\n This constructor is useful if you already have codes and categories and\n so do not need the (computation intensive) factorization step, which is\n usually done on the constructor.\n\n If your data does not follow this convention, please use the normal\n constructor.\n\n Parameters\n ----------\n codes : array-like, integers\n An integer array, where each integer points to a category in\n categories or -1 for NaN\n categories : index-like\n The categories for the categorical. Items need to be unique.\n ordered : boolean, (default False)\n Whether or not this categorical is treated as a ordered\n categorical. If not given, the resulting categorical will be\n unordered.\n ' codes = np.asarray(codes) if (not is_integer_dtype(codes)): msg = 'codes need to be array-like integers' if is_float_dtype(codes): icodes = codes.astype('i8') if (icodes == codes).all(): msg = None codes = icodes warn('float codes will be disallowed in the future and raise a ValueError', FutureWarning, stacklevel=2) if msg: raise ValueError(msg) try: codes = coerce_indexer_dtype(codes, categories) except (ValueError, TypeError): raise ValueError('codes need to be convertible to an arrays of integers') categories = CategoricalDtype.validate_categories(categories) if (len(codes) and ((codes.max() >= len(categories)) or (codes.min() < (- 1)))): raise ValueError('codes need to be between -1 and len(categories)-1') return cls(codes, categories=categories, ordered=ordered, fastpath=True)
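A brief sketch of this fast-path constructor; -1 marks a missing value and the codes must already be integers:

import pandas as pd

c = pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
print(c)  # [a, b, NaN, a]; Categories (2, object): [a, b]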
def _get_codes(self): ' Get the codes.\n\n Returns\n -------\n codes : integer array view\n A non writable view of the `codes` array.\n ' v = self._codes.view() v.flags.writeable = False return v
-7,600,924,652,233,758,000
Get the codes. Returns ------- codes : integer array view A non-writable view of the `codes` array.
pandas/core/arrays/categorical.py
_get_codes
Adirio/pandas
python
def _get_codes(self): ' Get the codes.\n\n Returns\n -------\n codes : integer array view\n A non writable view of the `codes` array.\n ' v = self._codes.view() v.flags.writeable = False return v
def _set_codes(self, codes): '\n Not settable by the user directly\n ' raise ValueError('cannot set Categorical codes directly')
5,406,395,936,789,598,000
Not settable by the user directly
pandas/core/arrays/categorical.py
_set_codes
Adirio/pandas
python
def _set_codes(self, codes): '\n \n ' raise ValueError('cannot set Categorical codes directly')
def _set_categories(self, categories, fastpath=False): " Sets new categories inplace\n\n Parameters\n ----------\n fastpath : boolean (default: False)\n Don't perform validation of the categories for uniqueness or nulls\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'b'])\n >>> c\n [a, b]\n Categories (2, object): [a, b]\n\n >>> c._set_categories(pd.Index(['a', 'c']))\n >>> c\n [a, c]\n Categories (2, object): [a, c]\n " if fastpath: new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) else: new_dtype = CategoricalDtype(categories, ordered=self.ordered) if ((not fastpath) and (self.dtype.categories is not None) and (len(new_dtype.categories) != len(self.dtype.categories))): raise ValueError('new categories need to have the same number of items than the old categories!') self._dtype = new_dtype
5,246,180,266,847,194,000
Sets new categories inplace Parameters ---------- fastpath : boolean (default: False) Don't perform validation of the categories for uniqueness or nulls Examples -------- >>> c = pd.Categorical(['a', 'b']) >>> c [a, b] Categories (2, object): [a, b] >>> c._set_categories(pd.Index(['a', 'c'])) >>> c [a, c] Categories (2, object): [a, c]
pandas/core/arrays/categorical.py
_set_categories
Adirio/pandas
python
def _set_categories(self, categories, fastpath=False): " Sets new categories inplace\n\n Parameters\n ----------\n fastpath : boolean (default: False)\n Don't perform validation of the categories for uniqueness or nulls\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'b'])\n >>> c\n [a, b]\n Categories (2, object): [a, b]\n\n >>> c._set_categories(pd.Index(['a', 'c']))\n >>> c\n [a, c]\n Categories (2, object): [a, c]\n " if fastpath: new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) else: new_dtype = CategoricalDtype(categories, ordered=self.ordered) if ((not fastpath) and (self.dtype.categories is not None) and (len(new_dtype.categories) != len(self.dtype.categories))): raise ValueError('new categories need to have the same number of items than the old categories!') self._dtype = new_dtype
def _set_dtype(self, dtype): "Internal method for directly updating the CategoricalDtype\n\n Parameters\n ----------\n dtype : CategoricalDtype\n\n Notes\n -----\n We don't do any validation here. It's assumed that the dtype is\n a (valid) instance of `CategoricalDtype`.\n " codes = _recode_for_categories(self.codes, self.categories, dtype.categories) return type(self)(codes, dtype=dtype, fastpath=True)
-6,067,429,269,616,330,000
Internal method for directly updating the CategoricalDtype Parameters ---------- dtype : CategoricalDtype Notes ----- We don't do any validation here. It's assumed that the dtype is a (valid) instance of `CategoricalDtype`.
pandas/core/arrays/categorical.py
_set_dtype
Adirio/pandas
python
def _set_dtype(self, dtype): "Internal method for directly updating the CategoricalDtype\n\n Parameters\n ----------\n dtype : CategoricalDtype\n\n Notes\n -----\n We don't do any validation here. It's assumed that the dtype is\n a (valid) instance of `CategoricalDtype`.\n " codes = _recode_for_categories(self.codes, self.categories, dtype.categories) return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False): '\n Sets the ordered attribute to the boolean value\n\n Parameters\n ----------\n value : boolean to set whether this categorical is ordered (True) or\n not (False)\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to the value\n ' inplace = validate_bool_kwarg(inplace, 'inplace') new_dtype = CategoricalDtype(self.categories, ordered=value) cat = (self if inplace else self.copy()) cat._dtype = new_dtype if (not inplace): return cat
-5,951,419,796,902,331,000
Sets the ordered attribute to the boolean value Parameters ---------- value : boolean to set whether this categorical is ordered (True) or not (False) inplace : boolean (default: False) Whether or not to set the ordered attribute inplace or return a copy of this categorical with ordered set to the value
pandas/core/arrays/categorical.py
set_ordered
Adirio/pandas
python
def set_ordered(self, value, inplace=False): '\n Sets the ordered attribute to the boolean value\n\n Parameters\n ----------\n value : boolean to set whether this categorical is ordered (True) or\n not (False)\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to the value\n ' inplace = validate_bool_kwarg(inplace, 'inplace') new_dtype = CategoricalDtype(self.categories, ordered=value) cat = (self if inplace else self.copy()) cat._dtype = new_dtype if (not inplace): return cat
def as_ordered(self, inplace=False): '\n Sets the Categorical to be ordered\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to True\n ' inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(True, inplace=inplace)
578,445,005,402,782
Sets the Categorical to be ordered Parameters ---------- inplace : boolean (default: False) Whether or not to set the ordered attribute inplace or return a copy of this categorical with ordered set to True
pandas/core/arrays/categorical.py
as_ordered
Adirio/pandas
python
def as_ordered(self, inplace=False): '\n Sets the Categorical to be ordered\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to True\n ' inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False): '\n Sets the Categorical to be unordered\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to False\n ' inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(False, inplace=inplace)
-5,309,315,199,635,879,000
Sets the Categorical to be unordered Parameters ---------- inplace : boolean (default: False) Whether or not to set the ordered attribute inplace or return a copy of this categorical with ordered set to False
pandas/core/arrays/categorical.py
as_unordered
Adirio/pandas
python
def as_unordered(self, inplace=False): '\n Sets the Categorical to be unordered\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to False\n ' inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(False, inplace=inplace)
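All three methods above are thin wrappers around the same dtype swap: `as_ordered()` and `as_unordered()` just call `set_ordered(True)` and `set_ordered(False)`. Ordering is what unlocks comparisons and min/max:

import pandas as pd

c = pd.Categorical(['low', 'high', 'low'], categories=['low', 'high'])
co = c.as_ordered()            # copy with low < high
print(co.max())                # high
print((co < 'high').tolist())  # [True, False, True]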
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False): ' Sets the categories to the specified new_categories.\n\n `new_categories` can include new categories (which will result in\n unused categories) or remove old categories (which results in values\n set to NaN). If `rename==True`, the categories will simple be renamed\n (less or more items than in old categories will result in values set to\n NaN or in unused categories respectively).\n\n This method can be used to perform more than one action of adding,\n removing, and reordering simultaneously and is therefore faster than\n performing the individual steps via the more specialised methods.\n\n On the other hand this methods does not do checks (e.g., whether the\n old categories are included in the new categories on a reorder), which\n can result in surprising changes, for example when using special string\n dtypes on python3, which does not considers a S1 string equal to a\n single char python string.\n\n Raises\n ------\n ValueError\n If new_categories does not validate as categories\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : boolean, (default: False)\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n rename : boolean (default: False)\n Whether or not the new_categories should be considered as a rename\n of the old categories or as reordered categories.\n inplace : boolean (default: False)\n Whether or not to reorder the categories inplace or return a copy of\n this categorical with reordered categories.\n\n Returns\n -------\n cat : Categorical with reordered categories or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (ordered is None): ordered = self.dtype.ordered new_dtype = CategoricalDtype(new_categories, ordered=ordered) cat = (self if inplace else self.copy()) if rename: if ((cat.dtype.categories is not None) and (len(new_dtype.categories) < len(cat.dtype.categories))): self._codes[(self._codes >= len(new_dtype.categories))] = (- 1) else: codes = _recode_for_categories(self.codes, self.categories, new_dtype.categories) cat._codes = codes cat._dtype = new_dtype if (not inplace): return cat
-3,923,649,045,691,250,000
Sets the categories to the specified new_categories. `new_categories` can include new categories (which will result in unused categories) or remove old categories (which results in values set to NaN). If `rename==True`, the categories will simply be renamed (fewer or more items than in the old categories will result in values set to NaN or in unused categories, respectively). This method can be used to perform more than one action of adding, removing, and reordering simultaneously and is therefore faster than performing the individual steps via the more specialised methods. On the other hand, this method does not do checks (e.g., whether the old categories are included in the new categories on a reorder), which can result in surprising changes, for example when using special string dtypes on Python 3, which does not consider an S1 string equal to a single-char Python string. Raises ------ ValueError If new_categories does not validate as categories Parameters ---------- new_categories : Index-like The categories in new order. ordered : boolean, (default: False) Whether or not the categorical is treated as an ordered categorical. If not given, do not change the ordered information. rename : boolean (default: False) Whether or not the new_categories should be considered as a rename of the old categories or as reordered categories. inplace : boolean (default: False) Whether or not to reorder the categories inplace or return a copy of this categorical with reordered categories. Returns ------- cat : Categorical with reordered categories or None if inplace. See also -------- rename_categories reorder_categories add_categories remove_categories remove_unused_categories
pandas/core/arrays/categorical.py
set_categories
Adirio/pandas
python
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False): ' Sets the categories to the specified new_categories.\n\n `new_categories` can include new categories (which will result in\n unused categories) or remove old categories (which results in values\n set to NaN). If `rename==True`, the categories will simple be renamed\n (less or more items than in old categories will result in values set to\n NaN or in unused categories respectively).\n\n This method can be used to perform more than one action of adding,\n removing, and reordering simultaneously and is therefore faster than\n performing the individual steps via the more specialised methods.\n\n On the other hand this methods does not do checks (e.g., whether the\n old categories are included in the new categories on a reorder), which\n can result in surprising changes, for example when using special string\n dtypes on python3, which does not considers a S1 string equal to a\n single char python string.\n\n Raises\n ------\n ValueError\n If new_categories does not validate as categories\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : boolean, (default: False)\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n rename : boolean (default: False)\n Whether or not the new_categories should be considered as a rename\n of the old categories or as reordered categories.\n inplace : boolean (default: False)\n Whether or not to reorder the categories inplace or return a copy of\n this categorical with reordered categories.\n\n Returns\n -------\n cat : Categorical with reordered categories or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (ordered is None): ordered = self.dtype.ordered new_dtype = CategoricalDtype(new_categories, ordered=ordered) cat = (self if inplace else self.copy()) if rename: if ((cat.dtype.categories is not None) and (len(new_dtype.categories) < len(cat.dtype.categories))): self._codes[(self._codes >= len(new_dtype.categories))] = (- 1) else: codes = _recode_for_categories(self.codes, self.categories, new_dtype.categories) cat._codes = codes cat._dtype = new_dtype if (not inplace): return cat
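Because `set_categories` adds, drops, and reorders in a single pass, one call can both orphan values (NaN) and introduce unused categories:

import pandas as pd

c = pd.Categorical(['a', 'b', 'a'])
c2 = c.set_categories(['b', 'c'])  # 'a' dropped -> NaN; 'c' added but unused
print(c2)  # [NaN, b, NaN]; Categories (2, object): [b, c]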
def rename_categories(self, new_categories, inplace=False): " Renames categories.\n\n Raises\n ------\n ValueError\n If new categories are list-like and do not have the same number of\n items than the current categories or do not validate as categories\n\n Parameters\n ----------\n new_categories : list-like, dict-like or callable\n\n * list-like: all items must be unique and the number of items in\n the new categories must match the existing number of categories.\n\n * dict-like: specifies a mapping from\n old categories to new. Categories not contained in the mapping\n are passed through and extra categories in the mapping are\n ignored.\n\n .. versionadded:: 0.21.0\n\n * callable : a callable that is called on all items in the old\n categories and whose return values comprise the new categories.\n\n .. versionadded:: 0.23.0\n\n .. warning::\n\n Currently, Series are considered list like. In a future version\n of pandas they'll be considered dict-like.\n\n inplace : boolean (default: False)\n Whether or not to rename the categories inplace or return a copy of\n this categorical with renamed categories.\n\n Returns\n -------\n cat : Categorical or None\n With ``inplace=False``, the new categorical is returned.\n With ``inplace=True``, there is no return value.\n\n See also\n --------\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'a', 'b'])\n >>> c.rename_categories([0, 1])\n [0, 0, 1]\n Categories (2, int64): [0, 1]\n\n For dict-like ``new_categories``, extra keys are ignored and\n categories not in the dictionary are passed through\n\n >>> c.rename_categories({'a': 'A', 'c': 'C'})\n [A, A, b]\n Categories (2, object): [A, b]\n\n You may also provide a callable to create the new categories\n\n >>> c.rename_categories(lambda x: x.upper())\n [A, A, B]\n Categories (2, object): [A, B]\n " inplace = validate_bool_kwarg(inplace, 'inplace') cat = (self if inplace else self.copy()) if isinstance(new_categories, ABCSeries): msg = "Treating Series 'new_categories' as a list-like and using the values. In a future version, 'rename_categories' will treat Series like a dictionary.\nFor dict-like, use 'new_categories.to_dict()'\nFor list-like, use 'new_categories.values'." warn(msg, FutureWarning, stacklevel=2) new_categories = list(new_categories) if is_dict_like(new_categories): cat.categories = [new_categories.get(item, item) for item in cat.categories] elif callable(new_categories): cat.categories = [new_categories(item) for item in cat.categories] else: cat.categories = new_categories if (not inplace): return cat
4,900,925,190,630,737,000
Renames categories. Raises ------ ValueError If new categories are list-like and do not have the same number of items as the current categories or do not validate as categories Parameters ---------- new_categories : list-like, dict-like or callable * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. .. versionadded:: 0.21.0 * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. .. versionadded:: 0.23.0 .. warning:: Currently, Series are considered list-like. In a future version of pandas they'll be considered dict-like. inplace : boolean (default: False) Whether or not to rename the categories inplace or return a copy of this categorical with renamed categories. Returns ------- cat : Categorical or None With ``inplace=False``, the new categorical is returned. With ``inplace=True``, there is no return value. See also -------- reorder_categories add_categories remove_categories remove_unused_categories set_categories Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) [A, A, b] Categories (2, object): [A, b] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) [A, A, B] Categories (2, object): [A, B]
pandas/core/arrays/categorical.py
rename_categories
Adirio/pandas
python
def rename_categories(self, new_categories, inplace=False): " Renames categories.\n\n Raises\n ------\n ValueError\n If new categories are list-like and do not have the same number of\n items than the current categories or do not validate as categories\n\n Parameters\n ----------\n new_categories : list-like, dict-like or callable\n\n * list-like: all items must be unique and the number of items in\n the new categories must match the existing number of categories.\n\n * dict-like: specifies a mapping from\n old categories to new. Categories not contained in the mapping\n are passed through and extra categories in the mapping are\n ignored.\n\n .. versionadded:: 0.21.0\n\n * callable : a callable that is called on all items in the old\n categories and whose return values comprise the new categories.\n\n .. versionadded:: 0.23.0\n\n .. warning::\n\n Currently, Series are considered list like. In a future version\n of pandas they'll be considered dict-like.\n\n inplace : boolean (default: False)\n Whether or not to rename the categories inplace or return a copy of\n this categorical with renamed categories.\n\n Returns\n -------\n cat : Categorical or None\n With ``inplace=False``, the new categorical is returned.\n With ``inplace=True``, there is no return value.\n\n See also\n --------\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'a', 'b'])\n >>> c.rename_categories([0, 1])\n [0, 0, 1]\n Categories (2, int64): [0, 1]\n\n For dict-like ``new_categories``, extra keys are ignored and\n categories not in the dictionary are passed through\n\n >>> c.rename_categories({'a': 'A', 'c': 'C'})\n [A, A, b]\n Categories (2, object): [A, b]\n\n You may also provide a callable to create the new categories\n\n >>> c.rename_categories(lambda x: x.upper())\n [A, A, B]\n Categories (2, object): [A, B]\n " inplace = validate_bool_kwarg(inplace, 'inplace') cat = (self if inplace else self.copy()) if isinstance(new_categories, ABCSeries): msg = "Treating Series 'new_categories' as a list-like and using the values. In a future version, 'rename_categories' will treat Series like a dictionary.\nFor dict-like, use 'new_categories.to_dict()'\nFor list-like, use 'new_categories.values'." warn(msg, FutureWarning, stacklevel=2) new_categories = list(new_categories) if is_dict_like(new_categories): cat.categories = [new_categories.get(item, item) for item in cat.categories] elif callable(new_categories): cat.categories = [new_categories(item) for item in cat.categories] else: cat.categories = new_categories if (not inplace): return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False): ' Reorders categories as specified in new_categories.\n\n `new_categories` need to include all old categories and no new category\n items.\n\n Raises\n ------\n ValueError\n If the new categories do not contain all old category items or any\n new ones\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : boolean, optional\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n inplace : boolean (default: False)\n Whether or not to reorder the categories inplace or return a copy of\n this categorical with reordered categories.\n\n Returns\n -------\n cat : Categorical with reordered categories or None if inplace.\n\n See also\n --------\n rename_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (set(self.dtype.categories) != set(new_categories)): raise ValueError('items in new_categories are not the same as in old categories') return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
-1,027,613,687,673,260,500
Reorders categories as specified in new_categories. `new_categories` needs to include all old categories and no new category items. Raises ------ ValueError If the new categories do not contain all old category items or if any new ones are included Parameters ---------- new_categories : Index-like The categories in new order. ordered : boolean, optional Whether or not the categorical is treated as an ordered categorical. If not given, do not change the ordered information. inplace : boolean (default: False) Whether or not to reorder the categories inplace or return a copy of this categorical with reordered categories. Returns ------- cat : Categorical with reordered categories or None if inplace. See also -------- rename_categories add_categories remove_categories remove_unused_categories set_categories
pandas/core/arrays/categorical.py
reorder_categories
Adirio/pandas
python
def reorder_categories(self, new_categories, ordered=None, inplace=False): ' Reorders categories as specified in new_categories.\n\n `new_categories` need to include all old categories and no new category\n items.\n\n Raises\n ------\n ValueError\n If the new categories do not contain all old category items or any\n new ones\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : boolean, optional\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n inplace : boolean (default: False)\n Whether or not to reorder the categories inplace or return a copy of\n this categorical with reordered categories.\n\n Returns\n -------\n cat : Categorical with reordered categories or None if inplace.\n\n See also\n --------\n rename_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (set(self.dtype.categories) != set(new_categories)): raise ValueError('items in new_categories are not the same as in old categories') return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
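`reorder_categories` is `set_categories` plus the safety check that the category set is unchanged; combined with `ordered=True` it fixes a ranking:

import pandas as pd

c = pd.Categorical(['a', 'b', 'a'])
c2 = c.reorder_categories(['b', 'a'], ordered=True)
print(c2.min())  # b  (now the lowest category)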
def add_categories(self, new_categories, inplace=False): ' Add new categories.\n\n `new_categories` will be included at the last/highest place in the\n categories and will be unused directly after this call.\n\n Raises\n ------\n ValueError\n If the new categories include old categories or do not validate as\n categories\n\n Parameters\n ----------\n new_categories : category or list-like of category\n The new categories to be included.\n inplace : boolean (default: False)\n Whether or not to add the categories inplace or return a copy of\n this categorical with added categories.\n\n Returns\n -------\n cat : Categorical with new categories added or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n remove_categories\n remove_unused_categories\n set_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (not is_list_like(new_categories)): new_categories = [new_categories] already_included = (set(new_categories) & set(self.dtype.categories)) if (len(already_included) != 0): msg = 'new categories must not include old categories: {already_included!s}' raise ValueError(msg.format(already_included=already_included)) new_categories = (list(self.dtype.categories) + list(new_categories)) new_dtype = CategoricalDtype(new_categories, self.ordered) cat = (self if inplace else self.copy()) cat._dtype = new_dtype cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories) if (not inplace): return cat
-7,761,468,792,164,887,000
Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. Raises ------ ValueError If the new categories include old categories or do not validate as categories Parameters ---------- new_categories : category or list-like of category The new categories to be included. inplace : boolean (default: False) Whether or not to add the categories inplace or return a copy of this categorical with added categories. Returns ------- cat : Categorical with new categories added or None if inplace. See also -------- rename_categories reorder_categories remove_categories remove_unused_categories set_categories
pandas/core/arrays/categorical.py
add_categories
Adirio/pandas
python
def add_categories(self, new_categories, inplace=False): ' Add new categories.\n\n `new_categories` will be included at the last/highest place in the\n categories and will be unused directly after this call.\n\n Raises\n ------\n ValueError\n If the new categories include old categories or do not validate as\n categories\n\n Parameters\n ----------\n new_categories : category or list-like of category\n The new categories to be included.\n inplace : boolean (default: False)\n Whether or not to add the categories inplace or return a copy of\n this categorical with added categories.\n\n Returns\n -------\n cat : Categorical with new categories added or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n remove_categories\n remove_unused_categories\n set_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (not is_list_like(new_categories)): new_categories = [new_categories] already_included = (set(new_categories) & set(self.dtype.categories)) if (len(already_included) != 0): msg = 'new categories must not include old categories: {already_included!s}' raise ValueError(msg.format(already_included=already_included)) new_categories = (list(self.dtype.categories) + list(new_categories)) new_dtype = CategoricalDtype(new_categories, self.ordered) cat = (self if inplace else self.copy()) cat._dtype = new_dtype cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories) if (not inplace): return cat
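Newly added categories are appended at the end and start out unused, which is the usual way to make room before assigning values that were not previously valid:

import pandas as pd

c = pd.Categorical(['a', 'b'])
c2 = c.add_categories(['c'])  # 'c' appended, currently unused
c2[0] = 'c'                   # assignment is now legal
print(c2)  # [c, b]; Categories (3, object): [a, b, c]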
def remove_categories(self, removals, inplace=False): ' Removes the specified categories.\n\n `removals` must be included in the old categories. Values which were in\n the removed categories will be set to NaN\n\n Raises\n ------\n ValueError\n If the removals are not contained in the categories\n\n Parameters\n ----------\n removals : category or list of categories\n The categories which should be removed.\n inplace : boolean (default: False)\n Whether or not to remove the categories inplace or return a copy of\n this categorical with removed categories.\n\n Returns\n -------\n cat : Categorical with removed categories or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_unused_categories\n set_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (not is_list_like(removals)): removals = [removals] removal_set = set(list(removals)) not_included = (removal_set - set(self.dtype.categories)) new_categories = [c for c in self.dtype.categories if (c not in removal_set)] if any(isna(removals)): not_included = [x for x in not_included if notna(x)] new_categories = [x for x in new_categories if notna(x)] if (len(not_included) != 0): msg = 'removals must all be in old categories: {not_included!s}' raise ValueError(msg.format(not_included=not_included)) return self.set_categories(new_categories, ordered=self.ordered, rename=False, inplace=inplace)
613,014,990,297,959,800
Removes the specified categories. `removals` must be included in the old categories. Values which were in the removed categories will be set to NaN Raises ------ ValueError If the removals are not contained in the categories Parameters ---------- removals : category or list of categories The categories which should be removed. inplace : boolean (default: False) Whether or not to remove the categories inplace or return a copy of this categorical with removed categories. Returns ------- cat : Categorical with removed categories or None if inplace. See also -------- rename_categories reorder_categories add_categories remove_unused_categories set_categories
pandas/core/arrays/categorical.py
remove_categories
Adirio/pandas
python
def remove_categories(self, removals, inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') if (not is_list_like(removals)): removals = [removals] removal_set = set(list(removals)) not_included = (removal_set - set(self.dtype.categories)) new_categories = [c for c in self.dtype.categories if (c not in removal_set)] if any(isna(removals)): not_included = [x for x in not_included if notna(x)] new_categories = [x for x in new_categories if notna(x)] if (len(not_included) != 0): msg = 'removals must all be in old categories: {not_included!s}' raise ValueError(msg.format(not_included=not_included)) return self.set_categories(new_categories, ordered=self.ordered, rename=False, inplace=inplace)
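Illustrative usage for the remove_categories record above (a minimal sketch with hypothetical values):

import pandas as pd

cat = pd.Categorical(['a', 'b', 'c', 'b'])
cat = cat.remove_categories(['c'])  # positions that held 'c' become NaN
print(cat)                          # [a, b, NaN, b]; Categories (2, object): [a, b]
# cat.remove_categories(['z']) would raise ValueError, since 'z' is not an existing category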
def remove_unused_categories(self, inplace=False): ' Removes categories which are not used.\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to drop unused categories inplace or return a copy of\n this categorical with unused categories dropped.\n\n Returns\n -------\n cat : Categorical with unused categories dropped or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n set_categories\n ' inplace = validate_bool_kwarg(inplace, 'inplace') cat = (self if inplace else self.copy()) (idx, inv) = np.unique(cat._codes, return_inverse=True) if ((idx.size != 0) and (idx[0] == (- 1))): (idx, inv) = (idx[1:], (inv - 1)) new_categories = cat.dtype.categories.take(idx) new_dtype = CategoricalDtype._from_fastpath(new_categories, ordered=self.ordered) cat._dtype = new_dtype cat._codes = coerce_indexer_dtype(inv, new_dtype.categories) if (not inplace): return cat
-2,912,470,624,796,751,000
Removes categories which are not used. Parameters ---------- inplace : boolean (default: False) Whether or not to drop unused categories inplace or return a copy of this categorical with unused categories dropped. Returns ------- cat : Categorical with unused categories dropped or None if inplace. See also -------- rename_categories reorder_categories add_categories remove_categories set_categories
pandas/core/arrays/categorical.py
remove_unused_categories
Adirio/pandas
python
def remove_unused_categories(self, inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') cat = (self if inplace else self.copy()) (idx, inv) = np.unique(cat._codes, return_inverse=True) if ((idx.size != 0) and (idx[0] == (- 1))): (idx, inv) = (idx[1:], (inv - 1)) new_categories = cat.dtype.categories.take(idx) new_dtype = CategoricalDtype._from_fastpath(new_categories, ordered=self.ordered) cat._dtype = new_dtype cat._codes = coerce_indexer_dtype(inv, new_dtype.categories) if (not inplace): return cat
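Illustrative usage for the remove_unused_categories record above (a minimal sketch with hypothetical values):

import pandas as pd

cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
trimmed = cat.remove_unused_categories()  # 'c' has no occurrences, so it is dropped
print(trimmed.categories)                 # Index(['a', 'b'], dtype='object')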
def map(self, mapper): "\n Map categories using input correspondence (dict, Series, or function).\n\n Maps the categories to new categories. If the mapping correspondence is\n one-to-one the result is a :class:`~pandas.Categorical` which has the\n same order property as the original, otherwise a :class:`~pandas.Index`\n is returned.\n\n If a `dict` or :class:`~pandas.Series` is used any unmapped category is\n mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`\n will be returned.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n\n Returns\n -------\n pandas.Categorical or pandas.Index\n Mapped categorical.\n\n See Also\n --------\n CategoricalIndex.map : Apply a mapping correspondence on a\n :class:`~pandas.CategoricalIndex`.\n Index.map : Apply a mapping correspondence on an\n :class:`~pandas.Index`.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n Series.apply : Apply more complex functions on a\n :class:`~pandas.Series`.\n\n Examples\n --------\n >>> cat = pd.Categorical(['a', 'b', 'c'])\n >>> cat\n [a, b, c]\n Categories (3, object): [a, b, c]\n >>> cat.map(lambda x: x.upper())\n [A, B, C]\n Categories (3, object): [A, B, C]\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})\n [first, second, third]\n Categories (3, object): [first, second, third]\n\n If the mapping is one-to-one the ordering of the categories is\n preserved:\n\n >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)\n >>> cat\n [a, b, c]\n Categories (3, object): [a < b < c]\n >>> cat.map({'a': 3, 'b': 2, 'c': 1})\n [3, 2, 1]\n Categories (3, int64): [3 < 2 < 1]\n\n If the mapping is not one-to-one an :class:`~pandas.Index` is returned:\n\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})\n Index(['first', 'second', 'first'], dtype='object')\n\n If a `dict` is used, all unmapped categories are mapped to `NaN` and\n the result is an :class:`~pandas.Index`:\n\n >>> cat.map({'a': 'first', 'b': 'second'})\n Index(['first', 'second', nan], dtype='object')\n " new_categories = self.categories.map(mapper) try: return self.from_codes(self._codes.copy(), categories=new_categories, ordered=self.ordered) except ValueError: return np.take(new_categories, self._codes)
3,000,125,632,197,192,700
Map categories using input correspondence (dict, Series, or function). Maps the categories to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.Categorical` which has the same order property as the original, otherwise a :class:`~pandas.Index` is returned. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.Categorical or pandas.Index Mapped categorical. See Also -------- CategoricalIndex.map : Apply a mapping correspondence on a :class:`~pandas.CategoricalIndex`. Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> cat = pd.Categorical(['a', 'b', 'c']) >>> cat [a, b, c] Categories (3, object): [a, b, c] >>> cat.map(lambda x: x.upper()) [A, B, C] Categories (3, object): [A, B, C] >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}) [first, second, third] Categories (3, object): [first, second, third] If the mapping is one-to-one the ordering of the categories is preserved: >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) >>> cat [a, b, c] Categories (3, object): [a < b < c] >>> cat.map({'a': 3, 'b': 2, 'c': 1}) [3, 2, 1] Categories (3, int64): [3 < 2 < 1] If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> cat.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object')
pandas/core/arrays/categorical.py
map
Adirio/pandas
python
def map(self, mapper): new_categories = self.categories.map(mapper) try: return self.from_codes(self._codes.copy(), categories=new_categories, ordered=self.ordered) except ValueError: return np.take(new_categories, self._codes)
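The docstring above already demonstrates dict and callable mappers; the sketch below shows the Series form, which takes the same code path (the sample values are hypothetical):

import pandas as pd

cat = pd.Categorical(['a', 'b', 'c'])
mapped = cat.map(pd.Series({'a': 1, 'b': 2, 'c': 3}))  # one-to-one, so a Categorical is returned
print(mapped)  # [1, 2, 3]; Categories (3, int64): [1, 2, 3]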
@property def shape(self): ' Shape of the Categorical.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n shape : tuple\n ' return tuple([len(self._codes)])
5,707,964,752,293,149,000
Shape of the Categorical. For internal compatibility with numpy arrays. Returns ------- shape : tuple
pandas/core/arrays/categorical.py
shape
Adirio/pandas
python
@property def shape(self): return tuple([len(self._codes)])
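Illustrative usage for the shape record above (a minimal sketch with hypothetical values):

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'])
print(cat.shape)                 # (3,) -- a Categorical is always one-dimensional
print(cat.shape[0] == len(cat))  # True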
def shift(self, periods): '\n Shift Categorical by desired number of periods.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n\n Returns\n -------\n shifted : Categorical\n ' codes = self.codes if (codes.ndim > 1): raise NotImplementedError('Categorical with ndim > 1.') if (np.prod(codes.shape) and (periods != 0)): codes = np.roll(codes, ensure_platform_int(periods), axis=0) if (periods > 0): codes[:periods] = (- 1) else: codes[periods:] = (- 1) return self.from_codes(codes, categories=self.categories, ordered=self.ordered)
-8,439,460,178,993,714,000
Shift Categorical by desired number of periods. Parameters ---------- periods : int Number of periods to move, can be positive or negative Returns ------- shifted : Categorical
pandas/core/arrays/categorical.py
shift
Adirio/pandas
python
def shift(self, periods): codes = self.codes if (codes.ndim > 1): raise NotImplementedError('Categorical with ndim > 1.') if (np.prod(codes.shape) and (periods != 0)): codes = np.roll(codes, ensure_platform_int(periods), axis=0) if (periods > 0): codes[:periods] = (- 1) else: codes[periods:] = (- 1) return self.from_codes(codes, categories=self.categories, ordered=self.ordered)
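Illustrative usage for the shift record above (a minimal sketch with hypothetical values):

import pandas as pd

cat = pd.Categorical(['a', 'b', 'c'])
print(cat.shift(1))   # [NaN, a, b] -- slots vacated by the shift are filled with NaN (code -1)
print(cat.shift(-1))  # [b, c, NaN] -- a negative period shifts toward the start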
def __array__(self, dtype=None): '\n The numpy array interface.\n\n Returns\n -------\n values : numpy array\n A numpy array of either the specified dtype or,\n if dtype==None (default), the same dtype as\n categorical.categories.dtype\n ' ret = take_1d(self.categories.values, self._codes) if (dtype and (not is_dtype_equal(dtype, self.categories.dtype))): return np.asarray(ret, dtype) if is_extension_array_dtype(ret): ret = np.asarray(ret) return ret
2,061,315,902,725,703,000
The numpy array interface. Returns ------- values : numpy array A numpy array of either the specified dtype or, if dtype==None (default), the same dtype as categorical.categories.dtype
pandas/core/arrays/categorical.py
__array__
Adirio/pandas
python
def __array__(self, dtype=None): ret = take_1d(self.categories.values, self._codes) if (dtype and (not is_dtype_equal(dtype, self.categories.dtype))): return np.asarray(ret, dtype) if is_extension_array_dtype(ret): ret = np.asarray(ret) return ret
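Illustrative usage for the __array__ record above (a minimal sketch with hypothetical values; __array__ is invoked implicitly by numpy):

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'])
print(np.asarray(cat))             # ['a' 'b' 'a'] with the categories' dtype (object here)
print(np.asarray(cat, dtype=str))  # an explicit dtype forces a conversion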