body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---
def draw_map():
    '\n Draw the world map\n Ran into a rather strange problem:\n the two lists must contain hardcoded data for the map to render it; if the data comes from a method, the map does not render the data\n :return:\n '
(countrys_names, confirmed_count) = read_csv()
confirmed_count_list = []
for item in confirmed_count:
confirmed_count_list.append(int(item))
    c = Map().add('确诊人数', [list(z) for z in zip(countrys_names, confirmed_count_list)], is_map_symbol_show=False, maptype='world', label_opts=opts.LabelOpts(is_show=False), itemstyle_opts=opts.ItemStyleOpts(color='rgb(49,60,72)')).set_series_opts(label_opts=opts.LabelOpts(is_show=False)).set_global_opts(title_opts=opts.TitleOpts(title='全球 2019-nCoV 地图'), visualmap_opts=opts.VisualMapOpts(max_=1700000)).render('map_world.html') | -3,497,472,359,941,637,600 | Draw the world map
Ran into a rather strange problem:
the two lists must contain hardcoded data for the map to render it; if the data comes from a method, the map does not render the data
:return: | python-data-analysis/2019-nCoV-global/global_map.py | draw_map | DearCasper/python-learning | python | def draw_map():
    '\n Draw the world map\n Ran into a rather strange problem:\n the two lists must contain hardcoded data for the map to render it; if the data comes from a method, the map does not render the data\n :return:\n '
(countrys_names, confirmed_count) = read_csv()
confirmed_count_list = []
for item in confirmed_count:
confirmed_count_list.append(int(item))
c = Map().add('确诊人数', [list(z) for z in zip(countrys_names, confirmed_count_list)], is_map_symbol_show=False, maptype='world', label_opts=opts.LabelOpts(is_show=False), itemstyle_opts=opts.ItemStyleOpts(color='rgb(49,60,72)')).set_series_opts(label_opts=opts.LabelOpts(is_show=False)).set_global_opts(title_opts=opts.TitleOpts(title='全球 2019-nCoV 地图'), visualmap_opts=opts.VisualMapOpts(max_=1700000)).render('map_world.html') |
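One plausible cause of the rendering quirk described in the docstring (hardcoded lists render, method-derived ones do not) is value types: the chart only colors regions for plain Python numbers, which is why the function coerces every CSV value with int() first. A minimal sketch of that coercion, using hypothetical sample values in place of read_csv():

```python
# Hypothetical sample values standing in for read_csv() output; string
# counts from a CSV must become plain ints before being passed to Map.add().
countries = ['China', 'Italy']
raw_counts = ['1234', '567']

pairs = [[name, int(count)] for name, count in zip(countries, raw_counts)]
print(pairs)  # [['China', 1234], ['Italy', 567]]
```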
def size_num_grads(link):
'Count total size of all gradient arrays of a given link\n\n Args:\n link (chainer.link.Link): Target link object.\n '
size = 0
num = 0
for param in link.params():
if (param.size == 0):
continue
size += param.size
num += 1
return (size, num) | -9,161,413,702,474,069,000 | Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object. | chainer/training/updaters/multiprocess_parallel_updater.py | size_num_grads | Lynkzhang/Chainer-UM | python | def size_num_grads(link):
'Count total size of all gradient arrays of a given link\n\n Args:\n link (chainer.link.Link): Target link object.\n '
size = 0
num = 0
for param in link.params():
if (param.size == 0):
continue
size += param.size
num += 1
return (size, num) |
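A short usage sketch of size_num_grads (assumes Chainer is installed): the (size, num) pair tells a data-parallel updater how large a single flat buffer must be before gathering every gradient into it.

```python
import chainer.links as L

# A Linear link with in_size=3, out_size=2 holds W (2x3, 6 elements)
# and b (2 elements), so size_num_grads returns (8, 2).
link = L.Linear(3, 2)
size, num = size_num_grads(link)
print(size, num)  # 8 2
```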
def gather_grads(link):
'Put together all gradient arrays and make a single array\n\n Args:\n link (chainer.link.Link): Target link object.\n Return:\n cupy.ndarray\n '
if (link.xp is numpy):
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, 'grad') | -4,323,672,895,691,865,600 | Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray | chainer/training/updaters/multiprocess_parallel_updater.py | gather_grads | Lynkzhang/Chainer-UM | python | def gather_grads(link):
'Put together all gradient arrays and make a single array\n\n Args:\n link (chainer.link.Link): Target link object.\n Return:\n cupy.ndarray\n '
if (link.xp is numpy):
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, 'grad') |
def gather_params(link):
    'Put together all parameter data arrays and make a single array\n\n Args:\n link (chainer.link.Link): Target link object.\n Return:\n cupy.ndarray\n '
if (link.xp is numpy):
raise RuntimeError('Link.gather_params works only on GPU.')
    return _gather(link, 'data') | -5,200,660,742,070,774,000 | Put together all parameter data arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray | chainer/training/updaters/multiprocess_parallel_updater.py | gather_params | Lynkzhang/Chainer-UM | python | def gather_params(link):
    'Put together all parameter data arrays and make a single array\n\n Args:\n link (chainer.link.Link): Target link object.\n Return:\n cupy.ndarray\n '
if (link.xp is numpy):
raise RuntimeError('Link.gather_params works only on GPU.')
return _gather(link, 'data') |
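The _gather helper that gather_grads and gather_params delegate to is referenced but not shown in this excerpt. A hypothetical pure-CuPy sketch of the idea (requires CuPy and a GPU; the real implementation may differ, e.g. by fusing the copies into one kernel):

```python
import cupy

def _gather(link, target):
    # Flatten the 'grad' or 'data' array of every non-empty parameter and
    # join them into one contiguous device array.
    arrays = [getattr(param, target).ravel()
              for param in link.params() if param.size > 0]
    return cupy.concatenate(arrays)
```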
def scatter_grads(link, array):
'Put back contents of the specified array to the related gradient arrays\n\n Args:\n link (chainer.link.Link): Target link object.\n array (cupy.ndarray): gathered array created by gather_grads()\n '
return _scatter(link, array, 'grad') | 7,692,519,946,315,319,000 | Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads() | chainer/training/updaters/multiprocess_parallel_updater.py | scatter_grads | Lynkzhang/Chainer-UM | python | def scatter_grads(link, array):
'Put back contents of the specified array to the related gradient arrays\n\n Args:\n link (chainer.link.Link): Target link object.\n array (cupy.ndarray): gathered array created by gather_grads()\n '
return _scatter(link, array, 'grad') |
def scatter_params(link, array):
    'Put back contents of the specified array to the related parameter data arrays\n\n Args:\n link (chainer.link.Link): Target link object.\n array (cupy.ndarray): gathered array created by gather_params()\n '
    return _scatter(link, array, 'data') | -5,699,551,748,872,963,000 | Put back contents of the specified array to the related parameter data arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params() | chainer/training/updaters/multiprocess_parallel_updater.py | scatter_params | Lynkzhang/Chainer-UM | python | def scatter_params(link, array):
    'Put back contents of the specified array to the related parameter data arrays\n\n Args:\n link (chainer.link.Link): Target link object.\n array (cupy.ndarray): gathered array created by gather_params()\n '
return _scatter(link, array, 'data') |
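A hypothetical counterpart to the _gather sketch above, for the _scatter helper these two functions call: copy consecutive slices of the flat array back into each parameter, iterating in the same order used when gathering.

```python
def _scatter(link, array, target):
    # Walk the parameters in gather order, carving the next param.size
    # elements out of the flat array for each one.
    offset = 0
    for param in link.params():
        if param.size == 0:
            continue
        dst = getattr(param, target)
        dst[...] = array[offset:offset + param.size].reshape(dst.shape)
        offset += param.size
```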
def _get_nccl_data_type(dtype):
'Get data type for NCCL'
if (dtype == numpy.float32):
nccl_data_type = nccl.NCCL_FLOAT
elif (dtype == numpy.float16):
nccl_data_type = nccl.NCCL_HALF
elif (dtype == numpy.float64):
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type | -2,394,153,031,925,755,400 | Get data type for NCCL | chainer/training/updaters/multiprocess_parallel_updater.py | _get_nccl_data_type | Lynkzhang/Chainer-UM | python | def _get_nccl_data_type(dtype):
if (dtype == numpy.float32):
nccl_data_type = nccl.NCCL_FLOAT
elif (dtype == numpy.float16):
nccl_data_type = nccl.NCCL_HALF
elif (dtype == numpy.float64):
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type |
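A hedged sketch of where this mapping is used: reducing a gathered gradient buffer across workers via CuPy's NCCL bindings. Here `comm` (an initialized cupy.cuda.nccl.NcclCommunicator), `stream`, and `model` are assumptions, not part of this excerpt.

```python
# Reduce the flat gradient buffer in place across all workers, then write
# the summed gradients back into the model's parameters.
gg = gather_grads(model)
nccl_type = _get_nccl_data_type(gg.dtype)
comm.allReduce(gg.data.ptr, gg.data.ptr, gg.size,
               nccl_type, nccl.NCCL_SUM, stream.ptr)
scatter_grads(model, gg)
```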
def ensure_server() -> libtmux.Server:
'\n Either create new or return existing server\n '
return libtmux.Server() | 7,596,910,660,909,000,000 | Either create new or return existing server | kmux/tmux.py | ensure_server | kiemlicz/kmux | python | def ensure_server() -> libtmux.Server:
'\n \n '
return libtmux.Server() |
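A brief usage sketch (assumes libtmux and a tmux binary are installed): libtmux.Server() is a lightweight handle onto the default tmux socket, so the underlying tmux server is started on first use.

```python
server = ensure_server()
# Create a detached session on the default server.
session = server.new_session(session_name='demo', attach=False)
print(session.name)  # demo
```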
def grayscale(self):
"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')"
return cv2.cvtColor(self.caller.image, cv2.COLOR_RGB2GRAY) | -5,467,729,550,303,214,000 | Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray') | color.py | grayscale | mhhm2005eg/CarND-Advanced-Lane-Lines | python | def grayscale(self):
"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')"
return cv2.cvtColor(self.caller.image, cv2.COLOR_RGB2GRAY) |
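Echoing the docstring's note, a brief usage sketch; `detector` is a hypothetical object exposing this method.

```python
import matplotlib.pyplot as plt

gray = detector.grayscale()
# Without cmap='gray', matplotlib applies its default colormap to the
# single-channel result instead of showing it as grayscale.
plt.imshow(gray, cmap='gray')
plt.show()
```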
def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool):
'\n Adds a record that a certain peer has a block.\n '
if (header_hash == self.sync_target_header_hash):
self.peers_changed.set()
if (header_hash in self.peak_to_peer):
self.peak_to_peer[header_hash].add(peer_id)
else:
self.peak_to_peer[header_hash] = {peer_id}
if new_peak:
self.peer_to_peak[peer_id] = (header_hash, height, weight) | -334,373,693,815,353,800 | Adds a record that a certain peer has a block. | seno/full_node/sync_store.py | peer_has_block | AcidBurnSB/seno-blockchain | python | def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool):
'\n \n '
if (header_hash == self.sync_target_header_hash):
self.peers_changed.set()
if (header_hash in self.peak_to_peer):
self.peak_to_peer[header_hash].add(peer_id)
else:
self.peak_to_peer[header_hash] = {peer_id}
if new_peak:
self.peer_to_peak[peer_id] = (header_hash, height, weight) |
def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]:
'\n Returns: peer ids of peers that have at least one of the header hashes.\n '
node_ids: Set[bytes32] = set()
for header_hash in header_hashes:
if (header_hash in self.peak_to_peer):
for node_id in self.peak_to_peer[header_hash]:
node_ids.add(node_id)
return node_ids | 36,668,548,843,882,770 | Returns: peer ids of peers that have at least one of the header hashes. | seno/full_node/sync_store.py | get_peers_that_have_peak | AcidBurnSB/seno-blockchain | python | def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]:
'\n \n '
node_ids: Set[bytes32] = set()
for header_hash in header_hashes:
if (header_hash in self.peak_to_peer):
for node_id in self.peak_to_peer[header_hash]:
node_ids.add(node_id)
return node_ids |
def get_peak_of_each_peer(self) -> Dict[(bytes32, Tuple[(bytes32, uint32, uint128)])]:
'\n Returns: dictionary of peer id to peak information.\n '
ret = {}
for (peer_id, v) in self.peer_to_peak.items():
if (v[0] not in self.peak_to_peer):
continue
ret[peer_id] = v
return ret | -2,512,652,720,815,065,600 | Returns: dictionary of peer id to peak information. | seno/full_node/sync_store.py | get_peak_of_each_peer | AcidBurnSB/seno-blockchain | python | def get_peak_of_each_peer(self) -> Dict[(bytes32, Tuple[(bytes32, uint32, uint128)])]:
'\n \n '
ret = {}
for (peer_id, v) in self.peer_to_peak.items():
if (v[0] not in self.peak_to_peer):
continue
ret[peer_id] = v
return ret |
def get_heaviest_peak(self) -> Optional[Tuple[(bytes32, uint32, uint128)]]:
'\n Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified\n us of.\n '
if (len(self.peer_to_peak) == 0):
return None
heaviest_peak_hash: Optional[bytes32] = None
heaviest_peak_weight: uint128 = uint128(0)
heaviest_peak_height: Optional[uint32] = None
for (peer_id, (peak_hash, height, weight)) in self.peer_to_peak.items():
if (peak_hash not in self.peak_to_peer):
continue
if ((heaviest_peak_hash is None) or (weight > heaviest_peak_weight)):
heaviest_peak_hash = peak_hash
heaviest_peak_weight = weight
heaviest_peak_height = height
assert ((heaviest_peak_hash is not None) and (heaviest_peak_weight is not None) and (heaviest_peak_height is not None))
return (heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight) | -3,561,257,540,632,519,700 | Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified
us of. | seno/full_node/sync_store.py | get_heaviest_peak | AcidBurnSB/seno-blockchain | python | def get_heaviest_peak(self) -> Optional[Tuple[(bytes32, uint32, uint128)]]:
'\n Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified\n us of.\n '
if (len(self.peer_to_peak) == 0):
return None
heaviest_peak_hash: Optional[bytes32] = None
heaviest_peak_weight: uint128 = uint128(0)
heaviest_peak_height: Optional[uint32] = None
for (peer_id, (peak_hash, height, weight)) in self.peer_to_peak.items():
if (peak_hash not in self.peak_to_peer):
continue
if ((heaviest_peak_hash is None) or (weight > heaviest_peak_weight)):
heaviest_peak_hash = peak_hash
heaviest_peak_weight = weight
heaviest_peak_height = height
assert ((heaviest_peak_hash is not None) and (heaviest_peak_weight is not None) and (heaviest_peak_height is not None))
return (heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight) |
async def clear_sync_info(self):
'\n Clears the peak_to_peer info which can get quite large.\n '
self.peak_to_peer = {} | 7,185,190,742,401,591,000 | Clears the peak_to_peer info which can get quite large. | seno/full_node/sync_store.py | clear_sync_info | AcidBurnSB/seno-blockchain | python | async def clear_sync_info(self):
'\n \n '
self.peak_to_peer = {} |
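A minimal illustration of the peak-tracking flow. The enclosing class is not shown in this excerpt, so `store` is assumed to be an instance of it; plain bytes and ints stand in for the bytes32/uint32/uint128 wrapper types.

```python
# Record that a peer reported a new peak, then read back the heaviest one.
peak_hash, peer_id = b'\x01' * 32, b'\x02' * 32
store.peer_has_block(peak_hash, peer_id, weight=1000, height=42, new_peak=True)
print(store.get_heaviest_peak())  # (peak_hash, 42, 1000)
```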
def __init__(self, email_address=None, email_type=None, status=None, merge_fields=None, interests=None, language=None, vip=None, location=None, marketing_permissions=None, ip_signup=None, timestamp_signup=None, ip_opt=None, timestamp_opt=None, tags=None):
'AddListMembers1 - a model defined in Swagger'
self._email_address = None
self._email_type = None
self._status = None
self._merge_fields = None
self._interests = None
self._language = None
self._vip = None
self._location = None
self._marketing_permissions = None
self._ip_signup = None
self._timestamp_signup = None
self._ip_opt = None
self._timestamp_opt = None
self._tags = None
self.discriminator = None
self.email_address = email_address
if (email_type is not None):
self.email_type = email_type
self.status = status
if (merge_fields is not None):
self.merge_fields = merge_fields
if (interests is not None):
self.interests = interests
if (language is not None):
self.language = language
if (vip is not None):
self.vip = vip
if (location is not None):
self.location = location
if (marketing_permissions is not None):
self.marketing_permissions = marketing_permissions
if (ip_signup is not None):
self.ip_signup = ip_signup
if (timestamp_signup is not None):
self.timestamp_signup = timestamp_signup
if (ip_opt is not None):
self.ip_opt = ip_opt
if (timestamp_opt is not None):
self.timestamp_opt = timestamp_opt
if (tags is not None):
self.tags = tags | 3,081,994,696,511,349,000 | AddListMembers1 - a model defined in Swagger | mailchimp_marketing_asyncio/models/add_list_members1.py | __init__ | RWiggers/mailchimp-marketing-asyncio | python | def __init__(self, email_address=None, email_type=None, status=None, merge_fields=None, interests=None, language=None, vip=None, location=None, marketing_permissions=None, ip_signup=None, timestamp_signup=None, ip_opt=None, timestamp_opt=None, tags=None):
self._email_address = None
self._email_type = None
self._status = None
self._merge_fields = None
self._interests = None
self._language = None
self._vip = None
self._location = None
self._marketing_permissions = None
self._ip_signup = None
self._timestamp_signup = None
self._ip_opt = None
self._timestamp_opt = None
self._tags = None
self.discriminator = None
self.email_address = email_address
if (email_type is not None):
self.email_type = email_type
self.status = status
if (merge_fields is not None):
self.merge_fields = merge_fields
if (interests is not None):
self.interests = interests
if (language is not None):
self.language = language
if (vip is not None):
self.vip = vip
if (location is not None):
self.location = location
if (marketing_permissions is not None):
self.marketing_permissions = marketing_permissions
if (ip_signup is not None):
self.ip_signup = ip_signup
if (timestamp_signup is not None):
self.timestamp_signup = timestamp_signup
if (ip_opt is not None):
self.ip_opt = ip_opt
if (timestamp_opt is not None):
self.timestamp_opt = timestamp_opt
if (tags is not None):
self.tags = tags |
@property
def email_address(self):
'Gets the email_address of this AddListMembers1. # noqa: E501\n\n Email address for a subscriber. # noqa: E501\n\n :return: The email_address of this AddListMembers1. # noqa: E501\n :rtype: str\n '
return self._email_address | -1,458,265,820,136,020,700 | Gets the email_address of this AddListMembers1. # noqa: E501
Email address for a subscriber. # noqa: E501
:return: The email_address of this AddListMembers1. # noqa: E501
:rtype: str | mailchimp_marketing_asyncio/models/add_list_members1.py | email_address | RWiggers/mailchimp-marketing-asyncio | python | @property
def email_address(self):
'Gets the email_address of this AddListMembers1. # noqa: E501\n\n Email address for a subscriber. # noqa: E501\n\n :return: The email_address of this AddListMembers1. # noqa: E501\n :rtype: str\n '
return self._email_address |
@email_address.setter
def email_address(self, email_address):
'Sets the email_address of this AddListMembers1.\n\n Email address for a subscriber. # noqa: E501\n\n :param email_address: The email_address of this AddListMembers1. # noqa: E501\n :type: str\n '
if (email_address is None):
raise ValueError('Invalid value for `email_address`, must not be `None`')
self._email_address = email_address | 7,775,084,497,611,737,000 | Sets the email_address of this AddListMembers1.
Email address for a subscriber. # noqa: E501
:param email_address: The email_address of this AddListMembers1. # noqa: E501
:type: str | mailchimp_marketing_asyncio/models/add_list_members1.py | email_address | RWiggers/mailchimp-marketing-asyncio | python | @email_address.setter
def email_address(self, email_address):
'Sets the email_address of this AddListMembers1.\n\n Email address for a subscriber. # noqa: E501\n\n :param email_address: The email_address of this AddListMembers1. # noqa: E501\n :type: str\n '
if (email_address is None):
raise ValueError('Invalid value for `email_address`, must not be `None`')
self._email_address = email_address |
@property
def email_type(self):
"Gets the email_type of this AddListMembers1. # noqa: E501\n\n Type of email this member asked to get ('html' or 'text'). # noqa: E501\n\n :return: The email_type of this AddListMembers1. # noqa: E501\n :rtype: str\n "
return self._email_type | 995,840,052,661,736,000 | Gets the email_type of this AddListMembers1. # noqa: E501
Type of email this member asked to get ('html' or 'text'). # noqa: E501
:return: The email_type of this AddListMembers1. # noqa: E501
:rtype: str | mailchimp_marketing_asyncio/models/add_list_members1.py | email_type | RWiggers/mailchimp-marketing-asyncio | python | @property
def email_type(self):
"Gets the email_type of this AddListMembers1. # noqa: E501\n\n Type of email this member asked to get ('html' or 'text'). # noqa: E501\n\n :return: The email_type of this AddListMembers1. # noqa: E501\n :rtype: str\n "
return self._email_type |
@email_type.setter
def email_type(self, email_type):
"Sets the email_type of this AddListMembers1.\n\n Type of email this member asked to get ('html' or 'text'). # noqa: E501\n\n :param email_type: The email_type of this AddListMembers1. # noqa: E501\n :type: str\n "
self._email_type = email_type | -8,059,765,046,876,889,000 | Sets the email_type of this AddListMembers1.
Type of email this member asked to get ('html' or 'text'). # noqa: E501
:param email_type: The email_type of this AddListMembers1. # noqa: E501
:type: str | mailchimp_marketing_asyncio/models/add_list_members1.py | email_type | RWiggers/mailchimp-marketing-asyncio | python | @email_type.setter
def email_type(self, email_type):
"Sets the email_type of this AddListMembers1.\n\n Type of email this member asked to get ('html' or 'text'). # noqa: E501\n\n :param email_type: The email_type of this AddListMembers1. # noqa: E501\n :type: str\n "
self._email_type = email_type |
@property
def status(self):
"Gets the status of this AddListMembers1. # noqa: E501\n\n Subscriber's current status. # noqa: E501\n\n :return: The status of this AddListMembers1. # noqa: E501\n :rtype: str\n "
return self._status | -8,841,901,392,601,686,000 | Gets the status of this AddListMembers1. # noqa: E501
Subscriber's current status. # noqa: E501
:return: The status of this AddListMembers1. # noqa: E501
:rtype: str | mailchimp_marketing_asyncio/models/add_list_members1.py | status | RWiggers/mailchimp-marketing-asyncio | python | @property
def status(self):
"Gets the status of this AddListMembers1. # noqa: E501\n\n Subscriber's current status. # noqa: E501\n\n :return: The status of this AddListMembers1. # noqa: E501\n :rtype: str\n "
return self._status |
@status.setter
def status(self, status):
"Sets the status of this AddListMembers1.\n\n Subscriber's current status. # noqa: E501\n\n :param status: The status of this AddListMembers1. # noqa: E501\n :type: str\n "
if (status is None):
raise ValueError('Invalid value for `status`, must not be `None`')
allowed_values = ['subscribed', 'unsubscribed', 'cleaned', 'pending', 'transactional']
if (status not in allowed_values):
raise ValueError('Invalid value for `status` ({0}), must be one of {1}'.format(status, allowed_values))
self._status = status | -4,489,359,152,190,322,700 | Sets the status of this AddListMembers1.
Subscriber's current status. # noqa: E501
:param status: The status of this AddListMembers1. # noqa: E501
:type: str | mailchimp_marketing_asyncio/models/add_list_members1.py | status | RWiggers/mailchimp-marketing-asyncio | python | @status.setter
def status(self, status):
"Sets the status of this AddListMembers1.\n\n Subscriber's current status. # noqa: E501\n\n :param status: The status of this AddListMembers1. # noqa: E501\n :type: str\n "
if (status is None):
raise ValueError('Invalid value for `status`, must not be `None`')
allowed_values = ['subscribed', 'unsubscribed', 'cleaned', 'pending', 'transactional']
if (status not in allowed_values):
raise ValueError('Invalid value for `status` ({0}), must be one of {1}'.format(status, allowed_values))
self._status = status |
@property
def merge_fields(self):
'Gets the merge_fields of this AddListMembers1. # noqa: E501\n\n A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501\n\n :return: The merge_fields of this AddListMembers1. # noqa: E501\n :rtype: dict(str, object)\n '
return self._merge_fields | 5,729,846,895,573,442,000 | Gets the merge_fields of this AddListMembers1. # noqa: E501
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:return: The merge_fields of this AddListMembers1. # noqa: E501
:rtype: dict(str, object) | mailchimp_marketing_asyncio/models/add_list_members1.py | merge_fields | RWiggers/mailchimp-marketing-asyncio | python | @property
def merge_fields(self):
'Gets the merge_fields of this AddListMembers1. # noqa: E501\n\n A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501\n\n :return: The merge_fields of this AddListMembers1. # noqa: E501\n :rtype: dict(str, object)\n '
return self._merge_fields |
@merge_fields.setter
def merge_fields(self, merge_fields):
'Sets the merge_fields of this AddListMembers1.\n\n A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501\n\n :param merge_fields: The merge_fields of this AddListMembers1. # noqa: E501\n :type: dict(str, object)\n '
self._merge_fields = merge_fields | 1,798,863,505,504,119,800 | Sets the merge_fields of this AddListMembers1.
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:param merge_fields: The merge_fields of this AddListMembers1. # noqa: E501
:type: dict(str, object) | mailchimp_marketing_asyncio/models/add_list_members1.py | merge_fields | RWiggers/mailchimp-marketing-asyncio | python | @merge_fields.setter
def merge_fields(self, merge_fields):
'Sets the merge_fields of this AddListMembers1.\n\n A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501\n\n :param merge_fields: The merge_fields of this AddListMembers1. # noqa: E501\n :type: dict(str, object)\n '
self._merge_fields = merge_fields |
@property
def interests(self):
"Gets the interests of this AddListMembers1. # noqa: E501\n\n The key of this object's properties is the ID of the interest in question. # noqa: E501\n\n :return: The interests of this AddListMembers1. # noqa: E501\n :rtype: dict(str, bool)\n "
return self._interests | -7,395,813,785,538,401,000 | Gets the interests of this AddListMembers1. # noqa: E501
The key of this object's properties is the ID of the interest in question. # noqa: E501
:return: The interests of this AddListMembers1. # noqa: E501
:rtype: dict(str, bool) | mailchimp_marketing_asyncio/models/add_list_members1.py | interests | RWiggers/mailchimp-marketing-asyncio | python | @property
def interests(self):
"Gets the interests of this AddListMembers1. # noqa: E501\n\n The key of this object's properties is the ID of the interest in question. # noqa: E501\n\n :return: The interests of this AddListMembers1. # noqa: E501\n :rtype: dict(str, bool)\n "
return self._interests |
@interests.setter
def interests(self, interests):
"Sets the interests of this AddListMembers1.\n\n The key of this object's properties is the ID of the interest in question. # noqa: E501\n\n :param interests: The interests of this AddListMembers1. # noqa: E501\n :type: dict(str, bool)\n "
self._interests = interests | -4,364,914,497,824,911,000 | Sets the interests of this AddListMembers1.
The key of this object's properties is the ID of the interest in question. # noqa: E501
:param interests: The interests of this AddListMembers1. # noqa: E501
:type: dict(str, bool) | mailchimp_marketing_asyncio/models/add_list_members1.py | interests | RWiggers/mailchimp-marketing-asyncio | python | @interests.setter
def interests(self, interests):
"Sets the interests of this AddListMembers1.\n\n The key of this object's properties is the ID of the interest in question. # noqa: E501\n\n :param interests: The interests of this AddListMembers1. # noqa: E501\n :type: dict(str, bool)\n "
self._interests = interests |
@property
def language(self):
"Gets the language of this AddListMembers1. # noqa: E501\n\n If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501\n\n :return: The language of this AddListMembers1. # noqa: E501\n :rtype: str\n "
return self._language | 5,055,788,823,884,538,000 | Gets the language of this AddListMembers1. # noqa: E501
If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501
:return: The language of this AddListMembers1. # noqa: E501
:rtype: str | mailchimp_marketing_asyncio/models/add_list_members1.py | language | RWiggers/mailchimp-marketing-asyncio | python | @property
def language(self):
"Gets the language of this AddListMembers1. # noqa: E501\n\n If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501\n\n :return: The language of this AddListMembers1. # noqa: E501\n :rtype: str\n "
return self._language |
@language.setter
def language(self, language):
"Sets the language of this AddListMembers1.\n\n If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501\n\n :param language: The language of this AddListMembers1. # noqa: E501\n :type: str\n "
self._language = language | 3,258,681,828,675,341,300 | Sets the language of this AddListMembers1.
If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501
:param language: The language of this AddListMembers1. # noqa: E501
:type: str | mailchimp_marketing_asyncio/models/add_list_members1.py | language | RWiggers/mailchimp-marketing-asyncio | python | @language.setter
def language(self, language):
"Sets the language of this AddListMembers1.\n\n If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501\n\n :param language: The language of this AddListMembers1. # noqa: E501\n :type: str\n "
self._language = language |
@property
def vip(self):
'Gets the vip of this AddListMembers1. # noqa: E501\n\n [VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501\n\n :return: The vip of this AddListMembers1. # noqa: E501\n :rtype: bool\n '
return self._vip | -392,789,501,235,498,200 | Gets the vip of this AddListMembers1. # noqa: E501
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:return: The vip of this AddListMembers1. # noqa: E501
:rtype: bool | mailchimp_marketing_asyncio/models/add_list_members1.py | vip | RWiggers/mailchimp-marketing-asyncio | python | @property
def vip(self):
'Gets the vip of this AddListMembers1. # noqa: E501\n\n [VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501\n\n :return: The vip of this AddListMembers1. # noqa: E501\n :rtype: bool\n '
return self._vip |
@vip.setter
def vip(self, vip):
'Sets the vip of this AddListMembers1.\n\n [VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501\n\n :param vip: The vip of this AddListMembers1. # noqa: E501\n :type: bool\n '
self._vip = vip | -5,151,318,988,252,167,000 | Sets the vip of this AddListMembers1.
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:param vip: The vip of this AddListMembers1. # noqa: E501
:type: bool | mailchimp_marketing_asyncio/models/add_list_members1.py | vip | RWiggers/mailchimp-marketing-asyncio | python | @vip.setter
def vip(self, vip):
'Sets the vip of this AddListMembers1.\n\n [VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501\n\n :param vip: The vip of this AddListMembers1. # noqa: E501\n :type: bool\n '
self._vip = vip |
@property
def location(self):
'Gets the location of this AddListMembers1. # noqa: E501\n\n\n :return: The location of this AddListMembers1. # noqa: E501\n :rtype: Location\n '
return self._location | 4,798,484,802,868,938,000 | Gets the location of this AddListMembers1. # noqa: E501
:return: The location of this AddListMembers1. # noqa: E501
:rtype: Location | mailchimp_marketing_asyncio/models/add_list_members1.py | location | RWiggers/mailchimp-marketing-asyncio | python | @property
def location(self):
'Gets the location of this AddListMembers1. # noqa: E501\n\n\n :return: The location of this AddListMembers1. # noqa: E501\n :rtype: Location\n '
return self._location |
@location.setter
def location(self, location):
'Sets the location of this AddListMembers1.\n\n\n :param location: The location of this AddListMembers1. # noqa: E501\n :type: Location\n '
self._location = location | -9,192,074,986,748,529,000 | Sets the location of this AddListMembers1.
:param location: The location of this AddListMembers1. # noqa: E501
:type: Location | mailchimp_marketing_asyncio/models/add_list_members1.py | location | RWiggers/mailchimp-marketing-asyncio | python | @location.setter
def location(self, location):
'Sets the location of this AddListMembers1.\n\n\n :param location: The location of this AddListMembers1. # noqa: E501\n :type: Location\n '
self._location = location |
@property
def marketing_permissions(self):
'Gets the marketing_permissions of this AddListMembers1. # noqa: E501\n\n The marketing permissions for the subscriber. # noqa: E501\n\n :return: The marketing_permissions of this AddListMembers1. # noqa: E501\n :rtype: list[MarketingPermission1]\n '
return self._marketing_permissions | -209,653,211,692,470,000 | Gets the marketing_permissions of this AddListMembers1. # noqa: E501
The marketing permissions for the subscriber. # noqa: E501
:return: The marketing_permissions of this AddListMembers1. # noqa: E501
:rtype: list[MarketingPermission1] | mailchimp_marketing_asyncio/models/add_list_members1.py | marketing_permissions | RWiggers/mailchimp-marketing-asyncio | python | @property
def marketing_permissions(self):
'Gets the marketing_permissions of this AddListMembers1. # noqa: E501\n\n The marketing permissions for the subscriber. # noqa: E501\n\n :return: The marketing_permissions of this AddListMembers1. # noqa: E501\n :rtype: list[MarketingPermission1]\n '
return self._marketing_permissions |
@marketing_permissions.setter
def marketing_permissions(self, marketing_permissions):
'Sets the marketing_permissions of this AddListMembers1.\n\n The marketing permissions for the subscriber. # noqa: E501\n\n :param marketing_permissions: The marketing_permissions of this AddListMembers1. # noqa: E501\n :type: list[MarketingPermission1]\n '
self._marketing_permissions = marketing_permissions | 4,081,623,534,439,569,400 | Sets the marketing_permissions of this AddListMembers1.
The marketing permissions for the subscriber. # noqa: E501
:param marketing_permissions: The marketing_permissions of this AddListMembers1. # noqa: E501
:type: list[MarketingPermission1] | mailchimp_marketing_asyncio/models/add_list_members1.py | marketing_permissions | RWiggers/mailchimp-marketing-asyncio | python | @marketing_permissions.setter
def marketing_permissions(self, marketing_permissions):
'Sets the marketing_permissions of this AddListMembers1.\n\n The marketing permissions for the subscriber. # noqa: E501\n\n :param marketing_permissions: The marketing_permissions of this AddListMembers1. # noqa: E501\n :type: list[MarketingPermission1]\n '
self._marketing_permissions = marketing_permissions |
@property
def ip_signup(self):
'Gets the ip_signup of this AddListMembers1. # noqa: E501\n\n IP address the subscriber signed up from. # noqa: E501\n\n :return: The ip_signup of this AddListMembers1. # noqa: E501\n :rtype: str\n '
return self._ip_signup | -9,131,692,653,842,025,000 | Gets the ip_signup of this AddListMembers1. # noqa: E501
IP address the subscriber signed up from. # noqa: E501
:return: The ip_signup of this AddListMembers1. # noqa: E501
:rtype: str | mailchimp_marketing_asyncio/models/add_list_members1.py | ip_signup | RWiggers/mailchimp-marketing-asyncio | python | @property
def ip_signup(self):
'Gets the ip_signup of this AddListMembers1. # noqa: E501\n\n IP address the subscriber signed up from. # noqa: E501\n\n :return: The ip_signup of this AddListMembers1. # noqa: E501\n :rtype: str\n '
return self._ip_signup |
@ip_signup.setter
def ip_signup(self, ip_signup):
'Sets the ip_signup of this AddListMembers1.\n\n IP address the subscriber signed up from. # noqa: E501\n\n :param ip_signup: The ip_signup of this AddListMembers1. # noqa: E501\n :type: str\n '
self._ip_signup = ip_signup | 8,695,358,424,534,109,000 | Sets the ip_signup of this AddListMembers1.
IP address the subscriber signed up from. # noqa: E501
:param ip_signup: The ip_signup of this AddListMembers1. # noqa: E501
:type: str | mailchimp_marketing_asyncio/models/add_list_members1.py | ip_signup | RWiggers/mailchimp-marketing-asyncio | python | @ip_signup.setter
def ip_signup(self, ip_signup):
'Sets the ip_signup of this AddListMembers1.\n\n IP address the subscriber signed up from. # noqa: E501\n\n :param ip_signup: The ip_signup of this AddListMembers1. # noqa: E501\n :type: str\n '
self._ip_signup = ip_signup |
@property
def timestamp_signup(self):
'Gets the timestamp_signup of this AddListMembers1. # noqa: E501\n\n The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501\n\n :return: The timestamp_signup of this AddListMembers1. # noqa: E501\n :rtype: datetime\n '
return self._timestamp_signup | 7,031,646,117,093,732,000 | Gets the timestamp_signup of this AddListMembers1. # noqa: E501
The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501
:return: The timestamp_signup of this AddListMembers1. # noqa: E501
:rtype: datetime | mailchimp_marketing_asyncio/models/add_list_members1.py | timestamp_signup | RWiggers/mailchimp-marketing-asyncio | python | @property
def timestamp_signup(self):
'Gets the timestamp_signup of this AddListMembers1. # noqa: E501\n\n The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501\n\n :return: The timestamp_signup of this AddListMembers1. # noqa: E501\n :rtype: datetime\n '
return self._timestamp_signup |
@timestamp_signup.setter
def timestamp_signup(self, timestamp_signup):
'Sets the timestamp_signup of this AddListMembers1.\n\n The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501\n\n :param timestamp_signup: The timestamp_signup of this AddListMembers1. # noqa: E501\n :type: datetime\n '
self._timestamp_signup = timestamp_signup | -2,677,855,865,278,813,000 | Sets the timestamp_signup of this AddListMembers1.
The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501
:param timestamp_signup: The timestamp_signup of this AddListMembers1. # noqa: E501
:type: datetime | mailchimp_marketing_asyncio/models/add_list_members1.py | timestamp_signup | RWiggers/mailchimp-marketing-asyncio | python | @timestamp_signup.setter
def timestamp_signup(self, timestamp_signup):
'Sets the timestamp_signup of this AddListMembers1.\n\n The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501\n\n :param timestamp_signup: The timestamp_signup of this AddListMembers1. # noqa: E501\n :type: datetime\n '
self._timestamp_signup = timestamp_signup |
@property
def ip_opt(self):
'Gets the ip_opt of this AddListMembers1. # noqa: E501\n\n The IP address the subscriber used to confirm their opt-in status. # noqa: E501\n\n :return: The ip_opt of this AddListMembers1. # noqa: E501\n :rtype: str\n '
return self._ip_opt | -4,255,675,526,227,299,000 | Gets the ip_opt of this AddListMembers1. # noqa: E501
The IP address the subscriber used to confirm their opt-in status. # noqa: E501
:return: The ip_opt of this AddListMembers1. # noqa: E501
:rtype: str | mailchimp_marketing_asyncio/models/add_list_members1.py | ip_opt | RWiggers/mailchimp-marketing-asyncio | python | @property
def ip_opt(self):
'Gets the ip_opt of this AddListMembers1. # noqa: E501\n\n The IP address the subscriber used to confirm their opt-in status. # noqa: E501\n\n :return: The ip_opt of this AddListMembers1. # noqa: E501\n :rtype: str\n '
return self._ip_opt |
@ip_opt.setter
def ip_opt(self, ip_opt):
'Sets the ip_opt of this AddListMembers1.\n\n The IP address the subscriber used to confirm their opt-in status. # noqa: E501\n\n :param ip_opt: The ip_opt of this AddListMembers1. # noqa: E501\n :type: str\n '
self._ip_opt = ip_opt | -3,171,704,755,062,573,000 | Sets the ip_opt of this AddListMembers1.
The IP address the subscriber used to confirm their opt-in status. # noqa: E501
:param ip_opt: The ip_opt of this AddListMembers1. # noqa: E501
:type: str | mailchimp_marketing_asyncio/models/add_list_members1.py | ip_opt | RWiggers/mailchimp-marketing-asyncio | python | @ip_opt.setter
def ip_opt(self, ip_opt):
'Sets the ip_opt of this AddListMembers1.\n\n The IP address the subscriber used to confirm their opt-in status. # noqa: E501\n\n :param ip_opt: The ip_opt of this AddListMembers1. # noqa: E501\n :type: str\n '
self._ip_opt = ip_opt |
@property
def timestamp_opt(self):
        'Gets the timestamp_opt of this AddListMembers1. # noqa: E501\n\n The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501\n\n :return: The timestamp_opt of this AddListMembers1. # noqa: E501\n :rtype: datetime\n '
return self._timestamp_opt | 1,856,651,276,959,445,200 | Gets the timestamp_opt of this AddListMembers1. # noqa: E501
The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501
:return: The timestamp_opt of this AddListMembers1. # noqa: E501
:rtype: datetime | mailchimp_marketing_asyncio/models/add_list_members1.py | timestamp_opt | RWiggers/mailchimp-marketing-asyncio | python | @property
def timestamp_opt(self):
        'Gets the timestamp_opt of this AddListMembers1. # noqa: E501\n\n The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501\n\n :return: The timestamp_opt of this AddListMembers1. # noqa: E501\n :rtype: datetime\n '
return self._timestamp_opt |
@timestamp_opt.setter
def timestamp_opt(self, timestamp_opt):
        'Sets the timestamp_opt of this AddListMembers1.\n\n The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501\n\n :param timestamp_opt: The timestamp_opt of this AddListMembers1. # noqa: E501\n :type: datetime\n '
self._timestamp_opt = timestamp_opt | -7,590,912,189,221,661,000 | Sets the timestamp_opt of this AddListMembers1.
The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501
:param timestamp_opt: The timestamp_opt of this AddListMembers1. # noqa: E501
:type: datetime | mailchimp_marketing_asyncio/models/add_list_members1.py | timestamp_opt | RWiggers/mailchimp-marketing-asyncio | python | @timestamp_opt.setter
def timestamp_opt(self, timestamp_opt):
        'Sets the timestamp_opt of this AddListMembers1.\n\n The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501\n\n :param timestamp_opt: The timestamp_opt of this AddListMembers1. # noqa: E501\n :type: datetime\n '
self._timestamp_opt = timestamp_opt |
@property
def tags(self):
'Gets the tags of this AddListMembers1. # noqa: E501\n\n The tags that are associated with a member. # noqa: E501\n\n :return: The tags of this AddListMembers1. # noqa: E501\n :rtype: list[str]\n '
return self._tags | -1,838,366,037,586,048,800 | Gets the tags of this AddListMembers1. # noqa: E501
The tags that are associated with a member. # noqa: E501
:return: The tags of this AddListMembers1. # noqa: E501
:rtype: list[str] | mailchimp_marketing_asyncio/models/add_list_members1.py | tags | RWiggers/mailchimp-marketing-asyncio | python | @property
def tags(self):
'Gets the tags of this AddListMembers1. # noqa: E501\n\n The tags that are associated with a member. # noqa: E501\n\n :return: The tags of this AddListMembers1. # noqa: E501\n :rtype: list[str]\n '
return self._tags |
@tags.setter
def tags(self, tags):
'Sets the tags of this AddListMembers1.\n\n The tags that are associated with a member. # noqa: E501\n\n :param tags: The tags of this AddListMembers1. # noqa: E501\n :type: list[str]\n '
self._tags = tags | -8,568,244,354,649,068,000 | Sets the tags of this AddListMembers1.
The tags that are associated with a member. # noqa: E501
:param tags: The tags of this AddListMembers1. # noqa: E501
:type: list[str] | mailchimp_marketing_asyncio/models/add_list_members1.py | tags | RWiggers/mailchimp-marketing-asyncio | python | @tags.setter
def tags(self, tags):
'Sets the tags of this AddListMembers1.\n\n The tags that are associated with a member. # noqa: E501\n\n :param tags: The tags of this AddListMembers1. # noqa: E501\n :type: list[str]\n '
self._tags = tags |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(AddListMembers1, dict):
for (key, value) in self.items():
result[key] = value
return result | -6,043,816,492,661,430,000 | Returns the model properties as a dict | mailchimp_marketing_asyncio/models/add_list_members1.py | to_dict | RWiggers/mailchimp-marketing-asyncio | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(AddListMembers1, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | mailchimp_marketing_asyncio/models/add_list_members1.py | to_str | RWiggers/mailchimp-marketing-asyncio | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | mailchimp_marketing_asyncio/models/add_list_members1.py | __repr__ | RWiggers/mailchimp-marketing-asyncio | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, AddListMembers1)):
return False
return (self.__dict__ == other.__dict__) | -6,343,505,069,438,291,000 | Returns true if both objects are equal | mailchimp_marketing_asyncio/models/add_list_members1.py | __eq__ | RWiggers/mailchimp-marketing-asyncio | python | def __eq__(self, other):
if (not isinstance(other, AddListMembers1)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | mailchimp_marketing_asyncio/models/add_list_members1.py | __ne__ | RWiggers/mailchimp-marketing-asyncio | python | def __ne__(self, other):
return (not (self == other)) |
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
'Init object.'
self.config_entry = config_entry | -2,559,020,865,665,428,000 | Init object. | homeassistant/components/xiaomi_miio/config_flow.py | __init__ | 0xFEEDC0DE64/homeassistant-core | python | def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
self.config_entry = config_entry |
async def async_step_init(self, user_input=None):
'Manage the options.'
errors = {}
if (user_input is not None):
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if (use_cloud and ((not cloud_username) or (not cloud_password) or (not cloud_country))):
errors['base'] = 'cloud_credentials_incomplete'
self.hass.async_create_task(self.hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_REAUTH}, data=self.config_entry.data))
if (not errors):
return self.async_create_entry(title='', data=user_input)
settings_schema = vol.Schema({vol.Optional(CONF_CLOUD_SUBDEVICES, default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False)): bool})
return self.async_show_form(step_id='init', data_schema=settings_schema, errors=errors) | -6,022,703,587,631,939,000 | Manage the options. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_init | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_init(self, user_input=None):
errors = {}
if (user_input is not None):
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if (use_cloud and ((not cloud_username) or (not cloud_password) or (not cloud_country))):
errors['base'] = 'cloud_credentials_incomplete'
self.hass.async_create_task(self.hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_REAUTH}, data=self.config_entry.data))
if (not errors):
return self.async_create_entry(title=, data=user_input)
settings_schema = vol.Schema({vol.Optional(CONF_CLOUD_SUBDEVICES, default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False)): bool})
return self.async_show_form(step_id='init', data_schema=settings_schema, errors=errors) |
def __init__(self):
'Initialize.'
self.host = None
self.mac = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices = {} | -5,904,689,502,503,525,000 | Initialize. | homeassistant/components/xiaomi_miio/config_flow.py | __init__ | 0xFEEDC0DE64/homeassistant-core | python | def __init__(self):
self.host = None
self.mac = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices = {} |
@staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
'Get the options flow.'
return OptionsFlowHandler(config_entry) | 5,334,322,877,548,666,000 | Get the options flow. | homeassistant/components/xiaomi_miio/config_flow.py | async_get_options_flow | 0xFEEDC0DE64/homeassistant-core | python | @staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
return OptionsFlowHandler(config_entry) |
async def async_step_reauth(self, user_input=None):
'Perform reauth upon an authentication error or missing cloud credentials.'
self.host = user_input[CONF_HOST]
self.token = user_input[CONF_TOKEN]
self.mac = user_input[CONF_MAC]
self.model = user_input.get(CONF_MODEL)
return (await self.async_step_reauth_confirm()) | 8,628,532,616,613,430,000 | Perform reauth upon an authentication error or missing cloud credentials. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_reauth | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_reauth(self, user_input=None):
self.host = user_input[CONF_HOST]
self.token = user_input[CONF_TOKEN]
self.mac = user_input[CONF_MAC]
self.model = user_input.get(CONF_MODEL)
return (await self.async_step_reauth_confirm()) |
async def async_step_reauth_confirm(self, user_input=None):
'Dialog that informs the user that reauth is required.'
if (user_input is not None):
return (await self.async_step_cloud())
return self.async_show_form(step_id='reauth_confirm', data_schema=vol.Schema({})) | -3,886,696,658,973,102,600 | Dialog that informs the user that reauth is required. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_reauth_confirm | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_reauth_confirm(self, user_input=None):
if (user_input is not None):
return (await self.async_step_cloud())
return self.async_show_form(step_id='reauth_confirm', data_schema=vol.Schema({})) |
async def async_step_import(self, conf: dict):
'Import a configuration from config.yaml.'
self.host = conf[CONF_HOST]
self.token = conf[CONF_TOKEN]
self.name = conf.get(CONF_NAME)
self.model = conf.get(CONF_MODEL)
self.context.update({'title_placeholders': {'name': f'YAML import {self.host}'}})
return (await self.async_step_connect()) | 1,010,408,588,209,126,900 | Import a configuration from config.yaml. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_import | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_import(self, conf: dict):
self.host = conf[CONF_HOST]
self.token = conf[CONF_TOKEN]
self.name = conf.get(CONF_NAME)
self.model = conf.get(CONF_MODEL)
self.context.update({'title_placeholders': {'name': f'YAML import {self.host}'}})
return (await self.async_step_connect()) |
async def async_step_user(self, user_input=None):
'Handle a flow initialized by the user.'
return (await self.async_step_cloud()) | 3,497,861,031,134,423,000 | Handle a flow initialized by the user. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_user | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_user(self, user_input=None):
return (await self.async_step_cloud()) |
async def async_step_zeroconf(self, discovery_info):
'Handle zeroconf discovery.'
name = discovery_info.get('name')
self.host = discovery_info.get('host')
self.mac = discovery_info.get('properties', {}).get('mac')
if (self.mac is None):
poch = discovery_info.get('properties', {}).get('poch', '')
result = search('mac=\\w+', poch)
if (result is not None):
self.mac = result.group(0).split('=')[1]
if ((not name) or (not self.host) or (not self.mac)):
return self.async_abort(reason='not_xiaomi_miio')
self.mac = format_mac(self.mac)
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace('.', '-')):
unique_id = self.mac
(await self.async_set_unique_id(unique_id))
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update({'title_placeholders': {'name': f'Gateway {self.host}'}})
return (await self.async_step_cloud())
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace('.', '-')):
unique_id = self.mac
(await self.async_set_unique_id(unique_id))
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update({'title_placeholders': {'name': f'{device_model} {self.host}'}})
return (await self.async_step_cloud())
_LOGGER.debug("Not yet supported Xiaomi Miio device '%s' discovered with host %s", name, self.host)
return self.async_abort(reason='not_xiaomi_miio') | 6,474,276,426,894,509,000 | Handle zeroconf discovery. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_zeroconf | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_zeroconf(self, discovery_info):
name = discovery_info.get('name')
self.host = discovery_info.get('host')
self.mac = discovery_info.get('properties', {}).get('mac')
if (self.mac is None):
poch = discovery_info.get('properties', {}).get('poch', )
result = search('mac=\\w+', poch)
if (result is not None):
self.mac = result.group(0).split('=')[1]
if ((not name) or (not self.host) or (not self.mac)):
return self.async_abort(reason='not_xiaomi_miio')
self.mac = format_mac(self.mac)
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace('.', '-')):
unique_id = self.mac
(await self.async_set_unique_id(unique_id))
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update({'title_placeholders': {'name': f'Gateway {self.host}'}})
return (await self.async_step_cloud())
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace('.', '-')):
unique_id = self.mac
(await self.async_set_unique_id(unique_id))
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update({'title_placeholders': {'name': f'{device_model} {self.host}'}})
return (await self.async_step_cloud())
_LOGGER.debug("Not yet supported Xiaomi Miio device '%s' discovered with host %s", name, self.host)
return self.async_abort(reason='not_xiaomi_miio') |
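A small illustration of the fallback MAC extraction from the zeroconf `poch` property used above; the property string is hypothetical.

```python
from re import search

poch = 'ct=2:mac=AABBCCDDEEFF'  # hypothetical zeroconf 'poch' value
match = search('mac=\\w+', poch)
if match is not None:
    print(match.group(0).split('=')[1])  # AABBCCDDEEFF
```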
def extract_cloud_info(self, cloud_device_info):
'Extract the cloud info.'
if (self.host is None):
self.host = cloud_device_info['localip']
if (self.mac is None):
self.mac = format_mac(cloud_device_info['mac'])
if (self.model is None):
self.model = cloud_device_info['model']
if (self.name is None):
self.name = cloud_device_info['name']
self.token = cloud_device_info['token'] | 1,728,987,662,477,281,300 | Extract the cloud info. | homeassistant/components/xiaomi_miio/config_flow.py | extract_cloud_info | 0xFEEDC0DE64/homeassistant-core | python | def extract_cloud_info(self, cloud_device_info):
if (self.host is None):
self.host = cloud_device_info['localip']
if (self.mac is None):
self.mac = format_mac(cloud_device_info['mac'])
if (self.model is None):
self.model = cloud_device_info['model']
if (self.name is None):
self.name = cloud_device_info['name']
self.token = cloud_device_info['token'] |
async def async_step_cloud(self, user_input=None):
'Configure a xiaomi miio device through the Miio Cloud.'
errors = {}
if (user_input is not None):
if user_input[CONF_MANUAL]:
return (await self.async_step_manual())
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if ((not cloud_username) or (not cloud_password) or (not cloud_country)):
errors['base'] = 'cloud_credentials_incomplete'
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors)
miio_cloud = MiCloud(cloud_username, cloud_password)
if (not (await self.hass.async_add_executor_job(miio_cloud.login))):
errors['base'] = 'cloud_login_error'
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors)
devices_raw = (await self.hass.async_add_executor_job(miio_cloud.get_devices, cloud_country))
if (not devices_raw):
errors['base'] = 'cloud_no_devices'
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors)
self.cloud_devices = {}
for device in devices_raw:
parent_id = device.get('parent_id')
if (not parent_id):
name = device['name']
model = device['model']
list_name = f'{name} - {model}'
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if (self.host is not None):
for device in self.cloud_devices.values():
cloud_host = device.get('localip')
if (cloud_host == self.host):
self.extract_cloud_info(device)
return (await self.async_step_connect())
if (len(self.cloud_devices) == 1):
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return (await self.async_step_connect())
return (await self.async_step_select())
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors) | -5,096,489,469,025,828,000 | Configure a xiaomi miio device through the Miio Cloud. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_cloud | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_cloud(self, user_input=None):
errors = {}
if (user_input is not None):
if user_input[CONF_MANUAL]:
return (await self.async_step_manual())
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if ((not cloud_username) or (not cloud_password) or (not cloud_country)):
errors['base'] = 'cloud_credentials_incomplete'
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors)
miio_cloud = MiCloud(cloud_username, cloud_password)
if (not (await self.hass.async_add_executor_job(miio_cloud.login))):
errors['base'] = 'cloud_login_error'
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors)
devices_raw = (await self.hass.async_add_executor_job(miio_cloud.get_devices, cloud_country))
if (not devices_raw):
errors['base'] = 'cloud_no_devices'
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors)
self.cloud_devices = {}
for device in devices_raw:
parent_id = device.get('parent_id')
if (not parent_id):
name = device['name']
model = device['model']
list_name = f'{name} - {model}'
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if (self.host is not None):
for device in self.cloud_devices.values():
cloud_host = device.get('localip')
if (cloud_host == self.host):
self.extract_cloud_info(device)
return (await self.async_step_connect())
if (len(self.cloud_devices) == 1):
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return (await self.async_step_connect())
return (await self.async_step_select())
return self.async_show_form(step_id='cloud', data_schema=DEVICE_CLOUD_CONFIG, errors=errors) |
async def async_step_select(self, user_input=None):
'Handle multiple cloud devices found.'
errors = {}
if (user_input is not None):
cloud_device = self.cloud_devices[user_input['select_device']]
self.extract_cloud_info(cloud_device)
return (await self.async_step_connect())
select_schema = vol.Schema({vol.Required('select_device'): vol.In(list(self.cloud_devices))})
return self.async_show_form(step_id='select', data_schema=select_schema, errors=errors) | 5,268,307,135,454,738,000 | Handle multiple cloud devices found. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_select | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_select(self, user_input=None):
errors = {}
if (user_input is not None):
cloud_device = self.cloud_devices[user_input['select_device']]
self.extract_cloud_info(cloud_device)
return (await self.async_step_connect())
select_schema = vol.Schema({vol.Required('select_device'): vol.In(list(self.cloud_devices))})
return self.async_show_form(step_id='select', data_schema=select_schema, errors=errors) |
async def async_step_manual(self, user_input=None):
'Configure a xiaomi miio device manually.'
errors = {}
if (user_input is not None):
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return (await self.async_step_connect())
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id='manual', data_schema=schema, errors=errors) | 362,390,456,201,465,700 | Configure a xiaomi miio device manually. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_manual | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_manual(self, user_input=None):
errors = {}
if (user_input is not None):
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return (await self.async_step_connect())
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id='manual', data_schema=schema, errors=errors) |
async def async_step_connect(self, user_input=None):
'Connect to a xiaomi miio device.'
errors = {}
if ((self.host is None) or (self.token is None)):
return self.async_abort(reason='incomplete_info')
if (user_input is not None):
self.model = user_input[CONF_MODEL]
connect_device_class = ConnectXiaomiDevice(self.hass)
(await connect_device_class.async_connect_device(self.host, self.token))
device_info = connect_device_class.device_info
if ((self.model is None) and (device_info is not None)):
self.model = device_info.model
if (self.model is None):
errors['base'] = 'cannot_connect'
return self.async_show_form(step_id='connect', data_schema=DEVICE_MODEL_CONFIG, errors=errors)
if ((self.mac is None) and (device_info is not None)):
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = (await self.async_set_unique_id(unique_id, raise_on_progress=False))
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if ((self.cloud_username is not None) and (self.cloud_password is not None) and (self.cloud_country is not None)):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
self.hass.config_entries.async_update_entry(existing_entry, data=data)
(await self.hass.config_entries.async_reload(existing_entry.entry_id))
return self.async_abort(reason='reauth_successful')
if (self.name is None):
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if (flow_type is None):
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if (flow_type is not None):
return self.async_create_entry(title=self.name, data={CONF_FLOW_TYPE: flow_type, CONF_HOST: self.host, CONF_TOKEN: self.token, CONF_MODEL: self.model, CONF_MAC: self.mac, CONF_CLOUD_USERNAME: self.cloud_username, CONF_CLOUD_PASSWORD: self.cloud_password, CONF_CLOUD_COUNTRY: self.cloud_country})
errors['base'] = 'unknown_device'
return self.async_show_form(step_id='connect', data_schema=DEVICE_MODEL_CONFIG, errors=errors) | -748,321,618,283,200,400 | Connect to a xiaomi miio device. | homeassistant/components/xiaomi_miio/config_flow.py | async_step_connect | 0xFEEDC0DE64/homeassistant-core | python | async def async_step_connect(self, user_input=None):
errors = {}
if ((self.host is None) or (self.token is None)):
return self.async_abort(reason='incomplete_info')
if (user_input is not None):
self.model = user_input[CONF_MODEL]
connect_device_class = ConnectXiaomiDevice(self.hass)
(await connect_device_class.async_connect_device(self.host, self.token))
device_info = connect_device_class.device_info
if ((self.model is None) and (device_info is not None)):
self.model = device_info.model
if (self.model is None):
errors['base'] = 'cannot_connect'
return self.async_show_form(step_id='connect', data_schema=DEVICE_MODEL_CONFIG, errors=errors)
if ((self.mac is None) and (device_info is not None)):
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = (await self.async_set_unique_id(unique_id, raise_on_progress=False))
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if ((self.cloud_username is not None) and (self.cloud_password is not None) and (self.cloud_country is not None)):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
self.hass.config_entries.async_update_entry(existing_entry, data=data)
(await self.hass.config_entries.async_reload(existing_entry.entry_id))
return self.async_abort(reason='reauth_successful')
if (self.name is None):
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if (flow_type is None):
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if (flow_type is not None):
return self.async_create_entry(title=self.name, data={CONF_FLOW_TYPE: flow_type, CONF_HOST: self.host, CONF_TOKEN: self.token, CONF_MODEL: self.model, CONF_MAC: self.mac, CONF_CLOUD_USERNAME: self.cloud_username, CONF_CLOUD_PASSWORD: self.cloud_password, CONF_CLOUD_COUNTRY: self.cloud_country})
errors['base'] = 'unknown_device'
return self.async_show_form(step_id='connect', data_schema=DEVICE_MODEL_CONFIG, errors=errors) |
def is_jpeg(data):
"\n Check whether a bytes object (or similar) contains JPEG (JFIF) data.\n Returns False for truncated files.\n Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw error for missing EOI.\n\n :param data: JPEG (JFIF) data\n :return: True if JPEG\n "
return (data[:2] == b'\xff\xd8') | 4,500,547,273,639,981,600 | Check whether a bytes object (or similar) contains JPEG (JFIF) data.
Returns False for truncated files.
Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw errors for a missing EOI.
:param data: JPEG (JFIF) data
:return: True if JPEG | src/gulpio2/fileio.py | is_jpeg | kiyoon/GulpIO2 | python | def is_jpeg(data):
"\n Check whether a bytes object (or similar) contains JPEG (JFIF) data.\n Returns False for truncated files.\n Taken from simplejpeg.is_jpeg, but less strict because it doesn't check EOI, as most JPEG viewers don't really throw error for missing EOI.\n\n :param data: JPEG (JFIF) data\n :return: True if JPEG\n "
return (data[:2] == b'\xff\xd8') |
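`is_jpeg` only inspects the two-byte SOI marker (0xFF 0xD8) that opens every JPEG/JFIF stream, so it accepts a started-but-truncated JPEG and rejects everything else, including input shorter than two bytes. A quick check:

print(is_jpeg(b'\xff\xd8\xff\xe0' + b'\x00' * 16))  # True: starts with the SOI marker
print(is_jpeg(b'\x89PNG\r\n\x1a\n'))                # False: PNG magic, not JPEG
print(is_jpeg(b''))                                 # False: too short to carry the marker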
def calculate_chunk_slices(items_per_chunk, num_items):
'Calculate slices for indexing an adapter.\n\n Parameters\n ----------\n items_per_chunk: int\n Approximate number of items per chunk.\n num_items: int\n Total number of items.\n\n Returns\n -------\n list of slices\n\n '
assert (items_per_chunk > 0)
assert (num_items > 0)
return [slice(i, min((i + items_per_chunk), num_items)) for i in range(0, num_items, items_per_chunk)] | -1,868,767,188,175,562,800 | Calculate slices for indexing an adapter.
Parameters
----------
items_per_chunk: int
Approximate number of items per chunk.
num_items: int
Total number of items.
Returns
-------
list of slices | src/gulpio2/fileio.py | calculate_chunk_slices | kiyoon/GulpIO2 | python | def calculate_chunk_slices(items_per_chunk, num_items):
'Calculate slices for indexing an adapter.\n\n Parameters\n ----------\n items_per_chunk: int\n Approximate number of items per chunk.\n num_items: int\n Total number of items.\n\n Returns\n -------\n list of slices\n\n '
assert (items_per_chunk > 0)
assert (num_items > 0)
return [slice(i, min((i + items_per_chunk), num_items)) for i in range(0, num_items, items_per_chunk)] |
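A worked example of the slice arithmetic in `calculate_chunk_slices`: with 100 items per chunk and 350 items in total, the final chunk simply comes out shorter.

print(calculate_chunk_slices(items_per_chunk=100, num_items=350))
# [slice(0, 100), slice(100, 200), slice(200, 300), slice(300, 350)]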
def chunks(self):
' Return a generator over existing GulpChunk objects which are ready\n to be opened and read from. '
return self.__iter__() | 3,609,108,842,850,875,400 | Return a generator over existing GulpChunk objects which are ready
to be opened and read from. | src/gulpio2/fileio.py | chunks | kiyoon/GulpIO2 | python | def chunks(self):
' Return a generator over existing GulpChunk objects which are ready\n to be opened and read from. '
return self.__iter__() |
def new_chunks(self, total_new_chunks):
' Return a generator over freshly setup GulpChunk objects which are ready\n to be opened and written to.\n\n Parameters\n ----------\n total_new_chunks: int\n The total number of new chunks to initialize.\n '
return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._allocate_new_file_paths(total_new_chunks)) | 6,015,641,194,201,491,000 | Return a generator over freshly setup GulpChunk objects which are ready
to be opened and written to.
Parameters
----------
total_new_chunks: int
The total number of new chunks to initialize. | src/gulpio2/fileio.py | new_chunks | kiyoon/GulpIO2 | python | def new_chunks(self, total_new_chunks):
' Return a generator over freshly setup GulpChunk objects which are ready\n to be opened and written to.\n\n Parameters\n ----------\n total_new_chunks: int\n The total number of new chunks to initialize.\n '
return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in self._allocate_new_file_paths(total_new_chunks)) |
@contextmanager
def open(self, flag='rb'):
"Open the gulp chunk for reading.\n\n Parameters\n ----------\n flag: str\n 'rb': Read binary\n 'wb': Write binary\n 'ab': Append to binary\n\n Notes\n -----\n Works as a context manager but returns None.\n\n "
if (flag in ['wb', 'rb', 'ab']):
self.fp = open(self.data_file_path, flag)
else:
m = "This file does not support the mode: '{}'".format(flag)
raise NotImplementedError(m)
(yield)
if (flag in ['wb', 'ab']):
self.flush()
self.fp.close() | -4,415,841,470,561,487,000 | Open the gulp chunk for reading or writing.
Parameters
----------
flag: str
'rb': Read binary
'wb': Write binary
'ab': Append to binary
Notes
-----
Works as a context manager but returns None. | src/gulpio2/fileio.py | open | kiyoon/GulpIO2 | python | @contextmanager
def open(self, flag='rb'):
"Open the gulp chunk for reading.\n\n Parameters\n ----------\n flag: str\n 'rb': Read binary\n 'wb': Write binary\n 'ab': Append to binary\n\n Notes\n -----\n Works as a context manager but returns None.\n\n "
if (flag in ['wb', 'rb', 'ab']):
self.fp = open(self.data_file_path, flag)
else:
m = "This file does not support the mode: '{}'".format(flag)
raise NotImplementedError(m)
(yield)
if (flag in ['wb', 'ab']):
self.flush()
self.fp.close() |
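Because `open` yields nothing, callers use it purely as a setup/teardown guard around the chunk's file handle. A hedged usage sketch; the `chunk` instance and the item ID are assumed to come from elsewhere (for example from `new_chunks()` above):

# `chunk` is assumed to be an existing GulpChunk instance.
with chunk.open('rb'):                           # binds self.fp for reading
    frames, meta = chunk.read_frames('item_0')   # 'item_0' is a placeholder ID
# with 'wb' or 'ab', leaving the context flushes buffers and writes the meta file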
def flush(self):
'Flush all buffers and write the meta file.'
self.fp.flush()
self.serializer.dump(self.meta_dict, self.meta_file_path) | 9,160,402,057,853,548,000 | Flush all buffers and write the meta file. | src/gulpio2/fileio.py | flush | kiyoon/GulpIO2 | python | def flush(self):
self.fp.flush()
self.serializer.dump(self.meta_dict, self.meta_file_path) |
def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
' Append an item to the gulp.\n\n Parameters\n ----------\n id_ : str\n The ID of the item\n meta_data: dict\n The meta-data associated with the item.\n frames: list of numpy arrays\n The frames of the item as a list of numpy arrays consisting\n of image pixel values.\n\n '
self._append_meta(id_, meta_data)
self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality) | 4,820,802,838,144,094,000 | Append an item to the gulp.
Parameters
----------
id_ : str
The ID of the item
meta_data: dict
The meta-data associated with the item.
frames: list of numpy arrays
The frames of the item as a list of numpy arrays consisting
of image pixel values. | src/gulpio2/fileio.py | append | kiyoon/GulpIO2 | python | def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
' Append an item to the gulp.\n\n Parameters\n ----------\n id_ : str\n The ID of the item\n meta_data: dict\n The meta-data associated with the item.\n frames: list of numpy arrays\n The frames of the item as a list of numpy arrays consisting\n of image pixel values.\n\n '
self._append_meta(id_, meta_data)
self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality) |
def read_frames(self, id_, slice_=None):
' Read frames for a single item.\n\n Parameters\n ----------\n id_: str\n The ID of the item\n slice_: slice or list of ints:\n A slice or list of indices with which to select frames.\n\n Returns\n -------\n frames (list), meta (dict)\n The frames of the item as a list of numpy arrays consisting of\n image pixel values, and the metadata.\n\n '
(frame_infos, meta_data) = self._get_frame_infos(id_)
slice_element = (slice_ if (slice_ is not None) else slice(0, len(frame_infos)))
def extract_frame(frame_info):
self.fp.seek(frame_info.loc)
record = self.fp.read(frame_info.length)
img_str = record[:(len(record) - frame_info.pad)]
img = self.jpeg_decoder(img_str)
return img
if isinstance(slice_element, (list, np.ndarray)):
selected_frame_infos = [frame_infos[idx] for idx in slice_element]
else:
selected_frame_infos = frame_infos[slice_element]
frames = [extract_frame(frame_info) for frame_info in selected_frame_infos]
return (frames, meta_data) | -7,096,632,636,170,711,000 | Read frames for a single item.
Parameters
----------
id_: str
The ID of the item
slice_: slice or list of ints:
A slice or list of indices with which to select frames.
Returns
-------
frames (list), meta (dict)
The frames of the item as a list of numpy arrays consisting of
image pixel values, and the metadata. | src/gulpio2/fileio.py | read_frames | kiyoon/GulpIO2 | python | def read_frames(self, id_, slice_=None):
' Read frames for a single item.\n\n Parameters\n ----------\n id_: str\n The ID of the item\n slice_: slice or list of ints:\n A slice or list of indices with which to select frames.\n\n Returns\n -------\n frames (list), meta (dict)\n The frames of the item as a list of numpy arrays consisting of\n image pixel values, and the metadata.\n\n '
(frame_infos, meta_data) = self._get_frame_infos(id_)
slice_element = (slice_ if (slice_ is not None) else slice(0, len(frame_infos)))
def extract_frame(frame_info):
self.fp.seek(frame_info.loc)
record = self.fp.read(frame_info.length)
img_str = record[:(len(record) - frame_info.pad)]
img = self.jpeg_decoder(img_str)
return img
if isinstance(slice_element, (list, np.ndarray)):
selected_frame_infos = [frame_infos[idx] for idx in slice_element]
else:
selected_frame_infos = frame_infos[slice_element]
frames = [extract_frame(frame_info) for frame_info in selected_frame_infos]
return (frames, meta_data) |
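Each stored frame is addressed by a (loc, length, pad) triple: seek to loc, read length bytes, drop the trailing pad alignment bytes, then JPEG-decode what remains. A distilled, class-free sketch of the same record layout, assuming frame_info carries exactly those three fields:

def extract_frame(fp, frame_info, decoder):
    # One gulped frame = `length` bytes at offset `loc`,
    # of which the last `pad` bytes are alignment padding.
    fp.seek(frame_info.loc)
    record = fp.read(frame_info.length)
    return decoder(record[:len(record) - frame_info.pad])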
def iter_all(self, accepted_ids=None, shuffle=False):
' Iterate over all frames in the gulp.\n\n Parameters\n ----------\n accepted_ids: list of str\n A filter for accepted ids.\n shuffle: bool\n Shuffle the items or not.\n\n Returns\n -------\n iterator\n An iterator that yields a series of (frames, meta) tuples. See\n `read_frames` for details.\n '
ids = self.meta_dict.keys()
if (accepted_ids is not None):
intersection = list((set(ids) & set(accepted_ids)))
ids = [id_ for id_ in ids if (id_ in intersection)]
if shuffle:
ids = list(ids)
np.random.shuffle(ids)
with self.open('rb'):
for id_ in ids:
(frames, meta) = self.read_frames(id_)
(yield (frames, meta)) | 3,772,652,658,498,743,000 | Iterate over all frames in the gulp.
Parameters
----------
accepted_ids: list of str
A filter for accepted ids.
shuffle: bool
Shuffle the items or not.
Returns
-------
iterator
An iterator that yields a series of (frames, meta) tuples. See
`read_frames` for details. | src/gulpio2/fileio.py | iter_all | kiyoon/GulpIO2 | python | def iter_all(self, accepted_ids=None, shuffle=False):
' Iterate over all frames in the gulp.\n\n Parameters\n ----------\n accepted_ids: list of str\n A filter for accepted ids.\n shuffle: bool\n Shuffle the items or not.\n\n Returns\n -------\n iterator\n An iterator that yields a series of (frames, meta) tuples. See\n `read_frames` for details.\n '
ids = self.meta_dict.keys()
if (accepted_ids is not None):
intersection = list((set(ids) & set(accepted_ids)))
ids = [id_ for id_ in ids if (id_ in intersection)]
if shuffle:
ids = list(ids)
np.random.shuffle(ids)
with self.open('rb'):
for id_ in ids:
(frames, meta) = self.read_frames(id_)
(yield (frames, meta)) |
def write_chunk(self, output_chunk, input_slice):
'Write from an input slice in the adapter to an output chunk.\n\n Parameters\n ----------\n output_chunk: GulpChunk\n The chunk to write to\n input_slice: slice\n The slice to use from the adapter.\n\n '
with output_chunk.open('wb'):
for video in self.adapter.iter_data(input_slice):
id_ = video['id']
meta_data = video['meta']
frames = video['frames']
if (len(frames) > 0):
output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality())
else:
print('Failed to write video with id: {}; no frames'.format(id_)) | -3,292,202,568,721,897,000 | Write from an input slice in the adapter to an output chunk.
Parameters
----------
output_chunk: GulpChunk
The chunk to write to
input_slice: slice
The slice to use from the adapter. | src/gulpio2/fileio.py | write_chunk | kiyoon/GulpIO2 | python | def write_chunk(self, output_chunk, input_slice):
'Write from an input slice in the adapter to an output chunk.\n\n Parameters\n ----------\n output_chunk: GulpChunk\n The chunk to write to\n input_slice: slice\n The slice to use from the adapter.\n\n '
with output_chunk.open('wb'):
for video in self.adapter.iter_data(input_slice):
id_ = video['id']
meta_data = video['meta']
frames = video['frames']
if (len(frames) > 0):
output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality())
else:
print('Failed to write video with id: {}; no frames'.format(id_)) |
def __init__(self, gansynth_subset=False, estimate_f0_and_loudness=False, **kwargs):
'Constructs a NsynthConfig.\n\n Args:\n gansynth_subset: bool, whether to use the subset of the dataset introduced\n in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses\n acoustic-only instrument sources and limits the pitches to the interval\n [24, 84]. The train and test splits are also modified so that\n instruments (but not specific notes) overlap between them. See\n https://arxiv.org/abs/1902.08710 for more details.\n estimate_f0_and_loudness: bool, whether to estimate fundamental frequency\n (F0) and loudness for the audio (at 250 Hz) and add them to the set of\n features.\n **kwargs: keyword arguments forwarded to super.\n '
name_parts = []
if gansynth_subset:
name_parts.append('gansynth_subset')
else:
name_parts.append('full')
if estimate_f0_and_loudness:
name_parts.append('f0_and_loudness')
super(NsynthConfig, self).__init__(name='.'.join(name_parts), version=tfds.core.Version('1.1.0', experiments={tfds.core.Experiment.S3: False}), **kwargs)
self.gansynth_subset = gansynth_subset
self.estimate_f0_and_loudness = estimate_f0_and_loudness | -1,277,827,415,216,770,300 | Constructs a NsynthConfig.
Args:
gansynth_subset: bool, whether to use the subset of the dataset introduced
in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses
acoustic-only instrument sources and limits the pitches to the interval
[24, 84]. The train and test splits are also modified so that
instruments (but not specific notes) overlap between them. See
https://arxiv.org/abs/1902.08710 for more details.
estimate_f0_and_loudness: bool, whether to estimate fundamental frequency
(F0) and loudness for the audio (at 250 Hz) and add them to the set of
features.
**kwargs: keyword arguments forwarded to super. | tensorflow_datasets/audio/nsynth.py | __init__ | Alex-Fabbri/datasets | python | def __init__(self, gansynth_subset=False, estimate_f0_and_loudness=False, **kwargs):
'Constructs a NsynthConfig.\n\n Args:\n gansynth_subset: bool, whether to use the subset of the dataset introduced\n in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses\n acoustic-only instrument sources and limits the pitches to the interval\n [24, 84]. The train and test splits are also modified so that\n instruments (but not specific notes) overlap between them. See\n https://arxiv.org/abs/1902.08710 for more details.\n estimate_f0_and_loudness: bool, whether to estimate fundamental frequency\n (F0) and loudness for the audio (at 250 Hz) and add them to the set of\n features.\n **kwargs: keyword arguments forwarded to super.\n '
name_parts = []
if gansynth_subset:
name_parts.append('gansynth_subset')
else:
name_parts.append('full')
if estimate_f0_and_loudness:
name_parts.append('f0_and_loudness')
super(NsynthConfig, self).__init__(name='.'.join(name_parts), version=tfds.core.Version('1.1.0', experiments={tfds.core.Experiment.S3: False}), **kwargs)
self.gansynth_subset = gansynth_subset
self.estimate_f0_and_loudness = estimate_f0_and_loudness |
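Because the builder-config name is just the dot-joined feature flags, the four possible NSynth config names can be enumerated directly; this mirrors the '.'.join(name_parts) logic above:

for gansynth_subset in (False, True):
    for f0_and_loudness in (False, True):
        name_parts = ['gansynth_subset' if gansynth_subset else 'full']
        if f0_and_loudness:
            name_parts.append('f0_and_loudness')
        print('.'.join(name_parts))
# full / full.f0_and_loudness / gansynth_subset / gansynth_subset.f0_and_loudness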
def _split_generators(self, dl_manager):
'Returns splits.'
dl_urls = {}
dl_urls['examples'] = {split: (_BASE_DOWNLOAD_PATH + ('%s.tfrecord.tar' % split)) for split in _SPLITS}
dl_urls['instrument_labels'] = (_BASE_DOWNLOAD_PATH + 'instrument_labels.txt')
if self.builder_config.gansynth_subset:
dl_urls['gansynth_splits'] = (_BASE_DOWNLOAD_PATH + 'gansynth_splits.csv')
dl_paths = dl_manager.download_and_extract(dl_urls)
with tf.io.gfile.GFile(dl_paths['instrument_labels']) as f:
instrument_labels = f.read().strip().splitlines()
self.info.features['instrument']['label'].names = instrument_labels
split_ids = {s: set() for s in _SPLITS}
split_dirs = {s: [dl_paths['examples'][s]] for s in _SPLITS}
if self.builder_config.gansynth_subset:
split_dirs = {s: dl_paths['examples'].values() for s in _SPLITS}
with tf.io.gfile.GFile(dl_paths['gansynth_splits']) as f:
reader = csv.DictReader(f)
for row in reader:
split_ids[row['split']].add(row['id'])
return [tfds.core.SplitGenerator(name=split, num_shards=_SPLIT_SHARDS[split], gen_kwargs={'tfrecord_dirs': split_dirs[split], 'ids': split_ids[split], 'split': split}) for split in _SPLITS] | 2,251,434,939,515,742,500 | Returns splits. | tensorflow_datasets/audio/nsynth.py | _split_generators | Alex-Fabbri/datasets | python | def _split_generators(self, dl_manager):
dl_urls = {}
dl_urls['examples'] = {split: (_BASE_DOWNLOAD_PATH + ('%s.tfrecord.tar' % split)) for split in _SPLITS}
dl_urls['instrument_labels'] = (_BASE_DOWNLOAD_PATH + 'instrument_labels.txt')
if self.builder_config.gansynth_subset:
dl_urls['gansynth_splits'] = (_BASE_DOWNLOAD_PATH + 'gansynth_splits.csv')
dl_paths = dl_manager.download_and_extract(dl_urls)
with tf.io.gfile.GFile(dl_paths['instrument_labels']) as f:
instrument_labels = f.read().strip().splitlines()
self.info.features['instrument']['label'].names = instrument_labels
split_ids = {s: set() for s in _SPLITS}
split_dirs = {s: [dl_paths['examples'][s]] for s in _SPLITS}
if self.builder_config.gansynth_subset:
split_dirs = {s: dl_paths['examples'].values() for s in _SPLITS}
with tf.io.gfile.GFile(dl_paths['gansynth_splits']) as f:
reader = csv.DictReader(f)
for row in reader:
split_ids[row['split']].add(row['id'])
return [tfds.core.SplitGenerator(name=split, num_shards=_SPLIT_SHARDS[split], gen_kwargs={'tfrecord_dirs': split_dirs[split], 'ids': split_ids[split], 'split': split}) for split in _SPLITS] |
def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split):
'Build PCollection of examples for split.'
beam = tfds.core.lazy_imports.apache_beam
def _emit_base_example(ex):
'Maps an input example to a TFDS example.'
beam.metrics.Metrics.counter(split, 'base-examples').inc()
features = ex.features.feature
return {'id': features['note_str'].bytes_list.value[0], 'audio': np.array(features['audio'].float_list.value, dtype=np.float32), 'pitch': features['pitch'].int64_list.value[0], 'velocity': features['velocity'].int64_list.value[0], 'instrument': {'label': tf.compat.as_text(features['instrument_str'].bytes_list.value[0]), 'family': tf.compat.as_text(features['instrument_family_str'].bytes_list.value[0]), 'source': tf.compat.as_text(features['instrument_source_str'].bytes_list.value[0])}, 'qualities': {q: features['qualities'].int64_list.value[i] for (i, q) in enumerate(_QUALITIES)}}
def _in_split(ex, split_ids):
if ((not split_ids) or (tf.compat.as_text(ex['id']) in split_ids)):
beam.metrics.Metrics.counter(split, 'in-split').inc()
return True
return False
def _estimate_f0(ex):
'Estimate the fundamental frequency using CREPE and add to example.'
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'estimate-f0').inc()
(_, f0_hz, f0_confidence, _) = tfds.core.lazy_imports.crepe.predict(ex['audio'], sr=_AUDIO_RATE, viterbi=True, step_size=(1000 / _F0_AND_LOUDNESS_RATE), verbose=0)
f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
f0_midi[(f0_midi == (- np.inf))] = 0
f0_confidence = np.nan_to_num(f0_confidence)
ex['f0'] = {'hz': f0_hz.astype(np.float32), 'midi': f0_midi.astype(np.float32), 'confidence': f0_confidence.astype(np.float32)}
return ex
def _compute_loudness(ex):
'Compute loudness and add to example.'
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'compute-loudness').inc()
librosa = tfds.core.lazy_imports.librosa
n_fft = 2048
amin = 1e-15
top_db = 200.0
stft = librosa.stft(ex['audio'], n_fft=n_fft, hop_length=int((_AUDIO_RATE // _F0_AND_LOUDNESS_RATE)))
loudness_db = librosa.perceptual_weighting((np.abs(stft) ** 2), librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft), amin=amin, top_db=top_db)
mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
mean_loudness_db = librosa.amplitude_to_db(mean_loudness_amp, amin=amin, top_db=top_db)
ex['loudness'] = {'db': mean_loudness_db.astype(np.float32)}
return ex
examples = ((((pipeline | beam.Create([os.path.join(dir_, '*') for dir_ in tfrecord_dirs])) | beam.io.tfrecordio.ReadAllFromTFRecord(coder=beam.coders.ProtoCoder(tf.train.Example))) | beam.Map(_emit_base_example)) | beam.Filter(_in_split, split_ids=ids))
if self.builder_config.estimate_f0_and_loudness:
examples = (((examples | beam.Reshuffle()) | beam.Map(_estimate_f0)) | beam.Map(_compute_loudness))
if (split == tfds.Split.TRAIN):
loudness = (examples | beam.Map((lambda x: np.mean(x['loudness']['db']))))
loudness_mean = (loudness | ('loudness_mean' >> beam.combiners.Mean.Globally()))
loudness_variance = ((loudness | beam.Map((lambda ld, ld_mean: ((ld - ld_mean) ** 2)), ld_mean=beam.pvalue.AsSingleton(loudness_mean))) | ('loudness_variance' >> beam.combiners.Mean.Globally()))
self.info.metadata['loudness_db_mean'] = loudness_mean
self.info.metadata['loudness_db_variance'] = loudness_variance
return examples | 1,458,585,263,117,671,200 | Build PCollection of examples for split. | tensorflow_datasets/audio/nsynth.py | _build_pcollection | Alex-Fabbri/datasets | python | def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split):
beam = tfds.core.lazy_imports.apache_beam
def _emit_base_example(ex):
'Maps an input example to a TFDS example.'
beam.metrics.Metrics.counter(split, 'base-examples').inc()
features = ex.features.feature
return {'id': features['note_str'].bytes_list.value[0], 'audio': np.array(features['audio'].float_list.value, dtype=np.float32), 'pitch': features['pitch'].int64_list.value[0], 'velocity': features['velocity'].int64_list.value[0], 'instrument': {'label': tf.compat.as_text(features['instrument_str'].bytes_list.value[0]), 'family': tf.compat.as_text(features['instrument_family_str'].bytes_list.value[0]), 'source': tf.compat.as_text(features['instrument_source_str'].bytes_list.value[0])}, 'qualities': {q: features['qualities'].int64_list.value[i] for (i, q) in enumerate(_QUALITIES)}}
def _in_split(ex, split_ids):
if ((not split_ids) or (tf.compat.as_text(ex['id']) in split_ids)):
beam.metrics.Metrics.counter(split, 'in-split').inc()
return True
return False
def _estimate_f0(ex):
'Estimate the fundamental frequency using CREPE and add to example.'
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'estimate-f0').inc()
(_, f0_hz, f0_confidence, _) = tfds.core.lazy_imports.crepe.predict(ex['audio'], sr=_AUDIO_RATE, viterbi=True, step_size=(1000 / _F0_AND_LOUDNESS_RATE), verbose=0)
f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
f0_midi[(f0_midi == (- np.inf))] = 0
f0_confidence = np.nan_to_num(f0_confidence)
ex['f0'] = {'hz': f0_hz.astype(np.float32), 'midi': f0_midi.astype(np.float32), 'confidence': f0_confidence.astype(np.float32)}
return ex
def _compute_loudness(ex):
'Compute loudness and add to example.'
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'compute-loudness').inc()
librosa = tfds.core.lazy_imports.librosa
n_fft = 2048
amin = 1e-15
top_db = 200.0
stft = librosa.stft(ex['audio'], n_fft=n_fft, hop_length=int((_AUDIO_RATE // _F0_AND_LOUDNESS_RATE)))
loudness_db = librosa.perceptual_weighting((np.abs(stft) ** 2), librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft), amin=amin, top_db=top_db)
mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
mean_loudness_db = librosa.amplitude_to_db(mean_loudness_amp, amin=amin, top_db=top_db)
ex['loudness'] = {'db': mean_loudness_db.astype(np.float32)}
return ex
examples = ((((pipeline | beam.Create([os.path.join(dir_, '*') for dir_ in tfrecord_dirs])) | beam.io.tfrecordio.ReadAllFromTFRecord(coder=beam.coders.ProtoCoder(tf.train.Example))) | beam.Map(_emit_base_example)) | beam.Filter(_in_split, split_ids=ids))
if self.builder_config.estimate_f0_and_loudness:
examples = (((examples | beam.Reshuffle()) | beam.Map(_estimate_f0)) | beam.Map(_compute_loudness))
if (split == tfds.Split.TRAIN):
loudness = (examples | beam.Map((lambda x: np.mean(x['loudness']['db']))))
loudness_mean = (loudness | ('loudness_mean' >> beam.combiners.Mean.Globally()))
loudness_variance = ((loudness | beam.Map((lambda ld, ld_mean: ((ld - ld_mean) ** 2)), ld_mean=beam.pvalue.AsSingleton(loudness_mean))) | ('loudness_variance' >> beam.combiners.Mean.Globally()))
self.info.metadata['loudness_db_mean'] = loudness_mean
self.info.metadata['loudness_db_variance'] = loudness_variance
return examples |
def _emit_base_example(ex):
'Maps an input example to a TFDS example.'
beam.metrics.Metrics.counter(split, 'base-examples').inc()
features = ex.features.feature
return {'id': features['note_str'].bytes_list.value[0], 'audio': np.array(features['audio'].float_list.value, dtype=np.float32), 'pitch': features['pitch'].int64_list.value[0], 'velocity': features['velocity'].int64_list.value[0], 'instrument': {'label': tf.compat.as_text(features['instrument_str'].bytes_list.value[0]), 'family': tf.compat.as_text(features['instrument_family_str'].bytes_list.value[0]), 'source': tf.compat.as_text(features['instrument_source_str'].bytes_list.value[0])}, 'qualities': {q: features['qualities'].int64_list.value[i] for (i, q) in enumerate(_QUALITIES)}} | 4,464,020,363,926,044,700 | Maps an input example to a TFDS example. | tensorflow_datasets/audio/nsynth.py | _emit_base_example | Alex-Fabbri/datasets | python | def _emit_base_example(ex):
beam.metrics.Metrics.counter(split, 'base-examples').inc()
features = ex.features.feature
return {'id': features['note_str'].bytes_list.value[0], 'audio': np.array(features['audio'].float_list.value, dtype=np.float32), 'pitch': features['pitch'].int64_list.value[0], 'velocity': features['velocity'].int64_list.value[0], 'instrument': {'label': tf.compat.as_text(features['instrument_str'].bytes_list.value[0]), 'family': tf.compat.as_text(features['instrument_family_str'].bytes_list.value[0]), 'source': tf.compat.as_text(features['instrument_source_str'].bytes_list.value[0])}, 'qualities': {q: features['qualities'].int64_list.value[i] for (i, q) in enumerate(_QUALITIES)}} |
def _estimate_f0(ex):
'Estimate the fundamental frequency using CREPE and add to example.'
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'estimate-f0').inc()
(_, f0_hz, f0_confidence, _) = tfds.core.lazy_imports.crepe.predict(ex['audio'], sr=_AUDIO_RATE, viterbi=True, step_size=(1000 / _F0_AND_LOUDNESS_RATE), verbose=0)
f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
f0_midi[(f0_midi == (- np.inf))] = 0
f0_confidence = np.nan_to_num(f0_confidence)
ex['f0'] = {'hz': f0_hz.astype(np.float32), 'midi': f0_midi.astype(np.float32), 'confidence': f0_confidence.astype(np.float32)}
return ex | 6,520,584,630,848,258,000 | Estimate the fundamental frequency using CREPE and add to example. | tensorflow_datasets/audio/nsynth.py | _estimate_f0 | Alex-Fabbri/datasets | python | def _estimate_f0(ex):
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'estimate-f0').inc()
(_, f0_hz, f0_confidence, _) = tfds.core.lazy_imports.crepe.predict(ex['audio'], sr=_AUDIO_RATE, viterbi=True, step_size=(1000 / _F0_AND_LOUDNESS_RATE), verbose=0)
f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
f0_midi[(f0_midi == (- np.inf))] = 0
f0_confidence = np.nan_to_num(f0_confidence)
ex['f0'] = {'hz': f0_hz.astype(np.float32), 'midi': f0_midi.astype(np.float32), 'confidence': f0_confidence.astype(np.float32)}
return ex |
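hz_to_midi is the standard pitch mapping midi = 69 + 12 * log2(f / 440), which sends 0 Hz to -inf; that is exactly what the f0_midi[f0_midi == -np.inf] = 0 guard cleans up afterwards. A plain-numpy sketch of the same mapping for arrays:

import numpy as np

def hz_to_midi(f0_hz):
    # A4 = 440 Hz maps to MIDI note 69; zeros give -inf, clamped to 0 afterwards.
    with np.errstate(divide='ignore'):
        midi = 69.0 + 12.0 * np.log2(np.asarray(f0_hz, dtype=np.float64) / 440.0)
    midi[midi == -np.inf] = 0.0
    return midi

print(hz_to_midi([0.0, 220.0, 440.0, 880.0]))  # [ 0. 57. 69. 81.]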
def _compute_loudness(ex):
'Compute loudness and add to example.'
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'compute-loudness').inc()
librosa = tfds.core.lazy_imports.librosa
n_fft = 2048
amin = 1e-15
top_db = 200.0
stft = librosa.stft(ex['audio'], n_fft=n_fft, hop_length=int((_AUDIO_RATE // _F0_AND_LOUDNESS_RATE)))
loudness_db = librosa.perceptual_weighting((np.abs(stft) ** 2), librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft), amin=amin, top_db=top_db)
mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
mean_loudness_db = librosa.amplitude_to_db(mean_loudness_amp, amin=amin, top_db=top_db)
ex['loudness'] = {'db': mean_loudness_db.astype(np.float32)}
return ex | 8,235,905,811,359,947,000 | Compute loudness and add to example. | tensorflow_datasets/audio/nsynth.py | _compute_loudness | Alex-Fabbri/datasets | python | def _compute_loudness(ex):
ex = ex.copy()
beam.metrics.Metrics.counter(split, 'compute-loudness').inc()
librosa = tfds.core.lazy_imports.librosa
n_fft = 2048
amin = 1e-15
top_db = 200.0
stft = librosa.stft(ex['audio'], n_fft=n_fft, hop_length=int((_AUDIO_RATE // _F0_AND_LOUDNESS_RATE)))
loudness_db = librosa.perceptual_weighting((np.abs(stft) ** 2), librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft), amin=amin, top_db=top_db)
mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
mean_loudness_db = librosa.amplitude_to_db(mean_loudness_amp, amin=amin, top_db=top_db)
ex['loudness'] = {'db': mean_loudness_db.astype(np.float32)}
return ex |
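The loudness computation round-trips through the standard 20*log10 amplitude/decibel convention. Rough numpy equivalents of the two librosa helpers used above, with ref = 1 and the top_db clipping left out for brevity:

import numpy as np

def amplitude_to_db(amp, amin=1e-15):
    # 20 * log10, floored at amin to avoid log(0)
    return 20.0 * np.log10(np.maximum(amin, np.asarray(amp)))

def db_to_amplitude(db):
    return np.power(10.0, np.asarray(db) / 20.0)

print(amplitude_to_db(np.array([1.0, 0.1])))  # [  0. -20.]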
def validate_k8s_version(namespace):
'Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server\n to use its default version.'
if namespace.kubernetes_version:
k8s_release_regex = re.compile('^[v|V]?(\\d+\\.\\d+\\.\\d+.*)$')
found = k8s_release_regex.findall(namespace.kubernetes_version)
if found:
namespace.kubernetes_version = found[0]
else:
raise CLIError('--kubernetes-version should be the full version number, such as "1.7.12" or "1.8.7"') | 4,560,851,881,821,565,000 | Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
to use its default version. | src/aks-preview/azext_aks_preview/_validators.py | validate_k8s_version | andyzhangx/azure-cli-extensions | python | def validate_k8s_version(namespace):
'Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server\n to use its default version.'
if namespace.kubernetes_version:
k8s_release_regex = re.compile('^[v|V]?(\\d+\\.\\d+\\.\\d+.*)$')
found = k8s_release_regex.findall(namespace.kubernetes_version)
if found:
namespace.kubernetes_version = found[0]
else:
raise CLIError('--kubernetes-version should be the full version number, such as "1.7.12" or "1.8.7"') |
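A quick probe of the version regex: the optional leading v/V is dropped and everything from the numeric triple onward is captured. As an aside, the | inside the character class is literal (a quirk of the original pattern), so a leading pipe would also be accepted.

import re

k8s_release_regex = re.compile('^[v|V]?(\\d+\\.\\d+\\.\\d+.*)$')
for raw in ('v1.7.12', '1.8.7', 'V1.14.0-alpha.1'):
    print(k8s_release_regex.findall(raw)[0])
# 1.7.12 / 1.8.7 / 1.14.0-alpha.1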
def validate_linux_host_name(namespace):
"Validates a string as a legal host name component.\n\n This validation will also occur server-side in the ARM API, but that may take\n a minute or two before the user sees it. So it's more user-friendly to validate\n in the CLI pre-flight.\n "
rfc1123_regex = re.compile('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$')
found = rfc1123_regex.findall(namespace.name)
if (not found):
raise CLIError('--name cannot exceed 63 characters and can only contain letters, numbers, or dashes (-).') | 9,058,301,189,045,302,000 | Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight. | src/aks-preview/azext_aks_preview/_validators.py | validate_linux_host_name | andyzhangx/azure-cli-extensions | python | def validate_linux_host_name(namespace):
"Validates a string as a legal host name component.\n\n This validation will also occur server-side in the ARM API, but that may take\n a minute or two before the user sees it. So it's more user-friendly to validate\n in the CLI pre-flight.\n "
rfc1123_regex = re.compile('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$')
found = rfc1123_regex.findall(namespace.name)
if (not found):
raise CLIError('--name cannot exceed 63 characters and can only contain letters, numbers, or dashes (-).') |
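Two probes against the RFC 1123 pattern, which accepts labels of up to 63 alphanumeric characters with interior dashes, joined by dots:

import re

rfc1123_regex = re.compile('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$')
print(bool(rfc1123_regex.findall('aks-nodepool1')))  # True
print(bool(rfc1123_regex.findall('-leading-dash')))  # False: a label cannot start with '-'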
def validate_max_pods(namespace):
'Validates that max_pods is set to a reasonable minimum number.'
minimum_pods_required = ceil(((((namespace.node_count * 2) + 6) + 1) / namespace.node_count))
if ((namespace.max_pods != 0) and (namespace.max_pods < minimum_pods_required)):
raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'.format(minimum_pods_required)) | 7,976,692,459,901,999,000 | Validates that max_pods is set to a reasonable minimum number. | src/aks-preview/azext_aks_preview/_validators.py | validate_max_pods | andyzhangx/azure-cli-extensions | python | def validate_max_pods(namespace):
minimum_pods_required = ceil(((((namespace.node_count * 2) + 6) + 1) / namespace.node_count))
if ((namespace.max_pods != 0) and (namespace.max_pods < minimum_pods_required)):
raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'.format(minimum_pods_required)) |
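The floor amounts to roughly two system pods per node plus six shared pods plus one, spread across the nodes. Worked numbers for a three-node cluster:

from math import ceil

node_count = 3  # example value
minimum_pods_required = ceil(((node_count * 2) + 6 + 1) / node_count)
print(minimum_pods_required)  # 5, so --max-pods must be at least 5 (0 skips the check)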
def validate_nodes_count(namespace):
'Validate that min_count and max_count are set to 1-100'
if (namespace.min_count is not None):
if ((namespace.min_count < 1) or (namespace.min_count > 100)):
raise CLIError('--min-count must be in the range [1,100]')
if (namespace.max_count is not None):
if ((namespace.max_count < 1) or (namespace.max_count > 100)):
raise CLIError('--max-count must be in the range [1,100]') | 4,935,573,627,725,259,000 | Validate that min_count and max_count are set to 1-100 | src/aks-preview/azext_aks_preview/_validators.py | validate_nodes_count | andyzhangx/azure-cli-extensions | python | def validate_nodes_count(namespace):
if (namespace.min_count is not None):
if ((namespace.min_count < 1) or (namespace.min_count > 100)):
raise CLIError('--min-count must be in the range [1,100]')
if (namespace.max_count is not None):
if ((namespace.max_count < 1) or (namespace.max_count > 100)):
raise CLIError('--max-count must be in the range [1,100]') |
def validate_nodepool_name(namespace):
'Validates a nodepool name to be at most 12 characters, alphanumeric only.'
if (namespace.nodepool_name != ''):
if (len(namespace.nodepool_name) > 12):
raise CLIError('--nodepool-name can contain at most 12 characters')
if (not namespace.nodepool_name.isalnum()):
raise CLIError('--nodepool-name should only contain alphanumeric characters') | -4,239,464,942,452,206,600 | Validates a nodepool name to be at most 12 characters, alphanumeric only. | src/aks-preview/azext_aks_preview/_validators.py | validate_nodepool_name | andyzhangx/azure-cli-extensions | python | def validate_nodepool_name(namespace):
if (namespace.nodepool_name != ''):
if (len(namespace.nodepool_name) > 12):
raise CLIError('--nodepool-name can contain at most 12 characters')
if (not namespace.nodepool_name.isalnum()):
raise CLIError('--nodepool-name should only contain alphanumeric characters') |
def validate_vm_set_type(namespace):
'Validates the vm set type string.'
if (namespace.vm_set_type is not None):
if (namespace.vm_set_type == ''):
return
if ((namespace.vm_set_type.lower() != 'availabilityset') and (namespace.vm_set_type.lower() != 'virtualmachinescalesets')):
raise CLIError('--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet') | 4,283,648,957,399,007,000 | Validates the vm set type string. | src/aks-preview/azext_aks_preview/_validators.py | validate_vm_set_type | andyzhangx/azure-cli-extensions | python | def validate_vm_set_type(namespace):
if (namespace.vm_set_type is not None):
if (namespace.vm_set_type == ''):
return
if ((namespace.vm_set_type.lower() != 'availabilityset') and (namespace.vm_set_type.lower() != 'virtualmachinescalesets')):
raise CLIError('--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet') |
def validate_load_balancer_sku(namespace):
'Validates the load balancer sku string.'
if (namespace.load_balancer_sku is not None):
if (namespace.load_balancer_sku == ''):
return
if ((namespace.load_balancer_sku.lower() != 'basic') and (namespace.load_balancer_sku.lower() != 'standard')):
raise CLIError('--load-balancer-sku can only be standard or basic') | 214,954,928,562,401,760 | Validates the load balancer sku string. | src/aks-preview/azext_aks_preview/_validators.py | validate_load_balancer_sku | andyzhangx/azure-cli-extensions | python | def validate_load_balancer_sku(namespace):
if (namespace.load_balancer_sku is not None):
if (namespace.load_balancer_sku == ''):
return
if ((namespace.load_balancer_sku.lower() != 'basic') and (namespace.load_balancer_sku.lower() != 'standard')):
raise CLIError('--load-balancer-sku can only be standard or basic') |
def validate_load_balancer_outbound_ips(namespace):
'validate load balancer profile outbound IP ids'
if (namespace.load_balancer_outbound_ips is not None):
ip_id_list = [x.strip() for x in namespace.load_balancer_outbound_ips.split(',')]
if (not all(ip_id_list)):
raise CLIError('--load-balancer-outbound-ips cannot contain whitespace') | 5,513,461,929,437,336,000 | validate load balancer profile outbound IP ids | src/aks-preview/azext_aks_preview/_validators.py | validate_load_balancer_outbound_ips | andyzhangx/azure-cli-extensions | python | def validate_load_balancer_outbound_ips(namespace):
if (namespace.load_balancer_outbound_ips is not None):
ip_id_list = [x.strip() for x in namespace.load_balancer_outbound_ips.split(',')]
if (not all(ip_id_list)):
raise CLIError('--load-balancer-outbound-ips cannot contain whitespace') |
def validate_load_balancer_outbound_ip_prefixes(namespace):
'validate load balancer profile outbound IP prefix ids'
if (namespace.load_balancer_outbound_ip_prefixes is not None):
ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')]
if (not all(ip_prefix_id_list)):
raise CLIError('--load-balancer-outbound-ip-prefixes cannot contain whitespace') | 1,773,393,025,881,589,000 | validate load balancer profile outbound IP prefix ids | src/aks-preview/azext_aks_preview/_validators.py | validate_load_balancer_outbound_ip_prefixes | andyzhangx/azure-cli-extensions | python | def validate_load_balancer_outbound_ip_prefixes(namespace):
if (namespace.load_balancer_outbound_ip_prefixes is not None):
ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')]
if (not all(ip_prefix_id_list)):
raise CLIError('--load-balancer-outbound-ip-prefixes cannot contain whitespace') |
def validate_taints(namespace):
'Validates that the provided taint has a valid format'
regex = re.compile('^[a-zA-Z\\d][\\w\\-\\.\\/]{0,252}=[a-zA-Z\\d][\\w\\-\\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$')
if ((namespace.node_taints is not None) and (namespace.node_taints != '')):
for taint in namespace.node_taints.split(','):
if (taint == ''):
continue
found = regex.findall(taint)
if (not found):
raise CLIError(('Invalid node taint: %s' % taint)) | 3,300,243,401,317,772,000 | Validates that the provided taint has a valid format | src/aks-preview/azext_aks_preview/_validators.py | validate_taints | andyzhangx/azure-cli-extensions | python | def validate_taints(namespace):
regex = re.compile('^[a-zA-Z\\d][\\w\\-\\.\\/]{0,252}=[a-zA-Z\\d][\\w\\-\\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$')
if ((namespace.node_taints is not None) and (namespace.node_taints != '')):
for taint in namespace.node_taints.split(','):
if (taint == ''):
continue
found = regex.findall(taint)
if (not found):
raise CLIError(('Invalid node taint: %s' % taint)) |
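The taint grammar is key=value:effect, with the effect restricted to NoSchedule, PreferNoSchedule, or NoExecute. Two probes against the same pattern:

import re

taint_regex = re.compile('^[a-zA-Z\\d][\\w\\-\\.\\/]{0,252}=[a-zA-Z\\d][\\w\\-\\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$')
print(bool(taint_regex.findall('sku=gpu:NoSchedule')))  # True
print(bool(taint_regex.findall('sku=gpu:Sometimes')))   # False: unknown effect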
def validate_priority(namespace):
'Validates the node pool priority string.'
if (namespace.priority is not None):
if (namespace.priority == ''):
return
if ((namespace.priority != 'Low') and (namespace.priority != 'Regular')):
raise CLIError('--priority can only be Low or Regular') | 7,004,494,543,943,205,000 | Validates the node pool priority string. | src/aks-preview/azext_aks_preview/_validators.py | validate_priority | andyzhangx/azure-cli-extensions | python | def validate_priority(namespace):
if (namespace.priority is not None):
if (namespace.priority == ''):
return
if ((namespace.priority != 'Low') and (namespace.priority != 'Regular')):
raise CLIError('--priority can only be Low or Regular') |
def validate_eviction_policy(namespace):
'Validates the eviction policy string.'
if (namespace.eviction_policy is not None):
if (namespace.eviction_policy == ''):
return
if ((namespace.eviction_policy != 'Delete') and (namespace.eviction_policy != 'Deallocate')):
raise CLIError('--eviction-policy can only be Delete or Deallocate') | -8,553,922,939,024,618,000 | Validates the eviction policy string. | src/aks-preview/azext_aks_preview/_validators.py | validate_eviction_policy | andyzhangx/azure-cli-extensions | python | def validate_eviction_policy(namespace):
if (namespace.eviction_policy is not None):
if (namespace.eviction_policy == ''):
return
if ((namespace.eviction_policy != 'Delete') and (namespace.eviction_policy != 'Deallocate')):
raise CLIError('--eviction-policy can only be Delete or Deallocate') |
def _num_frame_valid(nsp_src, nsp_win, len_hop):
"Computes the number of frames with 'valid' setting"
return ((nsp_src - (nsp_win - len_hop)) // len_hop) | -5,105,235,960,667,779,000 | Computes the number of frames with 'valid' setting | tests/test_time_frequency.py | _num_frame_valid | Path-A/kapre | python | def _num_frame_valid(nsp_src, nsp_win, len_hop):
return ((nsp_src - (nsp_win - len_hop)) // len_hop) |
def _num_frame_same(nsp_src, len_hop):
"Computes the number of frames with 'same' setting"
return int(np.ceil((float(nsp_src) / len_hop))) | -5,171,118,155,236,767,000 | Computes the number of frames with 'same' setting | tests/test_time_frequency.py | _num_frame_same | Path-A/kapre | python | def _num_frame_same(nsp_src, len_hop):
return int(np.ceil((float(nsp_src) / len_hop))) |
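Concrete numbers for the two frame-count conventions, using a 1000-sample source, a 512-sample window, and a 256-sample hop:

import numpy as np

nsp_src, nsp_win, len_hop = 1000, 512, 256
print((nsp_src - (nsp_win - len_hop)) // len_hop)  # 'valid': (1000 - 256) // 256 = 2
print(int(np.ceil(float(nsp_src) / len_hop)))      # 'same': ceil(1000 / 256) = 4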
def allclose_phase(a, b, atol=0.001):
'Testing phase.\n Remember that a small error in a complex value may lead to a large phase difference\n if the norm is very small.\n\n Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.\n\n '
np.testing.assert_allclose(np.sin(a), np.sin(b), atol=atol)
np.testing.assert_allclose(np.cos(a), np.cos(b), atol=atol) | 6,064,070,407,929,880,000 | Testing phase.
Remember that a small error in a complex value may lead to a large phase difference
if the norm is very small.
Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase. | tests/test_time_frequency.py | allclose_phase | Path-A/kapre | python | def allclose_phase(a, b, atol=0.001):
'Testing phase.\n Remember that a small error in a complex value may lead to a large phase difference\n if the norm is very small.\n\n Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.\n\n '
np.testing.assert_allclose(np.sin(a), np.sin(b), atol=atol)
np.testing.assert_allclose(np.cos(a), np.cos(b), atol=atol) |
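Comparing sin and cos of the phases sidesteps the 2π wrap-around: -π and π denote the same angle but differ numerically by 2π, so a direct comparison would fail where this one passes.

import numpy as np

a, b = np.array([np.pi]), np.array([-np.pi])  # same angle, 2*pi apart numerically
np.testing.assert_allclose(np.sin(a), np.sin(b), atol=1e-3)  # passes
np.testing.assert_allclose(np.cos(a), np.cos(b), atol=1e-3)  # passes
# np.testing.assert_allclose(a, b) would raise AssertionError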
@pytest.mark.parametrize('n_fft', [512])
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [2])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('amin', [1e-05, 0.001])
@pytest.mark.parametrize('dynamic_range', [120.0, 80.0])
@pytest.mark.parametrize('n_mels', [40])
@pytest.mark.parametrize('mel_f_min', [0.0])
@pytest.mark.parametrize('mel_f_max', [8000])
def test_melspectrogram_correctness(n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max):
'Test the correctness of melspectrogram.\n\n Note that mel filterbank is tested separately\n\n '
def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
melgram_model = get_melspectrogram_layer(n_fft=n_fft, sample_rate=sr, n_mels=n_mels, mel_f_min=mel_f_min, mel_f_max=mel_f_max, win_length=win_length, hop_length=hop_length, input_data_format=data_format, output_data_format=data_format, return_decibel=return_decibel, input_shape=input_shape, db_amin=amin, db_dynamic_range=dynamic_range)
return melgram_model
(src_mono, batch_src, input_shape) = get_audio(data_format=data_format, n_ch=n_ch)
win_length = n_fft
S_ref = librosa.feature.melspectrogram(src_mono, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, power=1.0, n_mels=n_mels, fmin=mel_f_min, fmax=mel_f_max).T
S_ref = np.expand_dims(S_ref, axis=2)
S_ref = np.tile(S_ref, [1, 1, n_ch])
if (data_format == 'channels_first'):
S_ref = np.transpose(S_ref, (2, 0, 1))
melgram_model = _get_melgram_model(return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0)
S = melgram_model.predict(batch_src)[0]
np.testing.assert_allclose(S_ref, S, atol=0.0001)
melgram_model = _get_melgram_model(return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range)
S = melgram_model.predict(batch_src)[0]
S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)
np.testing.assert_allclose(S_ref_db, S, rtol=0.003) | 9,109,858,188,567,050,000 | Test the correctness of melspectrogram.
Note that mel filterbank is tested separately | tests/test_time_frequency.py | test_melspectrogram_correctness | Path-A/kapre | python | @pytest.mark.parametrize('n_fft', [512])
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [2])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('amin', [1e-05, 0.001])
@pytest.mark.parametrize('dynamic_range', [120.0, 80.0])
@pytest.mark.parametrize('n_mels', [40])
@pytest.mark.parametrize('mel_f_min', [0.0])
@pytest.mark.parametrize('mel_f_max', [8000])
def test_melspectrogram_correctness(n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max):
'Test the correctness of melspectrogram.\n\n Note that mel filterbank is tested separately\n\n '
def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
melgram_model = get_melspectrogram_layer(n_fft=n_fft, sample_rate=sr, n_mels=n_mels, mel_f_min=mel_f_min, mel_f_max=mel_f_max, win_length=win_length, hop_length=hop_length, input_data_format=data_format, output_data_format=data_format, return_decibel=return_decibel, input_shape=input_shape, db_amin=amin, db_dynamic_range=dynamic_range)
return melgram_model
(src_mono, batch_src, input_shape) = get_audio(data_format=data_format, n_ch=n_ch)
win_length = n_fft
S_ref = librosa.feature.melspectrogram(src_mono, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, power=1.0, n_mels=n_mels, fmin=mel_f_min, fmax=mel_f_max).T
S_ref = np.expand_dims(S_ref, axis=2)
S_ref = np.tile(S_ref, [1, 1, n_ch])
if (data_format == 'channels_first'):
S_ref = np.transpose(S_ref, (2, 0, 1))
melgram_model = _get_melgram_model(return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0)
S = melgram_model.predict(batch_src)[0]
np.testing.assert_allclose(S_ref, S, atol=0.0001)
melgram_model = _get_melgram_model(return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range)
S = melgram_model.predict(batch_src)[0]
S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)
np.testing.assert_allclose(S_ref_db, S, rtol=0.003) |
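
Note on the API under test: get_melspectrogram_layer returns a Keras-compatible melspectrogram front end that stacks like any other layer. A minimal usage sketch follows; the keyword arguments mirror the test above, but the surrounding network and the import path (kapre.composed, as in kapre 0.3.x) are assumptions, not part of the test suite.

import tensorflow as tf
from kapre.composed import get_melspectrogram_layer  # import path assumed (kapre 0.3.x)

# Mono, one second of audio at 22050 Hz, channels_last.
melgram = get_melspectrogram_layer(
    input_shape=(22050, 1),
    n_fft=512,
    hop_length=256,
    sample_rate=22050,
    n_mels=40,
    return_decibel=True,
)
model = tf.keras.models.Sequential([
    melgram,
    tf.keras.layers.Conv2D(16, 3, activation='relu'),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.summary()
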
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_log_spectrogram_runnable(data_format):
'test if log spectrogram layer works well'
(src_mono, batch_src, input_shape) = get_audio(data_format=data_format, n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=False) | 2,736,683,118,145,581,600 | test if log spectrogram layer works well | tests/test_time_frequency.py | test_log_spectrogram_runnable | Path-A/kapre | python | @pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_log_spectrogram_runnable(data_format):
(src_mono, batch_src, input_shape) = get_audio(data_format=data_format, n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=False) |
@pytest.mark.xfail
def test_log_spectrogram_fail():
'test if log spectrogram layer works well'
(src_mono, batch_src, input_shape) = get_audio(data_format='channels_last', n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200) | -2,335,011,064,550,476,300 | test if log spectrogram layer works well | tests/test_time_frequency.py | test_log_spectrogram_fail | Path-A/kapre | python | @pytest.mark.xfail
def test_log_spectrogram_fail():
(src_mono, batch_src, input_shape) = get_audio(data_format='channels_last', n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200) |
def test_delta():
'test delta layer'
specgrams = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
specgrams = np.reshape(specgrams, (1, (- 1), 1, 1))
delta_model = tensorflow.keras.models.Sequential()
delta_model.add(Delta(win_length=3, input_shape=(4, 1, 1), data_format='channels_last'))
delta_kapre = delta_model(specgrams)
delta_ref = np.array([0.5, 1.0, 1.0, 0.5], dtype=np.float32)
delta_ref = np.reshape(delta_ref, (1, (- 1), 1, 1))
np.testing.assert_allclose(delta_kapre, delta_ref) | -849,587,254,323,033,900 | test delta layer | tests/test_time_frequency.py | test_delta | Path-A/kapre | python | def test_delta():
specgrams = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
specgrams = np.reshape(specgrams, (1, (- 1), 1, 1))
delta_model = tensorflow.keras.models.Sequential()
delta_model.add(Delta(win_length=3, input_shape=(4, 1, 1), data_format='channels_last'))
delta_kapre = delta_model(specgrams)
delta_ref = np.array([0.5, 1.0, 1.0, 0.5], dtype=np.float32)
delta_ref = np.reshape(delta_ref, (1, (- 1), 1, 1))
np.testing.assert_allclose(delta_kapre, delta_ref) |
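
For readers checking the arithmetic: the reference values in test_delta follow the standard regression-based delta formula (as in librosa.feature.delta), delta[t] = sum_d d * (x[t+d] - x[t-d]) / (2 * sum_d d^2), with edge-replicated padding; for win_length=3 this reduces to (x[t+1] - x[t-1]) / 2. A minimal NumPy sketch of that formula (an illustration only, not Kapre's actual implementation) reproduces delta_ref:

import numpy as np

def simple_delta(x, win_length=3):
    # Regression-based delta with edge-replicated padding.
    n = (win_length - 1) // 2
    denom = 2 * sum(d * d for d in range(1, n + 1))
    padded = np.pad(x, n, mode='edge')
    return np.array(
        [sum(d * (padded[t + n + d] - padded[t + n - d]) for d in range(1, n + 1)) / denom
         for t in range(len(x))],
        dtype=np.float32,
    )

print(simple_delta(np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)))
# -> [0.5 1.  1.  0.5], matching delta_ref above
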
def test_save_load():
'test saving/loading of models that has stft, melspectorgrma, and log frequency.'
(src_mono, batch_src, input_shape) = get_audio(data_format='channels_last', n_ch=1)
save_load_compare(STFT(input_shape=input_shape, pad_begin=True), batch_src, allclose_complex_numbers)
save_load_compare(get_melspectrogram_layer(input_shape=input_shape, return_decibel=True), batch_src, np.testing.assert_allclose)
save_load_compare(get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True), batch_src, np.testing.assert_allclose)
save_load_compare(get_stft_mag_phase(input_shape=input_shape, return_decibel=True), batch_src, np.testing.assert_allclose)
save_load_compare(get_stft_magnitude_layer(input_shape=input_shape), batch_src, np.testing.assert_allclose) | -2,395,560,271,028,600,300 | test saving/loading of models that has stft, melspectorgrma, and log frequency. | tests/test_time_frequency.py | test_save_load | Path-A/kapre | python | def test_save_load():
(src_mono, batch_src, input_shape) = get_audio(data_format='channels_last', n_ch=1)
save_load_compare(STFT(input_shape=input_shape, pad_begin=True), batch_src, allclose_complex_numbers)
save_load_compare(get_melspectrogram_layer(input_shape=input_shape, return_decibel=True), batch_src, np.testing.assert_allclose)
save_load_compare(get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True), batch_src, np.testing.assert_allclose)
save_load_compare(get_stft_mag_phase(input_shape=input_shape, return_decibel=True), batch_src, np.testing.assert_allclose)
save_load_compare(get_stft_magnitude_layer(input_shape=input_shape), batch_src, np.testing.assert_allclose) |
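
The save_load_compare helper used throughout test_save_load is defined elsewhere in the test suite and is not part of this excerpt. A plausible minimal version is sketched below; it is hypothetical, and the real helper may choose a different serialization format or comparison strategy.

import os
import tempfile

import tensorflow as tf

def save_load_compare(layer, batch_src, allclose_func):
    # Hypothetical sketch: wrap the layer in a model, round-trip it through
    # Keras SavedModel serialization, and check predictions are unchanged.
    model = tf.keras.models.Sequential([layer])
    result_before = model.predict(batch_src)
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, 'model')
        model.save(path)  # SavedModel; h5 would need custom_objects on load
        loaded = tf.keras.models.load_model(path)
        result_after = loaded.predict(batch_src)
    allclose_func(result_before, result_after)
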
def round_to_memory_units(memory_bytes, round_up):
'Round bytes to the nearest memory unit.'
return from_memory_units(to_memory_units(memory_bytes, round_up)) | -2,739,755,447,883,276,300 | Round bytes to the nearest memory unit. | python/ray/ray_constants.py | round_to_memory_units | stephanie-wang/ray | python | def round_to_memory_units(memory_bytes, round_up):
return from_memory_units(to_memory_units(memory_bytes, round_up)) |
def from_memory_units(memory_units):
'Convert from memory units -> bytes.'
return (memory_units * MEMORY_RESOURCE_UNIT_BYTES) | -8,630,025,388,805,738,000 | Convert from memory units -> bytes. | python/ray/ray_constants.py | from_memory_units | stephanie-wang/ray | python | def from_memory_units(memory_units):
return (memory_units * MEMORY_RESOURCE_UNIT_BYTES) |
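
These two Ray helpers lean on a to_memory_units counterpart and the MEMORY_RESOURCE_UNIT_BYTES constant, neither of which appears in this excerpt. The sketch below shows how the trio plausibly fits together; the 50 MiB unit size and the ceil/floor rounding are assumptions for illustration, not taken from Ray's source.

import math

MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 * 1024  # assumed: 50 MiB per unit

def to_memory_units(memory_bytes, round_up):
    # Convert bytes -> whole memory units, rounding up or down.
    units = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
    return math.ceil(units) if round_up else math.floor(units)

def from_memory_units(memory_units):
    # Convert memory units -> bytes.
    return memory_units * MEMORY_RESOURCE_UNIT_BYTES

def round_to_memory_units(memory_bytes, round_up):
    # Round bytes to the nearest memory-unit boundary.
    return from_memory_units(to_memory_units(memory_bytes, round_up))

print(round_to_memory_units(1024 ** 3, round_up=True))  # 1 GiB -> 21 units -> 1101004800 bytes
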