Dataset schema: body (string, 26 to 98.2k chars) | body_hash (int64) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (string, 1 class: python) | body_without_docstring (string, 20 to 98.2k chars)
def active_current(self): 'Get current active Amps.' if (not self.is_local_active): return properties = self.instantaneous_values() return float(properties['current'])
5,645,062,419,562,329,000
Get current active Amps.
homeassistant/components/smappee.py
active_current
Arshrock/home-assistant
python
def active_current(self): if (not self.is_local_active): return properties = self.instantaneous_values() return float(properties['current'])
def active_voltage(self): 'Get current active Voltage.' if (not self.is_local_active): return properties = self.instantaneous_values() return float(properties['voltage'])
1,126,977,129,704,298,000
Get current active Voltage.
homeassistant/components/smappee.py
active_voltage
Arshrock/home-assistant
python
def active_voltage(self): if (not self.is_local_active): return properties = self.instantaneous_values() return float(properties['voltage'])
def load_instantaneous(self): 'LoadInstantaneous.' if (not self.is_local_active): return try: return self._localsmappy.load_instantaneous() except RequestException as error: _LOGGER.error('Error getting data from Local Smappee unit. (%s)', error)
-1,047,353,706,137,381,600
LoadInstantaneous.
homeassistant/components/smappee.py
load_instantaneous
Arshrock/home-assistant
python
def load_instantaneous(self): if (not self.is_local_active): return try: return self._localsmappy.load_instantaneous() except RequestException as error: _LOGGER.error('Error getting data from Local Smappee unit. (%s)', error)
def __init__(self, key=None, value=None) -> None: 'Initializes the AVL node.\n\n Args:\n key (optional): key of the (key, value) pair. Defaults to None.\n value (optional): value of the (key, value) pair. Defaults to None.\n ' super().__init__() self.key = key self.value = value self.left = None self.right = None self.height = 1
5,150,536,766,491,454,000
Initializes the AVL node. Args: key (optional): key of the (key, value) pair. Defaults to None. value (optional): value of the (key, value) pair. Defaults to None.
avltree/AVLNode.py
__init__
gpk2000/avl-db
python
def __init__(self, key=None, value=None) -> None: super().__init__() self.key = key self.value = value self.left = None self.right = None self.height = 1
def __str__(self) -> str: 'Returns the string form of a single AVL node\n\n Raises:\n NoNodeData: If no data is present in the node\n\n Returns:\n str: output string\n ' if self.key: out = 'data: {0}\nleft: {1}\nright: {2}\n'.format((self.key, self.value), self.left.__str__(), self.right.__str__()) return out raise NoNodeData
4,570,802,116,212,675,000
Returns the string form of a single AVL node Raises: NoNodeData: If no data is present in the node Returns: str: output string
avltree/AVLNode.py
__str__
gpk2000/avl-db
python
def __str__(self) -> str: if self.key: out = 'data: {0}\nleft: {1}\nright: {2}\n'.format((self.key, self.value), self.left.__str__(), self.right.__str__()) return out raise NoNodeData
def get_key(self) -> str: 'returns the key of the node\n\n Returns:\n str: the key in (key, value) pair\n ' return self.key
-7,853,967,984,499,772,000
returns the key of the node Returns: str: the key in (key, value) pair
avltree/AVLNode.py
get_key
gpk2000/avl-db
python
def get_key(self) -> str: return self.key
def get_value(self) -> str: 'returns the value of the key\n\n Returns:\n str: the value in (key, value) pair\n ' return self.value
7,666,927,667,515,784,000
returns the value of the key Returns: str: the value in (key, value) pair
avltree/AVLNode.py
get_value
gpk2000/avl-db
python
def get_value(self) -> str: return self.value
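The four AVLNode records above form a small node API. A minimal usage sketch, assuming the class is importable from avltree.AVLNode as laid out in this repository; the manual child wiring is for illustration only, since the tree class that performs insertions and rotations is not shown here:

    from avltree.AVLNode import AVLNode  # assumed import path, mirroring avltree/AVLNode.py

    root = AVLNode(key='k1', value='v1')       # height defaults to 1, children to None
    root.left = AVLNode(key='k0', value='v0')
    root.right = AVLNode(key='k2', value='v2')
    print(root.get_key(), root.get_value())    # k1 v1
    print(root)                                # __str__ recurses into both children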
def densenet121(pretrained=False, **kwargs): 'Densenet-121 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n ' model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
-2,746,342,752,423,188,000
Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
cvlib/models/densenet.py
densenet121
AaronLeong/cvlib
python
def densenet121(pretrained=False, **kwargs): model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
def densenet169(pretrained=False, **kwargs): 'Densenet-169 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n ' model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet169']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
960,222,106,457,515,500
Densenet-169 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
cvlib/models/densenet.py
densenet169
AaronLeong/cvlib
python
def densenet169(pretrained=False, **kwargs): model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet169']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
def densenet201(pretrained=False, **kwargs): 'Densenet-201 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n ' model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet201']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
-1,951,926,253,490,703,400
Densenet-201 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
cvlib/models/densenet.py
densenet201
AaronLeong/cvlib
python
def densenet201(pretrained=False, **kwargs): model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet201']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
def densenet161(pretrained=False, **kwargs): 'Densenet-161 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n ' model = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet161']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
3,394,265,925,019,332,000
Densenet-161 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
cvlib/models/densenet.py
densenet161
AaronLeong/cvlib
python
def densenet161(pretrained=False, **kwargs): model = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24), **kwargs) if pretrained: pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet161']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = (res.group(1) + res.group(2)) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) return model
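All four DenseNet constructors share the same checkpoint key remapping: older torchvision checkpoints stored parameters under names like 'norm.1.weight', while the module definitions expect 'norm1.weight'. A standalone sketch of just that remapping, with an illustrative key:

    import re

    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    key = 'features.denseblock1.denselayer1.norm.1.weight'  # example checkpoint key
    res = pattern.match(key)
    if res:
        new_key = res.group(1) + res.group(2)
        print(new_key)  # features.denseblock1.denselayer1.norm1.weight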
def get_event_listener(): 'Return a new `Queue` object that will see all events.' queue = asyncio.Queue() _event_queues.append(queue) return queue
3,413,151,637,712,462,300
Return a new `Queue` object that will see all events.
pipekit/component.py
get_event_listener
DrDub/pipekit
python
def get_event_listener(): queue = asyncio.Queue() _event_queues.append(queue) return queue
def add_event_callback(event, callable, *args, **kwargs): 'Register a callback that will be called upon the given event.' _event_callbacks[event].append(partial(callable, *args, **kwargs))
5,979,074,628,122,418,000
Register a callback that will be called upon the given event.
pipekit/component.py
add_event_callback
DrDub/pipekit
python
def add_event_callback(event, callable, *args, **kwargs): _event_callbacks[event].append(partial(callable, *args, **kwargs))
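add_event_callback freezes extra arguments into the callback with functools.partial, so they are replayed when the event fires. The mechanism in isolation (the on_started handler is hypothetical):

    from functools import partial

    def on_started(component_id, event):
        print(f'{component_id} saw {event}')

    callback = partial(on_started, 'worker-1')  # pre-bind the first argument
    callback('started')                         # prints: worker-1 saw started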
def hasstatus(self, status): 'Return `True` if given status was set.' if isinstance(status, ComponentEvent): event = status.id elif (':' in status): event = status else: event = ComponentEvent(status, self).id return _events[event].is_set()
7,900,613,702,815,817,000
Return `True` if given status was set.
pipekit/component.py
hasstatus
DrDub/pipekit
python
def hasstatus(self, status): if isinstance(status, ComponentEvent): event = status.id elif (':' in status): event = status else: event = ComponentEvent(status, self).id return _events[event].is_set()
@property def running(self): 'Return `True` if in one of the running states.' if (not self.stopped): for status in ['started', 'running']: if self.hasstatus(status): return True
7,820,058,830,212,171,000
Return `True` if in one of the running states.
pipekit/component.py
running
DrDub/pipekit
python
@property def running(self): if (not self.stopped): for status in ['started', 'running']: if self.hasstatus(status): return True
@property def stopped(self): 'Return `True` if in one of the stopped states.' for status in ['aborted', 'finished']: if self.hasstatus(status): return True
2,776,393,392,516,385,000
Return `True` if in one of the stopped states.
pipekit/component.py
stopped
DrDub/pipekit
python
@property def stopped(self): for status in ['aborted', 'finished']: if self.hasstatus(status): return True
@property def aborted(self): 'Return `True` if the aborted event was emitted.' return self.hasstatus('aborted')
5,742,274,142,716,906,000
Return `True` if the aborted event was emitted.
pipekit/component.py
aborted
DrDub/pipekit
python
@property def aborted(self): return self.hasstatus('aborted')
def _log_formatted(self, msg, *args): "Return the msg prefixed with this component's ID and type." prefix = (f'{self.id} ' if self.id else '') msg = f'{prefix}({self.type}) {msg}' return ((msg,) + args)
-7,614,228,937,209,844,000
Return the msg prefixed with this component's ID and type.
pipekit/component.py
_log_formatted
DrDub/pipekit
python
def _log_formatted(self, msg, *args): prefix = (f'{self.id} ' if self.id else '') msg = f'{prefix}({self.type}) {msg}' return ((msg,) + args)
async def try_while_running(self, callable, timeout=0.5): 'Return result of `callable`, or raise `ComponentInterrupted` if component is stopped.' while self.running: coro = callable() try: return (await asyncio.wait_for(coro, timeout)) except asyncio.TimeoutError: pass raise ComponentInterrupted
-5,315,369,414,755,020,000
Return result of `callable`, or raise `ComponentInterrupted` if component is stopped.
pipekit/component.py
try_while_running
DrDub/pipekit
python
async def try_while_running(self, callable, timeout=0.5): while self.running: coro = callable() try: return (await asyncio.wait_for(coro, timeout)) except asyncio.TimeoutError: pass raise ComponentInterrupted
@property def id(self): 'Return a fully qualified ID string representing this event.' return f'{self.component.id}:{self.status}'
-5,425,337,119,097,036,000
Return a fully qualified ID string representing this event.
pipekit/component.py
id
DrDub/pipekit
python
@property def id(self): return f'{self.component.id}:{self.status}'
@property def format_is_text_affinity(self): 'return True if the storage format will automatically imply\n a TEXT affinity.\n\n If the storage format contains no non-numeric characters,\n it will imply a NUMERIC storage format on SQLite; in this case,\n the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,\n TIME_CHAR.\n\n .. versionadded:: 1.0.0\n\n ' spec = (self._storage_format % {'year': 0, 'month': 0, 'day': 0, 'hour': 0, 'minute': 0, 'second': 0, 'microsecond': 0}) return bool(re.search('[^0-9]', spec))
1,196,238,046,785,517,800
return True if the storage format will automatically imply a TEXT affinity. If the storage format contains no non-numeric characters, it will imply a NUMERIC storage format on SQLite; in this case, the type will generate its DDL as DATE_CHAR, DATETIME_CHAR, TIME_CHAR. .. versionadded:: 1.0.0
lib/sqlalchemy/dialects/sqlite/base.py
format_is_text_affinity
aalvrz/sqlalchemy
python
@property def format_is_text_affinity(self): spec = (self._storage_format % {'year': 0, 'month': 0, 'day': 0, 'hour': 0, 'minute': 0, 'second': 0, 'microsecond': 0}) return bool(re.search('[^0-9]', spec))
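The affinity check renders the storage format with all-zero fields and looks for any non-digit character. A standalone illustration, using a date format like the SQLite DATE default:

    import re

    storage_format = '%(year)04d-%(month)02d-%(day)02d'
    spec = storage_format % {'year': 0, 'month': 0, 'day': 0}
    print(spec)                             # 0000-00-00
    print(bool(re.search('[^0-9]', spec)))  # True: the dashes imply TEXT affinity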
def define_constraint_remote_table(self, constraint, table, preparer): 'Format the remote table clause of a CREATE CONSTRAINT clause.' return preparer.format_table(table, use_schema=False)
6,815,560,746,236,661,000
Format the remote table clause of a CREATE CONSTRAINT clause.
lib/sqlalchemy/dialects/sqlite/base.py
define_constraint_remote_table
aalvrz/sqlalchemy
python
def define_constraint_remote_table(self, constraint, table, preparer): return preparer.format_table(table, use_schema=False)
def _resolve_type_affinity(self, type_): "Return a data type from a reflected column, using affinity rules.\n\n SQLite's goal for universal compatibility introduces some complexity\n during reflection, as a column's defined type might not actually be a\n type that SQLite understands - or indeed, may not be defined *at all*.\n Internally, SQLite handles this with a 'data type affinity' for each\n column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',\n 'REAL', or 'NONE' (raw bits). The algorithm that determines this is\n listed in https://www.sqlite.org/datatype3.html section 2.1.\n\n This method allows SQLAlchemy to support that algorithm, while still\n providing access to smarter reflection utilities by recognizing\n column definitions that SQLite only supports through affinity (like\n DATE and DOUBLE).\n\n " match = re.match('([\\w ]+)(\\(.*?\\))?', type_) if match: coltype = match.group(1) args = match.group(2) else: coltype = '' args = '' if (coltype in self.ischema_names): coltype = self.ischema_names[coltype] elif ('INT' in coltype): coltype = sqltypes.INTEGER elif (('CHAR' in coltype) or ('CLOB' in coltype) or ('TEXT' in coltype)): coltype = sqltypes.TEXT elif (('BLOB' in coltype) or (not coltype)): coltype = sqltypes.NullType elif (('REAL' in coltype) or ('FLOA' in coltype) or ('DOUB' in coltype)): coltype = sqltypes.REAL else: coltype = sqltypes.NUMERIC if (args is not None): args = re.findall('(\\d+)', args) try: coltype = coltype(*[int(a) for a in args]) except TypeError: util.warn(('Could not instantiate type %s with reflected arguments %s; using no arguments.' % (coltype, args))) coltype = coltype() else: coltype = coltype() return coltype
3,770,842,394,055,191,000
Return a data type from a reflected column, using affinity rules. SQLite's goal for universal compatibility introduces some complexity during reflection, as a column's defined type might not actually be a type that SQLite understands - or indeed, may not be defined *at all*. Internally, SQLite handles this with a 'data type affinity' for each column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER', 'REAL', or 'NONE' (raw bits). The algorithm that determines this is listed in https://www.sqlite.org/datatype3.html section 2.1. This method allows SQLAlchemy to support that algorithm, while still providing access to smarter reflection utilities by recognizing column definitions that SQLite only supports through affinity (like DATE and DOUBLE).
lib/sqlalchemy/dialects/sqlite/base.py
_resolve_type_affinity
aalvrz/sqlalchemy
python
def _resolve_type_affinity(self, type_): match = re.match('([\\w ]+)(\\(.*?\\))?', type_) if match: coltype = match.group(1) args = match.group(2) else: coltype = '' args = '' if (coltype in self.ischema_names): coltype = self.ischema_names[coltype] elif ('INT' in coltype): coltype = sqltypes.INTEGER elif (('CHAR' in coltype) or ('CLOB' in coltype) or ('TEXT' in coltype)): coltype = sqltypes.TEXT elif (('BLOB' in coltype) or (not coltype)): coltype = sqltypes.NullType elif (('REAL' in coltype) or ('FLOA' in coltype) or ('DOUB' in coltype)): coltype = sqltypes.REAL else: coltype = sqltypes.NUMERIC if (args is not None): args = re.findall('(\\d+)', args) try: coltype = coltype(*[int(a) for a in args]) except TypeError: util.warn(('Could not instantiate type %s with reflected arguments %s; using no arguments.' % (coltype, args))) coltype = coltype() else: coltype = coltype() return coltype
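The leading regex splits a reflected type string into a type name and an optional argument list before the affinity rules run. On an illustrative input:

    import re

    match = re.match(r'([\w ]+)(\(.*?\))?', 'VARCHAR(30)')
    print(match.group(1))                        # VARCHAR
    print(re.findall(r'(\d+)', match.group(2)))  # ['30'], passed to the type as int args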
def view_or_basicauth(view, request, test_func, realm='', *args, **kwargs): "\n This is a helper function used by both 'logged_in_or_basicauth' and\n 'has_perm_or_basicauth' that does the nitty-gritty work of determining if they\n are already logged in or if they have provided proper http-authorization\n and returning the view if all goes well, otherwise responding with a 401.\n " if test_func(request.user): return view(request, *args, **kwargs) if ('HTTP_AUTHORIZATION' in request.META): auth = request.META['HTTP_AUTHORIZATION'].split() if (len(auth) == 2): if (auth[0].lower() == 'basic'): (uname, passwd) = base64.b64decode(auth[1]).split(':') user = authenticate(username=uname, password=passwd) if (user is not None): if user.is_active: login(request, user) request.user = user return view(request, *args, **kwargs) response = HttpResponse() response.status_code = 401 response['WWW-Authenticate'] = ('Basic realm="%s"' % realm) return response
221,229,046,749,284,450
This is a helper function used by both 'logged_in_or_basicauth' and 'has_perm_or_basicauth' that does the nitty-gritty work of determining if they are already logged in or if they have provided proper http-authorization and returning the view if all goes well, otherwise responding with a 401.
django/basic_auth/example1/decorators.py
view_or_basicauth
tullyrankin/python-frameworks
python
def view_or_basicauth(view, request, test_func, realm='', *args, **kwargs): if test_func(request.user): return view(request, *args, **kwargs) if ('HTTP_AUTHORIZATION' in request.META): auth = request.META['HTTP_AUTHORIZATION'].split() if (len(auth) == 2): if (auth[0].lower() == 'basic'): (uname, passwd) = base64.b64decode(auth[1]).split(':') user = authenticate(username=uname, password=passwd) if (user is not None): if user.is_active: login(request, user) request.user = user return view(request, *args, **kwargs) response = HttpResponse() response.status_code = 401 response['WWW-Authenticate'] = ('Basic realm="%s"' % realm) return response
def logged_in_or_basicauth(realm=''): "\n A simple decorator that requires a user to be logged in. If they are not\n logged in, the request is examined for an 'authorization' header.\n\n If the header is present it is tested for basic authentication and\n the user is logged in with the provided credentials.\n\n If the header is not present an HTTP 401 is sent back to the\n requestor to provide credentials.\n\n The purpose of this is that in several django projects I have needed\n several specific views that need to support basic authentication, yet the\n web site as a whole used django's provided authentication.\n\n The uses for this are for URLs that are accessed programmatically such as\n by RSS feed readers, yet the view requires a user to be logged in. Many RSS\n readers support supplying the authentication credentials via HTTP basic\n auth (and they do NOT support a redirect to a form where they post a\n username/password.)\n\n Usage is simple:\n\n @logged_in_or_basicauth()\n def your_view:\n ...\n\n You can provide the name of the realm to ask for authentication within.\n " def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_basicauth(func, request, (lambda u: u.is_authenticated()), realm, *args, **kwargs) return wrapper return view_decorator
-4,738,000,789,620,230,000
A simple decorator that requires a user to be logged in. If they are not logged in, the request is examined for an 'authorization' header. If the header is present it is tested for basic authentication and the user is logged in with the provided credentials. If the header is not present an HTTP 401 is sent back to the requestor to provide credentials. The purpose of this is that in several django projects I have needed several specific views that need to support basic authentication, yet the web site as a whole used django's provided authentication. The uses for this are for URLs that are accessed programmatically such as by RSS feed readers, yet the view requires a user to be logged in. Many RSS readers support supplying the authentication credentials via HTTP basic auth (and they do NOT support a redirect to a form where they post a username/password.) Usage is simple: @logged_in_or_basicauth() def your_view: ... You can provide the name of the realm to ask for authentication within.
django/basic_auth/example1/decorators.py
logged_in_or_basicauth
tullyrankin/python-frameworks
python
def logged_in_or_basicauth(realm=''): def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_basicauth(func, request, (lambda u: u.is_authenticated()), realm, *args, **kwargs) return wrapper return view_decorator
def has_perm_or_basicauth(perm, realm=''): "\n This is similar to the above decorator 'logged_in_or_basicauth'\n except that it requires the logged in user to have a specific\n permission.\n\n Use:\n\n @logged_in_or_basicauth('asforums.view_forumcollection')\n def your_view:\n ...\n " def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_basicauth(func, request, (lambda u: u.has_perm(perm)), realm, *args, **kwargs) return wrapper return view_decorator
-2,109,840,568,243,241,500
This is similar to the above decorator 'logged_in_or_basicauth' except that it requires the logged in user to have a specific permission. Use: @logged_in_or_basicauth('asforums.view_forumcollection') def your_view: ...
django/basic_auth/example1/decorators.py
has_perm_or_basicauth
tullyrankin/python-frameworks
python
def has_perm_or_basicauth(perm, realm=''): def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_basicauth(func, request, (lambda u: u.has_perm(perm)), realm, *args, **kwargs) return wrapper return view_decorator
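Usage follows the docstrings above; the view bodies are placeholders. Note also that under Python 3, base64.b64decode returns bytes, so the credentials split in view_or_basicauth would need a .decode('utf-8') first:

    @logged_in_or_basicauth()
    def feed_view(request):
        ...  # a view fetched programmatically, e.g. by RSS readers sending basic auth

    @has_perm_or_basicauth('asforums.view_forumcollection')
    def forum_view(request):
        ...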
def create_test_pipeline(): 'Builds an Iris example pipeline with slight changes.' pipeline_name = 'iris' iris_root = 'iris_root' serving_model_dir = os.path.join(iris_root, 'serving_model', pipeline_name) tfx_root = 'tfx_root' data_path = os.path.join(tfx_root, 'data_path') pipeline_root = os.path.join(tfx_root, 'pipelines', pipeline_name) example_gen = CsvExampleGen(input_base=data_path) statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) importer = ImporterNode(source_uri='m/y/u/r/i', properties={'split_names': "['train', 'eval']"}, custom_properties={'int_custom_property': 42, 'str_custom_property': '42'}, artifact_type=standard_artifacts.Examples).with_id('my_importer') another_statistics_gen = StatisticsGen(examples=importer.outputs['result']).with_id('another_statistics_gen') schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics']) example_validator = ExampleValidator(statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) trainer = Trainer(module_file=data_types.RuntimeParameter(name='module_file', default=os.path.join(iris_root, 'iris_utils.py'), ptype=str), custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor), examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], train_args=trainer_pb2.TrainArgs(num_steps=2000), eval_args=trainer_pb2.EvalArgs(num_steps=5)).with_platform_config(config=trainer_pb2.TrainArgs(num_steps=2000)) model_resolver = resolver.Resolver(strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=standard_artifacts.Model, producer_component_id=trainer.id), model_blessing=Channel(type=standard_artifacts.ModelBlessing)).with_id('latest_blessed_model_resolver') eval_config = tfma.EvalConfig(model_specs=[tfma.ModelSpec(signature_name='eval')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[tfma.MetricsSpec(thresholds={'sparse_categorical_accuracy': tfma.config.MetricThreshold(value_threshold=tfma.GenericValueThreshold(lower_bound={'value': 0.6}), change_threshold=tfma.GenericChangeThreshold(direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': (- 1e-10)}))})]) evaluator = Evaluator(examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) pusher = Pusher(model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination(filesystem=pusher_pb2.PushDestination.Filesystem(base_directory=serving_model_dir))) return pipeline.Pipeline(pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[example_gen, statistics_gen, another_statistics_gen, importer, schema_gen, example_validator, trainer, model_resolver, evaluator, pusher], enable_cache=True, beam_pipeline_args=['--my_testing_beam_pipeline_args=foo'], platform_config=trainer_pb2.TrainArgs(num_steps=2000), execution_mode=pipeline.ExecutionMode.SYNC)
-8,805,324,632,474,356,000
Builds an Iris example pipeline with slight changes.
tfx/dsl/compiler/testdata/iris_pipeline_sync.py
create_test_pipeline
Saiprasad16/tfx
python
def create_test_pipeline(): pipeline_name = 'iris' iris_root = 'iris_root' serving_model_dir = os.path.join(iris_root, 'serving_model', pipeline_name) tfx_root = 'tfx_root' data_path = os.path.join(tfx_root, 'data_path') pipeline_root = os.path.join(tfx_root, 'pipelines', pipeline_name) example_gen = CsvExampleGen(input_base=data_path) statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) importer = ImporterNode(source_uri='m/y/u/r/i', properties={'split_names': "['train', 'eval']"}, custom_properties={'int_custom_property': 42, 'str_custom_property': '42'}, artifact_type=standard_artifacts.Examples).with_id('my_importer') another_statistics_gen = StatisticsGen(examples=importer.outputs['result']).with_id('another_statistics_gen') schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics']) example_validator = ExampleValidator(statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) trainer = Trainer(module_file=data_types.RuntimeParameter(name='module_file', default=os.path.join(iris_root, 'iris_utils.py'), ptype=str), custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor), examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], train_args=trainer_pb2.TrainArgs(num_steps=2000), eval_args=trainer_pb2.EvalArgs(num_steps=5)).with_platform_config(config=trainer_pb2.TrainArgs(num_steps=2000)) model_resolver = resolver.Resolver(strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=standard_artifacts.Model, producer_component_id=trainer.id), model_blessing=Channel(type=standard_artifacts.ModelBlessing)).with_id('latest_blessed_model_resolver') eval_config = tfma.EvalConfig(model_specs=[tfma.ModelSpec(signature_name='eval')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[tfma.MetricsSpec(thresholds={'sparse_categorical_accuracy': tfma.config.MetricThreshold(value_threshold=tfma.GenericValueThreshold(lower_bound={'value': 0.6}), change_threshold=tfma.GenericChangeThreshold(direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': (- 1e-10)}))})]) evaluator = Evaluator(examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) pusher = Pusher(model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination(filesystem=pusher_pb2.PushDestination.Filesystem(base_directory=serving_model_dir))) return pipeline.Pipeline(pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[example_gen, statistics_gen, another_statistics_gen, importer, schema_gen, example_validator, trainer, model_resolver, evaluator, pusher], enable_cache=True, beam_pipeline_args=['--my_testing_beam_pipeline_args=foo'], platform_config=trainer_pb2.TrainArgs(num_steps=2000), execution_mode=pipeline.ExecutionMode.SYNC)
def __init__(self, limit, action_shape, observation_shape, dtype='float32', do_valid=False): "Daniel: careful about RAM usage. See:\n https://github.com/BerkeleyAutomation/baselines-fork/issues/9\n\n For this we can assume that in the replay buffer, the teacher samples\n come first, and are fixed ahead of time, so our 'starting' index for\n adding into the replay buffer should be offset by this quantity.\n " self.limit = limit self.do_valid = do_valid if self.do_valid: self.valid_frac = 0.2 self.nb_valid_items = 0 self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=dtype) self.actions = RingBuffer(limit, shape=action_shape) self.nb_teach = 0 self.done_adding_teach = False
9,051,821,898,664,674,000
Daniel: careful about RAM usage. See: https://github.com/BerkeleyAutomation/baselines-fork/issues/9 For this we can assume that in the replay buffer, the teacher samples come first, and are fixed ahead of time, so our 'starting' index for adding into the replay buffer should be offset by this quantity.
baselines/imit/memory.py
__init__
DanielTakeshi/baselines-fork
python
def __init__(self, limit, action_shape, observation_shape, dtype='float32', do_valid=False): self.limit = limit self.do_valid = do_valid if self.do_valid: self.valid_frac = 0.2 self.nb_valid_items = 0 self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=dtype) self.actions = RingBuffer(limit, shape=action_shape) self.nb_teach = 0 self.done_adding_teach = False
def append(self, obs0, action, is_teacher=False, training=True): "Keep separate copies of obs0, obs1. So it's not memory efficient.\n " if (not training): return if is_teacher: assert (not self.done_adding_teach), self.nb_teach assert (self.nb_teach < self.limit), self.nb_teach self.nb_teach += 1 self.observations0.append(obs0, is_teacher) self.actions.append(action, is_teacher)
6,900,547,232,302,533,000
Keep separate copies of obs0, obs1. So it's not memory efficient.
baselines/imit/memory.py
append
DanielTakeshi/baselines-fork
python
def append(self, obs0, action, is_teacher=False, training=True): if (not training): return if is_teacher: assert (not self.done_adding_teach), self.nb_teach assert (self.nb_teach < self.limit), self.nb_teach self.nb_teach += 1 self.observations0.append(obs0, is_teacher) self.actions.append(action, is_teacher)
def set_teacher_idx(self): 'Call from IMIT so we do not over-write teacher data.\n ' self.done_adding_teach = True
7,528,259,754,594,265,000
Call from IMIT so we do not over-write teacher data.
baselines/imit/memory.py
set_teacher_idx
DanielTakeshi/baselines-fork
python
def set_teacher_idx(self): self.done_adding_teach = True
def set_valid_idx(self): 'Set the validation index.\n ' assert self.done_adding_teach self.nb_valid_items = int((self.valid_frac * self.nb_entries))
-1,232,710,549,275,495,700
Set the validation index.
baselines/imit/memory.py
set_valid_idx
DanielTakeshi/baselines-fork
python
def set_valid_idx(self): assert self.done_adding_teach self.nb_valid_items = int((self.valid_frac * self.nb_entries))
def get_valid_obs(self, s_idx, e_idx): 'Get a validation minibatch with fixed starting and ending indices.\n ' assert self.do_valid batch_idxs = np.arange(s_idx, e_idx) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) result = {'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch)} return result
6,104,942,645,043,721,000
Get a validation minibatch with fixed starting and ending indices.
baselines/imit/memory.py
get_valid_obs
DanielTakeshi/baselines-fork
python
def get_valid_obs(self, s_idx, e_idx): assert self.do_valid batch_idxs = np.arange(s_idx, e_idx) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) result = {'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch)} return result
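A sketch of the intended call order for this buffer, assuming the surrounding class is the Memory in baselines/imit/memory.py; teacher_data is a hypothetical iterable of (observation, action) pairs:

    mem = Memory(limit=1000, action_shape=(4,), observation_shape=(84, 84, 3), do_valid=True)
    for obs, act in teacher_data:   # teacher samples must be added first
        mem.append(obs, act, is_teacher=True)
    mem.set_teacher_idx()           # freeze the teacher region of the buffer
    mem.set_valid_idx()             # reserve valid_frac of the entries for validation
    batch = mem.get_valid_obs(0, mem.nb_valid_items)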
@asyncio.coroutine def test_async_get_all_descriptions(hass): 'Test async_get_all_descriptions.' group = loader.get_component(hass, 'group') group_config = {group.DOMAIN: {}} (yield from async_setup_component(hass, group.DOMAIN, group_config)) descriptions = (yield from service.async_get_all_descriptions(hass)) assert (len(descriptions) == 1) assert ('description' in descriptions['group']['reload']) assert ('fields' in descriptions['group']['reload']) logger = loader.get_component(hass, 'logger') logger_config = {logger.DOMAIN: {}} (yield from async_setup_component(hass, logger.DOMAIN, logger_config)) descriptions = (yield from service.async_get_all_descriptions(hass)) assert (len(descriptions) == 2) assert ('description' in descriptions[logger.DOMAIN]['set_level']) assert ('fields' in descriptions[logger.DOMAIN]['set_level'])
5,295,321,943,545,454,000
Test async_get_all_descriptions.
tests/helpers/test_service.py
test_async_get_all_descriptions
DevRGT/home-assistant
python
@asyncio.coroutine def test_async_get_all_descriptions(hass): group = loader.get_component(hass, 'group') group_config = {group.DOMAIN: {}} (yield from async_setup_component(hass, group.DOMAIN, group_config)) descriptions = (yield from service.async_get_all_descriptions(hass)) assert (len(descriptions) == 1) assert ('description' in descriptions['group']['reload']) assert ('fields' in descriptions['group']['reload']) logger = loader.get_component(hass, 'logger') logger_config = {logger.DOMAIN: {}} (yield from async_setup_component(hass, logger.DOMAIN, logger_config)) descriptions = (yield from service.async_get_all_descriptions(hass)) assert (len(descriptions) == 2) assert ('description' in descriptions[logger.DOMAIN]['set_level']) assert ('fields' in descriptions[logger.DOMAIN]['set_level'])
def setUp(self): 'Set up things to be run when tests are started.' self.hass = get_test_home_assistant() self.calls = mock_service(self.hass, 'test_domain', 'test_service')
5,043,399,500,462,036,000
Set up things to be run when tests are started.
tests/helpers/test_service.py
setUp
DevRGT/home-assistant
python
def setUp(self): self.hass = get_test_home_assistant() self.calls = mock_service(self.hass, 'test_domain', 'test_service')
def tearDown(self): 'Stop everything that was started.' self.hass.stop()
-7,983,443,536,413,136,000
Stop everything that was started.
tests/helpers/test_service.py
tearDown
DevRGT/home-assistant
python
def tearDown(self): self.hass.stop()
def test_template_service_call(self): 'Test service call with templating.' config = {'service_template': "{{ 'test_domain.test_service' }}", 'entity_id': 'hello.world', 'data_template': {'hello': "{{ 'goodbye' }}", 'data': {'value': "{{ 'complex' }}", 'simple': 'simple'}, 'list': ["{{ 'list' }}", '2']}} service.call_from_config(self.hass, config) self.hass.block_till_done() self.assertEqual('goodbye', self.calls[0].data['hello']) self.assertEqual('complex', self.calls[0].data['data']['value']) self.assertEqual('simple', self.calls[0].data['data']['simple']) self.assertEqual('list', self.calls[0].data['list'][0])
-1,124,711,168,828,296,400
Test service call with templating.
tests/helpers/test_service.py
test_template_service_call
DevRGT/home-assistant
python
def test_template_service_call(self): config = {'service_template': "{{ 'test_domain.test_service' }}", 'entity_id': 'hello.world', 'data_template': {'hello': "{{ 'goodbye' }}", 'data': {'value': "{{ 'complex' }}", 'simple': 'simple'}, 'list': ["{{ 'list' }}", '2']}} service.call_from_config(self.hass, config) self.hass.block_till_done() self.assertEqual('goodbye', self.calls[0].data['hello']) self.assertEqual('complex', self.calls[0].data['data']['value']) self.assertEqual('simple', self.calls[0].data['data']['simple']) self.assertEqual('list', self.calls[0].data['list'][0])
def test_passing_variables_to_templates(self): 'Test passing variables to templates.' config = {'service_template': '{{ var_service }}', 'entity_id': 'hello.world', 'data_template': {'hello': '{{ var_data }}'}} service.call_from_config(self.hass, config, variables={'var_service': 'test_domain.test_service', 'var_data': 'goodbye'}) self.hass.block_till_done() self.assertEqual('goodbye', self.calls[0].data['hello'])
8,943,617,645,706,332,000
Test passing variables to templates.
tests/helpers/test_service.py
test_passing_variables_to_templates
DevRGT/home-assistant
python
def test_passing_variables_to_templates(self): config = {'service_template': '{{ var_service }}', 'entity_id': 'hello.world', 'data_template': {'hello': '{{ var_data }}'}} service.call_from_config(self.hass, config, variables={'var_service': 'test_domain.test_service', 'var_data': 'goodbye'}) self.hass.block_till_done() self.assertEqual('goodbye', self.calls[0].data['hello'])
def test_bad_template(self): 'Test passing bad template.' config = {'service_template': '{{ var_service }}', 'entity_id': 'hello.world', 'data_template': {'hello': '{{ states + unknown_var }}'}} service.call_from_config(self.hass, config, variables={'var_service': 'test_domain.test_service', 'var_data': 'goodbye'}) self.hass.block_till_done() self.assertEqual(len(self.calls), 0)
-2,483,393,078,968,225,300
Test passing bad template.
tests/helpers/test_service.py
test_bad_template
DevRGT/home-assistant
python
def test_bad_template(self): config = {'service_template': '{{ var_service }}', 'entity_id': 'hello.world', 'data_template': {'hello': '{{ states + unknown_var }}'}} service.call_from_config(self.hass, config, variables={'var_service': 'test_domain.test_service', 'var_data': 'goodbye'}) self.hass.block_till_done() self.assertEqual(len(self.calls), 0)
def test_split_entity_string(self): 'Test splitting of entity string.' service.call_from_config(self.hass, {'service': 'test_domain.test_service', 'entity_id': 'hello.world, sensor.beer'}) self.hass.block_till_done() self.assertEqual(['hello.world', 'sensor.beer'], self.calls[(- 1)].data.get('entity_id'))
-6,907,887,811,830,371,000
Test splitting of entity string.
tests/helpers/test_service.py
test_split_entity_string
DevRGT/home-assistant
python
def test_split_entity_string(self): service.call_from_config(self.hass, {'service': 'test_domain.test_service', 'entity_id': 'hello.world, sensor.beer'}) self.hass.block_till_done() self.assertEqual(['hello.world', 'sensor.beer'], self.calls[(- 1)].data.get('entity_id'))
def test_not_mutate_input(self): 'Test for immutable input.' config = cv.SERVICE_SCHEMA({'service': 'test_domain.test_service', 'entity_id': 'hello.world, sensor.beer', 'data': {'hello': 1}, 'data_template': {'nested': {'value': '{{ 1 + 1 }}'}}}) orig = deepcopy(config) template.attach(self.hass, orig) service.call_from_config(self.hass, config, validate_config=False) assert (orig == config)
-4,141,478,369,422,551,600
Test for immutable input.
tests/helpers/test_service.py
test_not_mutate_input
DevRGT/home-assistant
python
def test_not_mutate_input(self): config = cv.SERVICE_SCHEMA({'service': 'test_domain.test_service', 'entity_id': 'hello.world, sensor.beer', 'data': {'hello': 1}, 'data_template': {'nested': {'value': '{{ 1 + 1 }}'}}}) orig = deepcopy(config) template.attach(self.hass, orig) service.call_from_config(self.hass, config, validate_config=False) assert (orig == config)
@patch('homeassistant.helpers.service._LOGGER.error') def test_fail_silently_if_no_service(self, mock_log): 'Test failing if service is missing.' service.call_from_config(self.hass, None) self.assertEqual(1, mock_log.call_count) service.call_from_config(self.hass, {}) self.assertEqual(2, mock_log.call_count) service.call_from_config(self.hass, {'service': 'invalid'}) self.assertEqual(3, mock_log.call_count)
8,630,739,014,849,930,000
Test failing if service is missing.
tests/helpers/test_service.py
test_fail_silently_if_no_service
DevRGT/home-assistant
python
@patch('homeassistant.helpers.service._LOGGER.error') def test_fail_silently_if_no_service(self, mock_log): service.call_from_config(self.hass, None) self.assertEqual(1, mock_log.call_count) service.call_from_config(self.hass, {}) self.assertEqual(2, mock_log.call_count) service.call_from_config(self.hass, {'service': 'invalid'}) self.assertEqual(3, mock_log.call_count)
def test_extract_entity_ids(self): 'Test extract_entity_ids method.' self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) self.hass.states.set('light.Kitchen', STATE_OFF) loader.get_component(self.hass, 'group').Group.create_group(self.hass, 'test', ['light.Ceiling', 'light.Kitchen']) call = ha.ServiceCall('light', 'turn_on', {ATTR_ENTITY_ID: 'light.Bowl'}) self.assertEqual(['light.bowl'], service.extract_entity_ids(self.hass, call)) call = ha.ServiceCall('light', 'turn_on', {ATTR_ENTITY_ID: 'group.test'}) self.assertEqual(['light.ceiling', 'light.kitchen'], service.extract_entity_ids(self.hass, call)) self.assertEqual(['group.test'], service.extract_entity_ids(self.hass, call, expand_group=False))
-558,844,837,868,676,600
Test extract_entity_ids method.
tests/helpers/test_service.py
test_extract_entity_ids
DevRGT/home-assistant
python
def test_extract_entity_ids(self): self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) self.hass.states.set('light.Kitchen', STATE_OFF) loader.get_component(self.hass, 'group').Group.create_group(self.hass, 'test', ['light.Ceiling', 'light.Kitchen']) call = ha.ServiceCall('light', 'turn_on', {ATTR_ENTITY_ID: 'light.Bowl'}) self.assertEqual(['light.bowl'], service.extract_entity_ids(self.hass, call)) call = ha.ServiceCall('light', 'turn_on', {ATTR_ENTITY_ID: 'group.test'}) self.assertEqual(['light.ceiling', 'light.kitchen'], service.extract_entity_ids(self.hass, call)) self.assertEqual(['group.test'], service.extract_entity_ids(self.hass, call, expand_group=False))
def set_selected(self, selected): ' Update the styling to reflect if the widget is selected or not ' if selected: p = QtGui.QPalette() highlight_col = p.color(QtGui.QPalette.Active, QtGui.QPalette.Highlight) transp_highlight_str = ('rgba(%s, %s, %s, 25%%)' % (highlight_col.red(), highlight_col.green(), highlight_col.blue())) highlight_str = ('rgb(%s, %s, %s)' % (highlight_col.red(), highlight_col.green(), highlight_col.blue())) self.setStyleSheet(('#frame {\n border-width: 2px;\n border-color: %s;\n border-style: solid;\n background-color: %s;\n }\n ' % (highlight_str, transp_highlight_str))) else: self.setStyleSheet('#frame {\n border-width: 2px;\n border-color: transparent;\n border-style: solid;\n }')
7,886,654,971,727,653,000
Update the styling to reflect if the widget is selected or not
install/app_store/tk-framework-adminui/v0.1.6/python/setup_project/project_delegate.py
set_selected
JoanAzpeitia/lp_sg
python
def set_selected(self, selected): if selected: p = QtGui.QPalette() highlight_col = p.color(QtGui.QPalette.Active, QtGui.QPalette.Highlight) transp_highlight_str = ('rgba(%s, %s, %s, 25%%)' % (highlight_col.red(), highlight_col.green(), highlight_col.blue())) highlight_str = ('rgb(%s, %s, %s)' % (highlight_col.red(), highlight_col.green(), highlight_col.blue())) self.setStyleSheet(('#frame {\n border-width: 2px;\n border-color: %s;\n border-style: solid;\n background-color: %s;\n }\n ' % (highlight_str, transp_highlight_str))) else: self.setStyleSheet('#frame {\n border-width: 2px;\n border-color: transparent;\n border-style: solid;\n }')
def get_result(result_path, label_path): 'calculate the result' files = os.listdir(result_path) with open(label_path, 'r') as label: labels = json.load(label) top1 = 0 top5 = 0 total_data = len(files) for file in files: img_ids_name = file.split('_0.')[0] data_path = os.path.join(result_path, (img_ids_name + '_0.bin')) result = np.fromfile(data_path, dtype=np.float16).reshape(batch_size, num_classes) for batch in range(batch_size): predict = np.argsort((- result[batch]), axis=(- 1)) if (labels[(img_ids_name + '.JPEG')] == predict[0]): top1 += 1 if (labels[(img_ids_name + '.JPEG')] in predict[:5]): top5 += 1 print(f'Total data: {total_data}, top1 accuracy: {(top1 / total_data)}, top5 accuracy: {(top5 / total_data)}.')
7,567,841,998,634,557,000
calculate the result
research/cv/resnext152_64x4d/postprocess.py
get_result
mindspore-ai/models
python
def get_result(result_path, label_path): files = os.listdir(result_path) with open(label_path, 'r') as label: labels = json.load(label) top1 = 0 top5 = 0 total_data = len(files) for file in files: img_ids_name = file.split('_0.')[0] data_path = os.path.join(result_path, (img_ids_name + '_0.bin')) result = np.fromfile(data_path, dtype=np.float16).reshape(batch_size, num_classes) for batch in range(batch_size): predict = np.argsort((- result[batch]), axis=(- 1)) if (labels[(img_ids_name + '.JPEG')] == predict[0]): top1 += 1 if (labels[(img_ids_name + '.JPEG')] in predict[:5]): top5 += 1 print(f'Total data: {total_data}, top1 accuracy: {(top1 / total_data)}, top5 accuracy: {(top5 / total_data)}.')
def fxcm_ohlc(p_instrument, p_period, p_ini, p_end): '\n to download OHLC prices from FXCM broker\n\n Parameters\n ----------\n \n p_instrument: str\n The name of the instrument according to fxcmpy\n\n p_period: str\n The frequency or granularity of prices, according to fxcmpy\n\n p_ini: str\n Initial timestamp, in format "yyyy-mm-dd hh:mm:ss"\n\n p_end: str\n final timestamp, in format "yyyy-mm-dd hh:mm:ss"\n\n Returns\n -------\n\n data_ohlc: DataFrame\n with columns Open, High, Low, Close and Timestamp as index\n\n ' data_ohlc = con.get_candles(instrument=p_instrument, period=p_period, start=p_ini, end=p_end) data_ohlc['open'] = ((data_ohlc['bidopen'] + data_ohlc['askopen']) * 0.5) data_ohlc['high'] = ((data_ohlc['bidhigh'] + data_ohlc['askhigh']) * 0.5) data_ohlc['low'] = ((data_ohlc['bidlow'] + data_ohlc['asklow']) * 0.5) data_ohlc['close'] = ((data_ohlc['bidclose'] + data_ohlc['askclose']) * 0.5) data_ohlc = data_ohlc[['open', 'high', 'low', 'close']] data_ohlc.index.name = 'timestamp' return data_ohlc
8,197,742,484,928,827,000
to download OHLC prices from FXCM broker Parameters ---------- p_instrument: str The name of the instrument according to fxcmpy p_period: str The frequency or granularity of prices, according to fxcmpy p_ini: str Initial timestamp, in format "yyyy-mm-dd hh:mm:ss" p_end: str final timestamp, in format "yyyy-mm-dd hh:mm:ss" Returns ------- data_ohlc: DataFrame with columns Open, High, Low, Close and Timestamp as index
data.py
fxcm_ohlc
IFFranciscoME/trading-project
python
def fxcm_ohlc(p_instrument, p_period, p_ini, p_end): data_ohlc = con.get_candles(instrument=p_instrument, period=p_period, start=p_ini, end=p_end) data_ohlc['open'] = ((data_ohlc['bidopen'] + data_ohlc['askopen']) * 0.5) data_ohlc['high'] = ((data_ohlc['bidhigh'] + data_ohlc['askhigh']) * 0.5) data_ohlc['low'] = ((data_ohlc['bidlow'] + data_ohlc['asklow']) * 0.5) data_ohlc['close'] = ((data_ohlc['bidclose'] + data_ohlc['askclose']) * 0.5) data_ohlc = data_ohlc[['open', 'high', 'low', 'close']] data_ohlc.index.name = 'timestamp' return data_ohlc
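Each OHLC column is the bid/ask midpoint. The arithmetic in isolation, on a toy frame with illustrative values:

    import pandas as pd

    quotes = pd.DataFrame({'bidopen': [1.1000], 'askopen': [1.1002]})
    quotes['open'] = (quotes['bidopen'] + quotes['askopen']) * 0.5
    print(quotes['open'].iloc[0])  # ~1.1001, the mid price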
def sum_crc16(crc, file_bit): '\n Compute CRC16\n @param crc: initial checksum\n @param file_bit: binary stream of the file\n @return: checksum\n ' for bit in file_bit: crc = (65535 & crc) temp = (crc >> 8) crc = (65535 & crc) crc <<= 8 crc = (65535 & crc) crc ^= crc_list[(255 & (temp ^ bit))] return crc
7,158,657,531,153,871,000
Compute CRC16 @param crc: initial checksum @param file_bit: binary stream of the file @return: checksum
hooks/commit-msg.py
sum_crc16
wotsen/learning_platform_server
python
def sum_crc16(crc, file_bit): for bit in file_bit: crc = (65535 & crc) temp = (crc >> 8) crc = (65535 & crc) crc <<= 8 crc = (65535 & crc) crc ^= crc_list[(255 & (temp ^ bit))] return crc
def sum_file_crc16(file_name): '\n Compute the checksum of a file, 4096 bytes at a time\n @param file_name: file name\n @return: checksum\n ' crc = 0 with open(file_name, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): crc = sum_crc16(crc, chunk) return crc
-1,984,359,075,406,307,000
Compute the checksum of a file, 4096 bytes at a time @param file_name: file name @return: checksum
hooks/commit-msg.py
sum_file_crc16
wotsen/learning_platform_server
python
def sum_file_crc16(file_name): crc = 0 with open(file_name, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): crc = sum_crc16(crc, chunk) return crc
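Because sum_crc16 threads the running crc through each call, feeding the data in chunks yields the same checksum as a single pass over the whole byte string. A quick self-check, assuming the module's crc_list table is in scope:

    data = b'hello world' * 1000
    whole = sum_crc16(0, data)
    chunked = 0
    for i in range(0, len(data), 4096):
        chunked = sum_crc16(chunked, data[i:i + 4096])
    assert whole == chunked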
@property def valenceSubshell(self): 'Name of the valence subshell.' for (subshell, properties) in SUBSHELLS.items(): if (self.atomicNumber in range(*properties['atomicNumbers'])): return subshell return None
883,103,798,630,194,700
Name of the valence subshell.
crispy/gui/quanty/calculation.py
valenceSubshell
jminar/crispy
python
@property def valenceSubshell(self): for (subshell, properties) in SUBSHELLS.items(): if (self.atomicNumber in range(*properties['atomicNumbers'])): return subshell return None
@property def valenceBlock(self): 'Name of the valence block.' return self.valenceSubshell[(- 1)]
-2,266,033,982,444,831,500
Name of the valence block.
crispy/gui/quanty/calculation.py
valenceBlock
jminar/crispy
python
@property def valenceBlock(self): return self.valenceSubshell[(- 1)]
@property def valenceOccupancy(self): 'Occupancy of the valence subshell.' assert (self.charge is not None), 'The charge must be set.' charge = int(self.charge[::(- 1)]) ion_electrons = (self.atomicNumber - charge) core_electrons = SUBSHELLS[self.valenceSubshell]['coreElectrons'] occupancy = (ion_electrons - core_electrons) return occupancy
-9,197,985,960,004,200,000
Occupancy of the valence subshell.
crispy/gui/quanty/calculation.py
valenceOccupancy
jminar/crispy
python
@property def valenceOccupancy(self): assert (self.charge is not None), 'The charge must be set.' charge = int(self.charge[::(- 1)]) ion_electrons = (self.atomicNumber - charge) core_electrons = SUBSHELLS[self.valenceSubshell]['coreElectrons'] occupancy = (ion_electrons - core_electrons) return occupancy
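The charge string is stored with a trailing sign, e.g. '2+' or '3+'; reversing it with [::-1] moves the sign to the front so int() can parse it. A standalone illustration:

for charge in ('2+', '3+', '1-'):
    print(charge, '->', int(charge[::-1]))  # 2+ -> 2, 3+ -> 3, 1- -> -1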
@staticmethod def countParticles(shell, occupancy): 'Count the number of particles (electrons) or quasiparticles\n (holes) in a shell.' key = f'{shell}{occupancy}' if (key in ('s0', 's2', 'p0', 'p6', 'd0', 'd10', 'f0', 'f14')): particles = 'zero' elif (key in ('s1', 'p1', 'p5', 'd1', 'd9', 'f1', 'f13')): particles = 'one' else: particles = 'multiple' return particles
2,591,367,215,900,072,000
Count the number of particles (electrons) or quasiparticles (holes) in a shell.
crispy/gui/quanty/calculation.py
countParticles
jminar/crispy
python
@staticmethod def countParticles(shell, occupancy): key = f'{shell}{occupancy}' if (key in ('s0', 's2', 'p0', 'p6', 'd0', 'd10', 'f0', 'f14')): particles = 'zero' elif (key in ('s1', 'p1', 'p5', 'd1', 'd9', 'f1', 'f13')): particles = 'one' else: particles = 'multiple' return particles
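Full or empty shells count as 'zero' (quasi)particles, shells one electron away from full or empty as 'one', and everything else as 'multiple'. Calling the static method directly (the owning class name is not visible in this excerpt, so Element below is only a placeholder):

print(Element.countParticles('d', 9))  # 'one' -- a single hole in the d shell
print(Element.countParticles('p', 6))  # 'zero' -- full p shell
print(Element.countParticles('d', 5))  # 'multiple'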
@property def numberOfCoreParticles(self): 'Count the number of core particles. Returns None if the electronic\n configuration has no core.' if (not self.hasCore): return None (core_shell, _) = self.shells (core_occupancy, _) = self.occupancies return self.countParticles(core_shell, core_occupancy)
-7,317,887,003,854,708,000
Count the number of core particles. Returns None if the electronic configuration has no core.
crispy/gui/quanty/calculation.py
numberOfCoreParticles
jminar/crispy
python
@property def numberOfCoreParticles(self): if (not self.hasCore): return None (core_shell, _) = self.shells (core_occupancy, _) = self.occupancies return self.countParticles(core_shell, core_occupancy)
@property def coreSubshells(self): 'Use the name of the edge to determine the names of the core subshells.\n e.g. for K (1s) the function returns ("1s",), while for K-L2,3 (1s2p) it\n returns ("1s", "2p").\n ' PATTERNS = ('.*\\((\\d\\w)(\\d\\w)\\)', '.*\\((\\d\\w)\\)') name = self.value tokens = (token for pattern in PATTERNS for token in re.findall(pattern, name)) [tokens] = tokens if (not tokens): raise ValueError('The name of the edge cannot be parsed.') if isinstance(tokens, str): tokens = (tokens,) return tokens
-8,442,643,745,731,064,000
Use the name of the edge to determine the names of the core subshells. e.g. for K (1s) the function returns ("1s",), while for K-L2,3 (1s2p) it returns ("1s", "2p").
crispy/gui/quanty/calculation.py
coreSubshells
jminar/crispy
python
@property def coreSubshells(self): PATTERNS = ('.*\\((\\d\\w)(\\d\\w)\\)', '.*\\((\\d\\w)\\)') name = self.value tokens = (token for pattern in PATTERNS for token in re.findall(pattern, name)) [tokens] = tokens if (not tokens): raise ValueError('The name of the edge cannot be parsed.') if isinstance(tokens, str): tokens = (tokens,) return tokens
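The two regex patterns can be examined on their own: the first captures two-subshell edge names such as 'K-L2,3 (1s2p)', the second single-subshell names such as 'K (1s)'. A self-contained check:

import re

PATTERNS = ('.*\\((\\d\\w)(\\d\\w)\\)', '.*\\((\\d\\w)\\)')
for name in ('K-L2,3 (1s2p)', 'K (1s)'):
    matches = [t for p in PATTERNS for t in re.findall(p, name)]
    print(name, '->', matches[0])
# K-L2,3 (1s2p) -> ('1s', '2p')
# K (1s) -> 1s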
@property def labels(self): 'Edge or line labels needed to interrogate xraydb database.' CONVERTERS = {'Kɑ': 'Ka1', 'Kβ': 'Kb1', 'K': 'K', 'L1': 'L1', 'L2,3': 'L3', 'M1': 'M1', 'M2,3': 'M3', 'M4,5': 'M5', 'N1': 'N1', 'N2,3': 'N3', 'N4,5': 'N5', 'O1': 'O1', 'O2,3': 'O3', 'O4,5': 'O5'} (raw, _) = self.value.split() names = list() separator = '-' if (separator in raw): names.extend(raw.split(separator)) else: names.append(raw) names = [CONVERTERS[name] for name in names] return tuple(names)
231,516,312,385,425,570
Edge or line labels needed to interrogate xraydb database.
crispy/gui/quanty/calculation.py
labels
jminar/crispy
python
@property def labels(self): CONVERTERS = {'Kɑ': 'Ka1', 'Kβ': 'Kb1', 'K': 'K', 'L1': 'L1', 'L2,3': 'L3', 'M1': 'M1', 'M2,3': 'M3', 'M4,5': 'M5', 'N1': 'N1', 'N2,3': 'N3', 'N4,5': 'N5', 'O1': 'O1', 'O2,3': 'O3', 'O4,5': 'O5'} (raw, _) = self.value.split() names = list() separator = '-' if (separator in raw): names.extend(raw.split(separator)) else: names.append(raw) names = [CONVERTERS[name] for name in names] return tuple(names)
@property @lru_cache() def configurations(self): 'Determine the electronic configurations involved in a calculation.' valenceSubshell = self.element.valenceSubshell valenceOccupancy = self.element.valenceOccupancy configurations = list() initialConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(valenceSubshell,), occupancies=(valenceOccupancy,)) configurations.append(initialConfiguration) if self.experiment.isOneStep: if (not self.experiment.excitesToVacuum): valenceOccupancy += 1 (coreSubshell,) = self.edge.coreSubshells (coreOccupancy,) = self.edge.coreOccupancies coreOccupancy -= 1 finalConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(coreSubshell, valenceSubshell), occupancies=(coreOccupancy, valenceOccupancy)) configurations.append(finalConfiguration) else: if (not self.experiment.excitesToVacuum): valenceOccupancy += 1 (core1Subshell, core2Subshell) = self.edge.coreSubshells (core1Occupancy, core2Occupancy) = self.edge.coreOccupancies core1Occupancy -= 1 core2Occupancy -= 1 intermediateConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(core1Subshell, valenceSubshell), occupancies=(core1Occupancy, valenceOccupancy)) configurations.append(intermediateConfiguration) if (core2Subshell == valenceSubshell): finalConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(valenceSubshell,), occupancies=((valenceOccupancy - 1),)) else: finalConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(core2Subshell, valenceSubshell), occupancies=(core2Occupancy, valenceOccupancy)) configurations.append(finalConfiguration) return configurations
1,785,429,563,648,330,800
Determine the electronic configurations involved in a calculation.
crispy/gui/quanty/calculation.py
configurations
jminar/crispy
python
@property @lru_cache() def configurations(self): valenceSubshell = self.element.valenceSubshell valenceOccupancy = self.element.valenceOccupancy configurations = list() initialConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(valenceSubshell,), occupancies=(valenceOccupancy,)) configurations.append(initialConfiguration) if self.experiment.isOneStep: if (not self.experiment.excitesToVacuum): valenceOccupancy += 1 (coreSubshell,) = self.edge.coreSubshells (coreOccupancy,) = self.edge.coreOccupancies coreOccupancy -= 1 finalConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(coreSubshell, valenceSubshell), occupancies=(coreOccupancy, valenceOccupancy)) configurations.append(finalConfiguration) else: if (not self.experiment.excitesToVacuum): valenceOccupancy += 1 (core1Subshell, core2Subshell) = self.edge.coreSubshells (core1Occupancy, core2Occupancy) = self.edge.coreOccupancies core1Occupancy -= 1 core2Occupancy -= 1 intermediateConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(core1Subshell, valenceSubshell), occupancies=(core1Occupancy, valenceOccupancy)) configurations.append(intermediateConfiguration) if (core2Subshell == valenceSubshell): finalConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(valenceSubshell,), occupancies=((valenceOccupancy - 1),)) else: finalConfiguration = Configuration.fromSubshellsAndOccupancies(subshells=(core2Subshell, valenceSubshell), occupancies=(core2Occupancy, valenceOccupancy)) configurations.append(finalConfiguration) return configurations
@property def replacements(self): 'Replacements dictionary used to fill the calculation template. The\n construction of more complex items is delegated to the respective object.\n ' replacements = dict() replacements['Verbosity'] = settings.value('Quanty/Verbosity') replacements['DenseBorder'] = settings.value('Quanty/DenseBorder') replacements['ShiftToZero'] = settings.value('Quanty/ShiftSpectra') subshell = self.element.valenceSubshell occupancy = self.element.valenceOccupancy replacements[f'NElectrons_{subshell}'] = occupancy replacements['Temperature'] = self.temperature.value replacements['Prefix'] = self.value replacements.update(self.axes.xaxis.replacements) if self.experiment.isTwoDimensional: replacements.update(self.axes.yaxis.replacements) replacements.update(self.spectra.replacements) replacements.update(self.hamiltonian.replacements) return replacements
-975,504,811,248,162,400
Replacements dictionary used to fill the calculation template. The construction of more complex items is delegated to the respective object.
crispy/gui/quanty/calculation.py
replacements
jminar/crispy
python
@property def replacements(self): replacements = dict() replacements['Verbosity'] = settings.value('Quanty/Verbosity') replacements['DenseBorder'] = settings.value('Quanty/DenseBorder') replacements['ShiftToZero'] = settings.value('Quanty/ShiftSpectra') subshell = self.element.valenceSubshell occupancy = self.element.valenceOccupancy replacements[f'NElectrons_{subshell}'] = occupancy replacements['Temperature'] = self.temperature.value replacements['Prefix'] = self.value replacements.update(self.axes.xaxis.replacements) if self.experiment.isTwoDimensional: replacements.update(self.axes.yaxis.replacements) replacements.update(self.spectra.replacements) replacements.update(self.hamiltonian.replacements) return replacements
def resource(target, url, post=False, **kw_args): 'Specify a dependency on an online resource.\n\n Further takes in keyword arguments that are passed to the appropriate method\n from :mod:`requests` or :mod:`urllib`.\n\n Args:\n target (str): Target file.\n url (str): Source URL.\n post (bool, optional): Make a POST request instead of a GET request.\n Only applicable if the URL starts with "http" or "https". Defaults\n to `False`.\n ' if (not os.path.exists(target)): with wbml.out.Section('Downloading file'): wbml.out.kv('Source', url) wbml.out.kv('Target', target) make_dirs(target) if url.startswith('ftp'): with closing(urllib.request.urlopen(url, **kw_args)) as r: with open(target, 'wb') as f: shutil.copyfileobj(r, f) else: request = (requests.post if post else requests.get) with request(url, stream=True, **kw_args) as r: with open(target, 'wb') as f: shutil.copyfileobj(r.raw, f)
2,942,287,572,255,171,000
Specify a dependency on an online resource. Further takes in keyword arguments that are passed to the appropriate method from :mod:`requests` or :mod:`urllib`. Args: target (str): Target file. url (str): Source URL. post (bool, optional): Make a POST request instead of a GET request. Only applicable if the URL starts with "http" or "https". Defaults to `False`.
wbml/data/data.py
resource
wesselb/wbml
python
def resource(target, url, post=False, **kw_args): if (not os.path.exists(target)): with wbml.out.Section('Downloading file'): wbml.out.kv('Source', url) wbml.out.kv('Target', target) make_dirs(target) if url.startswith('ftp'): with closing(urllib.request.urlopen(url, **kw_args)) as r: with open(target, 'wb') as f: shutil.copyfileobj(r, f) else: request = (requests.post if post else requests.get) with request(url, stream=True, **kw_args) as r: with open(target, 'wb') as f: shutil.copyfileobj(r.raw, f)
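A hedged usage sketch for resource; the URL and relative target below are placeholders, and the call is a no-op once the target file already exists:

resource(
    target=data_path('uci', 'airfoil.dat'),          # resolved under the package's data/ directory
    url='https://example.org/datasets/airfoil.dat',  # placeholder URL
)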
def dependency(target, source, commands): 'Specify a dependency that is generated from an existing file.\n\n Args:\n target (str): Target file.\n source (str): Source file.\n commands (list[str]): List of commands to generate target file.\n ' if (not os.path.exists(target)): with wbml.out.Section('Generating file'): wbml.out.kv('Source', source) wbml.out.kv('Target', target) if (not os.path.exists(source)): raise DependencyError(f'Source "{source}" asserted to exist, but it does not.') current_wd = os.getcwd() make_dirs(target) for command in commands: wbml.out.out(command) os.chdir(os.path.dirname(target)) subprocess.call(command, shell=True) os.chdir(current_wd)
-4,386,116,032,370,145,000
Specify a dependency that is generated from an existing file. Args: target (str): Target file. source (str): Source file. commands (list[str]): List of commands to generate target file.
wbml/data/data.py
dependency
wesselb/wbml
python
def dependency(target, source, commands): if (not os.path.exists(target)): with wbml.out.Section('Generating file'): wbml.out.kv('Source', source) wbml.out.kv('Target', target) if (not os.path.exists(source)): raise DependencyError(f'Source "{source}" asserted to exist, but it does not.') current_wd = os.getcwd() make_dirs(target) for command in commands: wbml.out.out(command) os.chdir(os.path.dirname(target)) subprocess.call(command, shell=True) os.chdir(current_wd)
def asserted_dependency(target): 'Specify a dependency that cannot be fetched.\n\n Args:\n target (str): Target file.\n ' if (not os.path.exists(target)): raise DependencyError(f'Dependency "{target}" is asserted to exist, but it does not, and it cannot be automatically fetched. Please put the file into place manually.')
853,585,098,750,543,500
Specify a dependency that cannot be fetched. Args: target (str): Target file.
wbml/data/data.py
asserted_dependency
wesselb/wbml
python
def asserted_dependency(target): if (not os.path.exists(target)): raise DependencyError(f'Dependency "{target}" is asserted to exist, but it does not, and it cannot be automatically fetched. Please put the file into place manually.')
def make_dirs(path): 'Make the directories in the path of a file.\n\n Args:\n path (url): Path of a file.\n ' os.makedirs(os.path.dirname(path), exist_ok=True)
4,601,052,578,769,177,600
Make the directories in the path of a file. Args: path (str): Path of a file.
wbml/data/data.py
make_dirs
wesselb/wbml
python
def make_dirs(path): os.makedirs(os.path.dirname(path), exist_ok=True)
def data_path(*xs): 'Get the path of a data file.\n\n Args:\n *xs (str): Parts of the path.\n\n Returns:\n str: Absolute path.\n ' return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'data', *xs))
-5,738,049,601,613,846,000
Get the path of a data file. Args: *xs (str): Parts of the path. Returns: str: Absolute path.
wbml/data/data.py
data_path
wesselb/wbml
python
def data_path(*xs): return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'data', *xs))
def split_df(df, index_range, columns, iloc=False): 'Split a data frame by selecting from columns a particular range.\n\n Args:\n df (:class:`pd.DataFrame`): Data frame to split.\n index_range (tuple): Tuple containing lower and upper limit of the\n range to split the index by. If `index_range = (a, b)`, then\n `[a, b)` is taken.\n columns (list[object]): Columns to select.\n iloc (bool, optional): The index range is the integer location instead\n of the index value. Defaults to `False`.\n\n Returns:\n tuple[:class:`pd.DataFrame`]: Selected rows from selected columns\n and the remainder.\n ' if iloc: inds = np.arange(df.shape[0]) rows = ((inds >= index_range[0]) & (inds < index_range[1])) else: rows = ((df.index >= index_range[0]) & (df.index < index_range[1])) selected = pd.DataFrame([df[name][rows] for name in columns]).T remainder = pd.DataFrame(([df[name][(~ rows)] for name in columns] + [df[name] for name in (set(df.columns) - set(columns))])).T selected_inds = [i for (i, c) in enumerate(df.columns) if (c in columns)] selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1) remainder = remainder.reindex(df.columns, axis=1) return (selected, remainder)
-4,051,214,683,030,475,300
Split a data frame by selecting from columns a particular range. Args: df (:class:`pd.DataFrame`): Data frame to split. index_range (tuple): Tuple containing lower and upper limit of the range to split the index by. If `index_range = (a, b)`, then `[a, b)` is taken. columns (list[object]): Columns to select. iloc (bool, optional): The index range is the integer location instead of the index value. Defaults to `False`. Returns: tuple[:class:`pd.DataFrame`]: Selected rows from selected columns and the remainder.
wbml/data/data.py
split_df
wesselb/wbml
python
def split_df(df, index_range, columns, iloc=False): if iloc: inds = np.arange(df.shape[0]) rows = ((inds >= index_range[0]) & (inds < index_range[1])) else: rows = ((df.index >= index_range[0]) & (df.index < index_range[1])) selected = pd.DataFrame([df[name][rows] for name in columns]).T remainder = pd.DataFrame(([df[name][(~ rows)] for name in columns] + [df[name] for name in (set(df.columns) - set(columns))])).T selected_inds = [i for (i, c) in enumerate(df.columns) if (c in columns)] selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1) remainder = remainder.reindex(df.columns, axis=1) return (selected, remainder)
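A small sketch of the split on a toy frame, using the integer-location mode:

import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
selected, remainder = split_df(df, index_range=(1, 3), columns=['y'], iloc=True)
print(selected)   # rows 1-2 of column y only
print(remainder)  # the other rows of y, plus all of x, re-indexed to the full column set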
def date_to_decimal_year(date, format=None): 'Convert a date to decimal year.\n\n Args:\n date (str): Date as a string.\n format (str, optional): Format of the date if a conversion is needed.\n\n Returns:\n float: Decimal year corresponding to the date.\n ' if format: date = datetime.datetime.strptime(date, format) start = datetime.date(date.year, 1, 1).toordinal() year_length = (datetime.date((date.year + 1), 1, 1).toordinal() - start) subday_time = 0 if hasattr(date, 'hour'): subday_time += ((date.hour / year_length) / 24) if hasattr(date, 'minute'): subday_time += (((date.minute / year_length) / 24) / 60) if hasattr(date, 'second'): subday_time += ((((date.second / year_length) / 24) / 60) / 60) return ((date.year + (float((date.toordinal() - start)) / year_length)) + subday_time)
638,305,501,370,878,600
Convert a date to decimal year. Args: date (str): Date as a string. format (str, optional): Format of the date if a conversion is needed. Returns: float: Decimal year corresponding to the date.
wbml/data/data.py
date_to_decimal_year
wesselb/wbml
python
def date_to_decimal_year(date, format=None): if format: date = datetime.datetime.strptime(date, format) start = datetime.date(date.year, 1, 1).toordinal() year_length = (datetime.date((date.year + 1), 1, 1).toordinal() - start) subday_time = 0 if hasattr(date, 'hour'): subday_time += ((date.hour / year_length) / 24) if hasattr(date, 'minute'): subday_time += (((date.minute / year_length) / 24) / 60) if hasattr(date, 'second'): subday_time += ((((date.second / year_length) / 24) / 60) / 60) return ((date.year + (float((date.toordinal() - start)) / year_length)) + subday_time)
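A quick numeric check of the conversion (2020 is a leap year, so 1 July is day 182 of 366):

print(date_to_decimal_year('2020-01-01', '%Y-%m-%d'))  # 2020.0
print(date_to_decimal_year('2020-07-01', '%Y-%m-%d'))  # ~2020.4973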
def check(file_to_check, testmode, debug): '\n Function open file, read each line and complete a dictionnary\n For each entry, launch check url : http/https or launch resolution then ping for MX/NS entry\n If one url not respond, launch email to alert \n\n Parameters\n ----------\n file_to_check : string\n This is the name of the fillethat contain list of url must be checked\n and mail for alert\n testmode : string\n This value is 0 by defaut and is to 1 if user launchscript on test mode:\n print enabled and no mail send\n debug : string\n This value is 0 by defaut and is to 1 if user launchscript on debug mode:\n more print enabled and no mail send\n\n Returns\n -------\n None.\n\n ' try: file = open(file_to_check, 'r') except: exit('open file failed') lines = file.readlines() file.close() url_dict = {} for line in lines: line = line.replace('\n', '') line = line.replace(' ', '\t') line = line.replace('\t\t\t', '\t') line = line.replace('\t\t', '\t') line = line.replace('http://', '') line = line.replace('https://', '') element = line.split('\t') cle = element[0] data = element[1] url_dict[cle] = data if (debug == 1): print('Url dict : \n', url_dict) if (testmode == 1): print('Check :') for (url, mail) in url_dict.items(): if (('ns://' not in url) and ('mx://' not in url) and ('ping://' not in url)): availability = str(request_url(url)) if ((availability == '200') or (availability == '301') or (availability == '302')): request_url_result = 'UP' else: request_url_result = 'DOWN' if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif ('ns://' in url): request_url_result = ping_name(url, 'NS') if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result NS :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif ('mx://' in url): request_url_result = ping_name(url, 'MX') if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result MX :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif ('ping://' in url): url = url.replace('ping://', '') request_url_result = ping_ip(url) if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result Ping :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif (testmode == 1): print('url : ', url, ' -> mail : ', mail, 'ignored') exit()
1,797,415,113,771,089,200
Open the file, read each line and build a dictionary. For each entry, check the url over http/https, or resolve and then ping for MX/NS entries. If a url does not respond, send an alert email. Parameters ---------- file_to_check : string The name of the file that contains the list of urls to check and the mail addresses for alerts testmode : string This value is 0 by default and 1 if the user launches the script in test mode: printing enabled and no mail sent debug : string This value is 0 by default and 1 if the user launches the script in debug mode: more printing enabled and no mail sent Returns ------- None.
monitor2mail.py
check
bkittler/monitor2mail
python
def check(file_to_check, testmode, debug): try: file = open(file_to_check, 'r') except: exit('open file failed') lines = file.readlines() file.close() url_dict = {} for line in lines: line = line.replace('\n', '') line = line.replace(' ', '\t') line = line.replace('\t\t\t', '\t') line = line.replace('\t\t', '\t') line = line.replace('http://', '') line = line.replace('https://', '') element = line.split('\t') cle = element[0] data = element[1] url_dict[cle] = data if (debug == 1): print('Url dict : \n', url_dict) if (testmode == 1): print('Check :') for (url, mail) in url_dict.items(): if (('ns://' not in url) and ('mx://' not in url) and ('ping://' not in url)): availability = str(request_url(url)) if ((availability == '200') or (availability == '301') or (availability == '302')): request_url_result = 'UP' else: request_url_result = 'DOWN' if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif ('ns://' in url): request_url_result = ping_name(url, 'NS') if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result NS :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif ('mx://' in url): request_url_result = ping_name(url, 'MX') if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result MX :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif ('ping://' in url): url = url.replace('ping://', '') request_url_result = ping_ip(url) if (testmode == 1): print('url : ', url, ' -> mail : ', mail, ' Result Ping :', request_url_result) elif (request_url_result == 'DOWN'): alert_mail(mail, request_url_result, url) elif (testmode == 1): print('url : ', url, ' -> mail : ', mail, 'ignored') exit()
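The parsing above implies a small text file with one check per line: a target (optionally prefixed with ns://, mx://, or ping:// to select the check type), separated by whitespace from the alert address. A hypothetical input file, with placeholder addresses:

example.org	alerts@example.org
ns://example.org	alerts@example.org
mx://example.org	alerts@example.org
ping://192.0.2.10	alerts@example.org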
def request_url(url): '\n Function to send https or http request to this url and return code result.\n\n Parameters\n ----------\n url : string\n This variable contain url must be checked\n\n Returns\n -------\n status_code : int\n Code result\n\n ' try: url = ('https://' + format(url)) response = requests.head(url, allow_redirects=True, timeout=10) except: try: url = ('http://' + format(url)) response = requests.head(url, allow_redirects=True, timeout=10) except: return '404' if response.status_code: return response.status_code else: return '404'
8,563,870,220,353,574,000
Function to send an https or http request to this url and return the status code. Parameters ---------- url : string This variable contains the url to be checked Returns ------- status_code : int Status code of the response
monitor2mail.py
request_url
bkittler/monitor2mail
python
def request_url(url): try: url = ('https://' + format(url)) response = requests.head(url, allow_redirects=True, timeout=10) except: try: url = ('http://' + format(url)) response = requests.head(url, allow_redirects=True, timeout=10) except: return '404' if response.status_code: return response.status_code else: return '404'
def ping_name(name, dns_type): '\n Function to resolve name and ping this host.\n print the result of ping\n\n Parameters\n ----------\n name : string\n This variable contain the name (host) must be checked\n dns_type : string\n This variable contain the DNS type : A, NS, MX \n\n Returns\n -------\n status : String\n Status result : UP or DOWN\n\n ' name = name.replace('ns://', '') name = name.replace('mx://', '') if (dns_type == 'A'): try: addr1 = socket.gethostbyname_ex(name) print('Resolution -> {}'.format(addr1[2])) name = addr1[2] except: print('Resolution failed') if (dns_type == 'MX'): try: answers = dns.resolver.resolve(name, 'MX') for rdata in answers: addr1 = socket.gethostbyname_ex(str(rdata.exchange)) name = addr1[2] if (ping_ip(name) == 'UP'): return 'UP' return ping_ip(name) except: print('Resolution failed') return 'DOWN' if (dns_type == 'NS'): try: answers = dns.resolver.resolve(name, 'NS') for rdata in answers: addr1 = socket.gethostbyname_ex(str(rdata.target)) name = addr1[2] for srv in name: if (ping_ip(srv) == 'UP'): return 'UP' return ping_ip(name) except: print('Resolution failed') return 'DOWN'
5,352,204,561,138,642,000
Function to resolve a name and ping the host. Prints the result of the resolution. Parameters ---------- name : string The name (host) to be checked dns_type : string The DNS record type : A, NS, MX Returns ------- status : String Status result : UP or DOWN
monitor2mail.py
ping_name
bkittler/monitor2mail
python
def ping_name(name, dns_type): name = name.replace('ns://', '') name = name.replace('mx://', '') if (dns_type == 'A'): try: addr1 = socket.gethostbyname_ex(name) print('Resolution -> {}'.format(addr1[2])) name = addr1[2] except: print('Resolution failed') if (dns_type == 'MX'): try: answers = dns.resolver.resolve(name, 'MX') for rdata in answers: addr1 = socket.gethostbyname_ex(str(rdata.exchange)) name = addr1[2] if (ping_ip(name) == 'UP'): return 'UP' return ping_ip(name) except: print('Resolution failed') return 'DOWN' if (dns_type == 'NS'): try: answers = dns.resolver.resolve(name, 'NS') for rdata in answers: addr1 = socket.gethostbyname_ex(str(rdata.target)) name = addr1[2] for srv in name: if (ping_ip(srv) == 'UP'): return 'UP' return ping_ip(name) except: print('Resolution failed') return 'DOWN'
def ping_ip(name): '\n Function to ping name.\n return the result of ping\n\n Parameters\n ----------\n name : string\n This variable is IP address\n\n Returns\n -------\n status : String\n Status result : UP or DOWN\n\n ' try: name = str(name).strip('[]') name = str(name).strip("''") hostname = format(name) response = os.system((('ping -c 1 ' + hostname) + ' > /dev/null 2>&1')) if (response == 0): return 'UP' else: return 'DOWN' except requests.ConnectionError: return 'DOWN' return 'DOWN'
-4,169,143,952,883,940,000
Function to ping a host. Returns the result of the ping. Parameters ---------- name : string The IP address to ping Returns ------- status : String Status result : UP or DOWN
monitor2mail.py
ping_ip
bkittler/monitor2mail
python
def ping_ip(name): try: name = str(name).strip('[]') name = str(name).strip("''") hostname = format(name) response = os.system((('ping -c 1 ' + hostname) + ' > /dev/null 2>&1')) if (response == 0): return 'UP' else: return 'DOWN' except requests.ConnectionError: return 'DOWN' return 'DOWN'
def alert_mail(email_receiver, service_status, url): '\n Function to send email Alert\n\n Parameters\n ----------\n email_receiver : string\n destination email for alert\n service_status : string\n service status\n url : string\n url concertned by alert\n\n Returns\n -------\n None.\n\n ' service_status = ('Subject:{}\n\n'.format(service_status) + 'Server :{} \n'.format(url)) context = ssl.create_default_context() with smtplib.SMTP_SSL(smtp_address, smtp_port, context=context) as server: server.login(email_address, email_password) server.sendmail(email_address, email_receiver, service_status)
-2,547,298,936,809,712,000
Function to send an email alert Parameters ---------- email_receiver : string destination email for alert service_status : string service status url : string url concerned by the alert Returns ------- None.
monitor2mail.py
alert_mail
bkittler/monitor2mail
python
def alert_mail(email_receiver, service_status, url): service_status = ('Subject:{}\n\n'.format(service_status) + 'Server :{} \n'.format(url)) context = ssl.create_default_context() with smtplib.SMTP_SSL(smtp_address, smtp_port, context=context) as server: server.login(email_address, email_password) server.sendmail(email_address, email_receiver, service_status)
def main(argv, testmode, debug): '\n Print the file opened and launch the check of the file with the testmode / debug values.\n\n Parameters\n ----------\n file_to_check : string\n The name of the file that contains the list of urls to check\n and the mail addresses for alerts\n testmode : string\n This value is 0 by default and 1 if the user launches the script in test mode:\n printing enabled and no mail sent\n debug : string\n This value is 0 by default and 1 if the user launches the script in debug mode:\n more printing enabled and no mail sent\n\n Returns\n -------\n None.\n\n ' if (testmode == 1): print('Import file: {}'.format(argv[0])) file = str(argv[0]) check(file, testmode, debug)
1,062,999,027,258,753,900
Print the file opened and launch the check of the file with the testmode / debug values. Parameters ---------- file_to_check : string The name of the file that contains the list of urls to check and the mail addresses for alerts testmode : string This value is 0 by default and 1 if the user launches the script in test mode: printing enabled and no mail sent debug : string This value is 0 by default and 1 if the user launches the script in debug mode: more printing enabled and no mail sent Returns ------- None.
monitor2mail.py
main
bkittler/monitor2mail
python
def main(argv, testmode, debug): if (testmode == 1): print('Import file: {}'.format(argv[0])) file = str(argv[0]) check(file, testmode, debug)
def __call__(self, results): 'Call function to augment common fields in results.\n\n Args:\n results (dict): Result dict contains the data to augment.\n\n Returns:\n dict: The result dict contains the data that is augmented with different scales and flips.\n ' aug_data = [] flip_aug = ([True] if self.flip else [False]) pcd_horizontal_flip_aug = ([False, True] if (self.flip and self.pcd_horizontal_flip) else [False]) pcd_vertical_flip_aug = ([False, True] if (self.flip and self.pcd_vertical_flip) else [False]) for scale in self.img_scale: for pts_scale_ratio in self.pts_scale_ratio: for flip in flip_aug: for pcd_horizontal_flip in pcd_horizontal_flip_aug: for pcd_vertical_flip in pcd_vertical_flip_aug: for direction in self.flip_direction: _results = deepcopy(results) _results['scale'] = scale _results['flip'] = flip _results['pcd_scale_factor'] = pts_scale_ratio _results['flip_direction'] = direction _results['pcd_horizontal_flip'] = pcd_horizontal_flip _results['pcd_vertical_flip'] = pcd_vertical_flip data = self.transforms(_results) aug_data.append(data) aug_data_dict = {key: [] for key in aug_data[0]} for data in aug_data: for (key, val) in data.items(): aug_data_dict[key].append(val) return aug_data_dict
-5,909,924,340,745,003,000
Call function to augment common fields in results. Args: results (dict): Result dict contains the data to augment. Returns: dict: The result dict contains the data that is augmented with different scales and flips.
mmdet3d/datasets/pipelines/test_time_aug.py
__call__
Comverser/mmdetection3d
python
def __call__(self, results): aug_data = [] flip_aug = ([True] if self.flip else [False]) pcd_horizontal_flip_aug = ([False, True] if (self.flip and self.pcd_horizontal_flip) else [False]) pcd_vertical_flip_aug = ([False, True] if (self.flip and self.pcd_vertical_flip) else [False]) for scale in self.img_scale: for pts_scale_ratio in self.pts_scale_ratio: for flip in flip_aug: for pcd_horizontal_flip in pcd_horizontal_flip_aug: for pcd_vertical_flip in pcd_vertical_flip_aug: for direction in self.flip_direction: _results = deepcopy(results) _results['scale'] = scale _results['flip'] = flip _results['pcd_scale_factor'] = pts_scale_ratio _results['flip_direction'] = direction _results['pcd_horizontal_flip'] = pcd_horizontal_flip _results['pcd_vertical_flip'] = pcd_vertical_flip data = self.transforms(_results) aug_data.append(data) aug_data_dict = {key: [] for key in aug_data[0]} for data in aug_data: for (key, val) in data.items(): aug_data_dict[key].append(val) return aug_data_dict
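Because the nested loops enumerate every combination, the number of augmented copies per sample is the product of the option counts. A standalone sketch with hypothetical settings:

img_scale = [(1333, 800)]              # 1 image scale
pts_scale_ratio = [0.95, 1.0, 1.05]    # 3 point-cloud scales
flip_aug = [False, True]               # flip enabled
pcd_horizontal_flip_aug = [False, True]
pcd_vertical_flip_aug = [False]        # vertical flip disabled
flip_direction = ['horizontal']
n = (len(img_scale) * len(pts_scale_ratio) * len(flip_aug)
     * len(pcd_horizontal_flip_aug) * len(pcd_vertical_flip_aug) * len(flip_direction))
print(n)  # 12 augmented versions of each input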
def __repr__(self): 'str: Return a string that describes the module.' repr_str = self.__class__.__name__ repr_str += f'(transforms={self.transforms}, ' repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, ' repr_str += f'flip_direction={self.flip_direction})' return repr_str
-1,499,162,343,701,100,800
str: Return a string that describes the module.
mmdet3d/datasets/pipelines/test_time_aug.py
__repr__
Comverser/mmdetection3d
python
def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(transforms={self.transforms}, ' repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, ' repr_str += f'flip_direction={self.flip_direction})' return repr_str
def testCinderVolumeSource(self): 'Test CinderVolumeSource' pass
-1,152,737,304,363,695,400
Test CinderVolumeSource
python/test/test_cinder_volume_source.py
testCinderVolumeSource
RafalSkolasinski/seldon-deploy-client
python
def testCinderVolumeSource(self): pass
def get_form_field(self): '\n Returns an instance of the form field class, used for constructing the\n filter form for a report.\n ' return self.form_field_class(required=(self.required and (not self.filter_set)), widget=self.form_field_widget, label=self.label)
-9,157,184,731,406,959,000
Returns an instance of the form field class, used for constructing the filter form for a report.
reporting/base.py
get_form_field
flagshipenterprise/django-prickly-reports
python
def get_form_field(self): return self.form_field_class(required=(self.required and (not self.filter_set)), widget=self.form_field_widget, label=self.label)
def get_data(self, name, data): '\n To get the data for this filter given the filter sets, we instantiate\n the form with the data, validate it, and return the cleaned data.\n ' cleaned_data = self.clean_data(name, data) return (cleaned_data if cleaned_data else self.default)
4,463,008,267,650,598,000
To get the data for this filter given the filter sets, we instantiate the form with the data, validate it, and return the cleaned data.
reporting/base.py
get_data
flagshipenterprise/django-prickly-reports
python
def get_data(self, name, data): cleaned_data = self.clean_data(name, data) return (cleaned_data if cleaned_data else self.default)
def get_data_set(self, name, data): '\n This horribly ugly little function is in charge of returning a list of\n data entries, given filter states, for a filter set. It does the same\n thing as get_data, but for every item in a filter set, returning the\n results in a list.\n ' if (not self.filter_set): return [self.get_data(name, data)] delete = data.get('delete', None) delete_index = None if delete: (n, i) = delete.split('.') if (n == name): delete_index = (int(i) + 1) filter_state_names = self.filter_state_names[:] filter_state_list = [data.getlist((state_name % name), []) for state_name in filter_state_names] filter_states = zip(*filter_state_list) data_set = [] for i in range(len(filter_states)): if (i == delete_index): continue state = filter_states[i] filter_dict = {} for i in range(0, len(filter_state_names)): filter_dict.update({(filter_state_names[i] % name): state[i]}) cleaned_data = self.clean_data(name, filter_dict) if cleaned_data: data_elem = cleaned_data data_set.append(data_elem) return data_set
-6,076,899,162,106,351,000
This horribly ugly little function is in charge of returning a list of data entries, given filter states, for a filter set. It does the same thing as get_data, but for every item in a filter set, returning the results in a list.
reporting/base.py
get_data_set
flagshipenterprise/django-prickly-reports
python
def get_data_set(self, name, data): if (not self.filter_set): return [self.get_data(name, data)] delete = data.get('delete', None) delete_index = None if delete: (n, i) = delete.split('.') if (n == name): delete_index = (int(i) + 1) filter_state_names = self.filter_state_names[:] filter_state_list = [data.getlist((state_name % name), []) for state_name in filter_state_names] filter_states = list(zip(*filter_state_list)) data_set = [] for i in range(len(filter_states)): if (i == delete_index): continue state = filter_states[i] filter_dict = {} for i in range(0, len(filter_state_names)): filter_dict.update({(filter_state_names[i] % name): state[i]}) cleaned_data = self.clean_data(name, filter_dict) if cleaned_data: data_elem = cleaned_data data_set.append(data_elem) return data_set
def get_filter_state_from_data(self, name, data): '\n Another nasty little bit. This one (if not overridden) takes some\n data and encodes it, using the filter state names, to be a valid\n filter_state which would return the original data if passed to get_data\n\n TODO: Make sure this actually works for stuff other than\n NumericComparisonFilter\n\n TODO: Add good comments :P\n ' if (len(self.filter_state_names) > 1): if (not (hasattr(data, '__iter__') and (len(self.filter_state_names) == len(data)))): raise Exception() state = {} for i in range(0, len(data)): state.update({(self.filter_state_names[i] % name): data[i]}) return state else: return {(self.filter_state_names[0] % name): data}
-2,751,685,666,991,807,500
Another nasty little bit. This one (if not overridden) takes some data and encodes it, using the filter state names, to be a valid filter_state which would return the original data if passed to get_data TODO: Make sure this actually works for stuff other than NumericComparisonFilter TODO: Add good comments :P
reporting/base.py
get_filter_state_from_data
flagshipenterprise/django-prickly-reports
python
def get_filter_state_from_data(self, name, data): if (len(self.filter_state_names) > 1): if (not (hasattr(data, '__iter__') and (len(self.filter_state_names) == len(data)))): raise Exception() state = {} for i in range(0, len(data)): state.update({(self.filter_state_names[i] % name): data[i]}) return state else: return {(self.filter_state_names[0] % name): data}
def __init__(self, filter_states={}): '\n filter_state will be a querydict with keys corresponding to the names\n of the filter members on this report object.\n ' if isinstance(filter_states, QueryDict): self.filter_states = filter_states else: self.filter_states = QueryDict('', mutable=True) self.filter_states.update(filter_states) self.title = (self.title or self.get_title_from_class_name())
4,264,191,987,349,734
filter_state will be a querydict with keys corresponding to the names of the filter members on this report object.
reporting/base.py
__init__
flagshipenterprise/django-prickly-reports
python
def __init__(self, filter_states={}): if isinstance(filter_states, QueryDict): self.filter_states = filter_states else: self.filter_states = QueryDict('', mutable=True) self.filter_states.update(filter_states) self.title = (self.title or self.get_title_from_class_name())
def __getattribute__(self, name): "\n When getting a filter attribute, looks for the corresponding filter\n state and returns that instead of the filter object. If none is found,\n looks for the default value on the filter object. If that's not found\n either, then returns none.\n " attr = object.__getattribute__(self, name) if issubclass(type(attr), Filter): if (not attr.filter_set): return attr.get_data(name, self.filter_states) else: return attr.get_data_set(name, self.filter_states) return attr
-1,361,638,907,889,762,300
When getting a filter attribute, looks for the corresponding filter state and returns that instead of the filter object. If none is found, looks for the default value on the filter object. If that's not found either, then returns None.
reporting/base.py
__getattribute__
flagshipenterprise/django-prickly-reports
python
def __getattribute__(self, name): attr = object.__getattribute__(self, name) if issubclass(type(attr), Filter): if (not attr.filter_set): return attr.get_data(name, self.filter_states) else: return attr.get_data_set(name, self.filter_states) return attr
def get_title_from_class_name(self): '\n Split the class name into words, delimited by capitals.\n ' words = re.split('([A-Z])', self.__class__.__name__)[1:] words = [(words[i] + words[(i + 1)]) for i in range(0, (len(words) - 1), 2)] return ' '.join(words)
1,387,999,026,653,367,000
Split the class name into words, delimited by capitals.
reporting/base.py
get_title_from_class_name
flagshipenterprise/django-prickly-reports
python
def get_title_from_class_name(self): words = re.split('([A-Z])', self.__class__.__name__)[1:] words = [(words[i] + words[(i + 1)]) for i in range(0, (len(words) - 1), 2)] return ' '.join(words)
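A standalone trace of the split-and-rejoin trick on a hypothetical class name:

import re

name = 'MonthlySalesReport'
words = re.split('([A-Z])', name)[1:]   # ['M', 'onthly', 'S', 'ales', 'R', 'eport']
words = [(words[i] + words[(i + 1)]) for i in range(0, (len(words) - 1), 2)]
print(' '.join(words))                  # 'Monthly Sales Report'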
def get_filter(self, name): "\n Perform the normal __getattribute__ call,\n and return it if it's a filter\n " attr = object.__getattribute__(self, name) return (attr if issubclass(type(attr), Filter) else None)
4,096,343,569,090,666,000
Perform the normal __getattribute__ call, and return it if it's a filter
reporting/base.py
get_filter
flagshipenterprise/django-prickly-reports
python
def get_filter(self, name): attr = object.__getattribute__(self, name) return (attr if issubclass(type(attr), Filter) else None)
def get_filters(self): '\n Return a list of all the names and attributes on this report instance\n which have a base class of Filter.\n ' filters = [] for name in dir(self): attr = object.__getattribute__(self, name) if issubclass(type(attr), Filter): filters.append((name, attr)) return sorted(filters, key=(lambda attr: attr[1].order))
8,849,445,637,423,616,000
Return a list of all the names and attributes on this report instance which have a base class of Filter.
reporting/base.py
get_filters
flagshipenterprise/django-prickly-reports
python
def get_filters(self): filters = [] for name in dir(self): attr = object.__getattribute__(self, name) if issubclass(type(attr), Filter): filters.append((name, attr)) return sorted(filters, key=(lambda attr: attr[1].order))
def get_row(self, item): "\n This can return a list for simple data that doesn't need special\n template rendering, or a dict for more complex data where individual\n fields will need to be rendered specially.\n " return []
1,667,515,990,143,625,200
This can return a list for simple data that doesn't need special template rendering, or a dict for more complex data where individual fields will need to be rendered specially.
reporting/base.py
get_row
flagshipenterprise/django-prickly-reports
python
def get_row(self, item): return []
@staticmethod def encode_filter_states(data): '\n Converts a normal POST querydict to the filterstate data,\n to be stored in the url\n ' return data
-4,065,880,102,832,282,000
Converts a normal POST querydict to the filterstate data, to be stored in the url
reporting/base.py
encode_filter_states
flagshipenterprise/django-prickly-reports
python
@staticmethod def encode_filter_states(data): return data
@staticmethod def decode_filter_states(data): '\n Opposite of encode_filter_states\n ' return data
5,662,247,490,372,187,000
Opposite of encode_filter_states
reporting/base.py
decode_filter_states
flagshipenterprise/django-prickly-reports
python
@staticmethod def decode_filter_states(data): return data
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None): 'Add a Satchmo contact to a mailman mailing list.\n\n Parameters:\n - `Contact`: A Satchmo Contact\n - `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME\n - `send_welcome_msg`: True or False, defaulting to the list default\n - `admin_notify`: True of False, defaulting to the list default\n ' (mm, listname) = _get_maillist(listname) ((print >> sys.stderr), ('mailman adding %s to %s' % (contact.email, listname))) if (send_welcome_msg is None): send_welcome_msg = mm.send_welcome_msg userdesc = UserDesc() userdesc.fullname = contact.full_name userdesc.address = contact.email userdesc.digest = False if mm.isMember(contact.email): ((print >> sys.stderr), _(('Already Subscribed: %s' % contact.email))) else: try: try: mm.Lock() mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify) mm.Save() ((print >> sys.stderr), (_('Subscribed: %(email)s') % {'email': contact.email})) except Errors.MMAlreadyAMember: ((print >> sys.stderr), (_('Already a member: %(email)s') % {'email': contact.email})) except Errors.MMBadEmailError: if (userdesc.address == ''): ((print >> sys.stderr), _('Bad/Invalid email address: blank line')) else: ((print >> sys.stderr), (_('Bad/Invalid email address: %(email)s') % {'email': contact.email})) except Errors.MMHostileAddress: ((print >> sys.stderr), (_('Hostile address (illegal characters): %(email)s') % {'email': contact.email})) finally: mm.Unlock()
2,103,239,974,926,152,200
Add a Satchmo contact to a mailman mailing list. Parameters: - `Contact`: A Satchmo Contact - `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME - `send_welcome_msg`: True or False, defaulting to the list default - `admin_notify`: True or False, defaulting to the list default
satchmo/newsletter/mailman.py
mailman_add
sankroh/satchmo
python
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None): (mm, listname) = _get_maillist(listname) print('mailman adding %s to %s' % (contact.email, listname), file=sys.stderr) if (send_welcome_msg is None): send_welcome_msg = mm.send_welcome_msg userdesc = UserDesc() userdesc.fullname = contact.full_name userdesc.address = contact.email userdesc.digest = False if mm.isMember(contact.email): print(_('Already Subscribed: %s' % contact.email), file=sys.stderr) else: try: try: mm.Lock() mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify) mm.Save() print(_('Subscribed: %(email)s') % {'email': contact.email}, file=sys.stderr) except Errors.MMAlreadyAMember: print(_('Already a member: %(email)s') % {'email': contact.email}, file=sys.stderr) except Errors.MMBadEmailError: if (userdesc.address == ''): print(_('Bad/Invalid email address: blank line'), file=sys.stderr) else: print(_('Bad/Invalid email address: %(email)s') % {'email': contact.email}, file=sys.stderr) except Errors.MMHostileAddress: print(_('Hostile address (illegal characters): %(email)s') % {'email': contact.email}, file=sys.stderr) finally: mm.Unlock()
def mailman_remove(contact, listname=None, userack=None, admin_notify=None): 'Remove a Satchmo contact from a Mailman mailing list\n\n Parameters:\n - `contact`: A Satchmo contact\n - `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME\n - `userack`: True or False, whether to notify the user, defaulting to the list default\n - `admin_notify`: True or False, defaulting to the list default\n ' (mm, listname) = _get_maillist(listname) ((print >> sys.stderr), ('mailman removing %s from %s' % (contact.email, listname))) if mm.isMember(contact.email): try: mm.Lock() mm.ApprovedDeleteMember(contact.email, 'satchmo.newsletter', admin_notify, userack) mm.Save() finally: mm.Unlock()
8,348,246,222,939,337,000
Remove a Satchmo contact from a Mailman mailing list Parameters: - `contact`: A Satchmo contact - `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME - `userack`: True or False, whether to notify the user, defaulting to the list default - `admin_notify`: True or False, defaulting to the list default
satchmo/newsletter/mailman.py
mailman_remove
sankroh/satchmo
python
def mailman_remove(contact, listname=None, userack=None, admin_notify=None): (mm, listname) = _get_maillist(listname) print('mailman removing %s from %s' % (contact.email, listname), file=sys.stderr) if mm.isMember(contact.email): try: mm.Lock() mm.ApprovedDeleteMember(contact.email, 'satchmo.newsletter', admin_notify, userack) mm.Save() finally: mm.Unlock()
def runtests(*test_args): 'Setup and run django-lockdowns test suite.' os.environ['DJANGO_SETTINGS_MODULE'] = 'lockdown.tests.test_settings' django.setup() if (not test_args): test_args = ['lockdown.tests'] test_runner = get_runner(settings)() failures = test_runner.run_tests(test_args) sys.exit(bool(failures))
6,142,930,323,732,631,000
Set up and run the django-lockdown test suite.
runtests.py
runtests
carta/django-lockdown
python
def runtests(*test_args): os.environ['DJANGO_SETTINGS_MODULE'] = 'lockdown.tests.test_settings' django.setup() if (not test_args): test_args = ['lockdown.tests'] test_runner = get_runner(settings)() failures = test_runner.run_tests(test_args) sys.exit(bool(failures))
def __init__(self, url: str, demand_address_exists: bool=True, timeout: float=10.0, keep_alive: bool=True, **kwargs): '\n :param url: The address of the IO Base module from/to which IO is written\n ' url = ('http://{}'.format(url) if ('http' not in url) else url) url = '{}/customState.json'.format(url) self._url = url self._io = dict() self._previous_read_io = dict() self._changes = dict() self._first_read = False self._last_hardware_read_time = None self._req = (requests if (not keep_alive) else requests.Session()) self.update_reads_on_write = bool(kwargs.get('update_reads_on_write', False)) self.demand_address_exists = demand_address_exists self.timeout = timeout
-6,242,997,756,507,493,000
:param url: The address of the IO Base module from/to which IO is written
controlpyweb/reader_writer.py
__init__
washad/ControlPyWeb
python
def __init__(self, url: str, demand_address_exists: bool=True, timeout: float=10.0, keep_alive: bool=True, **kwargs): '\n \n ' url = ('http://{}'.format(url) if ('http' not in url) else url) url = '{}/customState.json'.format(url) self._url = url self._io = dict() self._previous_read_io = dict() self._changes = dict() self._first_read = False self._last_hardware_read_time = None self._req = (requests if (not keep_alive) else requests.Session()) self.update_reads_on_write = bool(kwargs.get('update_reads_on_write', False)) self.demand_address_exists = demand_address_exists self.timeout = timeout
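A hedged construction example for this initializer; the class name ReaderWriter is an assumption inferred from the module path controlpyweb/reader_writer.py, and the device address is a placeholder:

    # Hypothetical: class name and IP address are assumptions.
    from controlpyweb.reader_writer import ReaderWriter  # assumed name

    module = ReaderWriter('192.168.1.25', timeout=5.0, keep_alive=True)
    # The bare host is expanded internally to http://192.168.1.25/customState.json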
def _get(self, timeout: float=None) -> dict: ' Does an http get and returns the results as key/value pairs' timeout = (self.timeout if (timeout is None) else timeout) self._first_read = True r = self._req.get(self._url, timeout=timeout) r = (None if (r is None) else r.json()) return r
127,079,083,397,478,980
Does an http get and returns the results as key/value pairs
controlpyweb/reader_writer.py
_get
washad/ControlPyWeb
python
def _get(self, timeout: float=None) -> dict: ' ' timeout = (self.timeout if (timeout is None) else timeout) self._first_read = True r = self._req.get(self._url, timeout=timeout) r = (None if (r is None) else r.json()) return r
@property def changes(self): 'Returns a dictionary of all changes made since the last read or write' return self._changes
-4,161,327,294,875,323,400
Returns a dictionary of all changes made since the last read or write
controlpyweb/reader_writer.py
changes
washad/ControlPyWeb
python
@property def changes(self): return self._changes
def dumps(self, changes_only: bool=False): 'Returns the current IO key/values as json string' with lock: if changes_only: if (len(self._changes) == 0): return '' return json.dumps(self._changes) return json.dumps(self._io)
6,574,497,103,023,072,000
Returns the current IO key/values as json string
controlpyweb/reader_writer.py
dumps
washad/ControlPyWeb
python
def dumps(self, changes_only: bool=False): with lock: if changes_only: if (len(self._changes) == 0): return '' return json.dumps(self._changes) return json.dumps(self._io)
def flush_changes(self): ' Erases the collection of changes stored in memory' with lock: self._changes = dict()
-8,752,846,957,242,403,000
Erases the collection of changes stored in memory
controlpyweb/reader_writer.py
flush_changes
washad/ControlPyWeb
python
def flush_changes(self): ' ' with lock: self._changes = dict()
def loads(self, json_str: str): 'Replaces the current IO key/values with that from the json string' with lock: self._first_read = True self._io = json.loads(json_str)
6,937,131,681,817,986,000
Replaces the current IO key/values with that from the json string
controlpyweb/reader_writer.py
loads
washad/ControlPyWeb
python
def loads(self, json_str: str): with lock: self._first_read = True self._io = json.loads(json_str)
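Taken together, dumps() and loads() form a simple persistence round-trip; a sketch, reusing the assumed module instance from the construction example above:

    # Snapshot the in-memory IO map as JSON and restore it later.
    snapshot = module.dumps()                   # full IO map
    pending = module.dumps(changes_only=True)   # '' when no writes are queued
    module.loads(snapshot)                      # replaces the in-memory map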
def read(self, addr: str) -> Optional[Union[(bool, int, float, str)]]: '\n Returns the value of a single IO from the memory store\n ' with lock: if (not self._first_read): return None self._check_for_address(addr) val = self._io.get(addr) return val
-4,504,070,342,942,920,000
Returns the value of a single IO from the memory store
controlpyweb/reader_writer.py
read
washad/ControlPyWeb
python
def read(self, addr: str) -> Optional[Union[(bool, int, float, str)]]: '\n \n ' with lock: if (not self._first_read): return None self._check_for_address(addr) val = self._io.get(addr) return val
def read_immediate(self, addr: str, timeout: float=None) -> object: '\n Makes a hardware call to the base module to retrieve the value of the IO. This is inefficient and should\n be used sparingly.\n ' try: self._check_for_address(addr) timeout = (self.timeout if (timeout is None) else timeout) vals = self._get(timeout=timeout) if (vals is None): return None return vals.get(addr) except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex: raise WebIOConnectionError(ex)
-7,971,862,631,153,361,000
Makes a hardware call to the base module to retrieve the value of the IO. This is inefficient and should be used sparingly.
controlpyweb/reader_writer.py
read_immediate
washad/ControlPyWeb
python
def read_immediate(self, addr: str, timeout: float=None) -> object: '\n Makes a hardware call to the base module to retrieve the value of the IO. This is inefficient and should\n be used sparingly.\n ' try: self._check_for_address(addr) timeout = (self.timeout if (timeout is None) else timeout) vals = self._get(timeout=timeout) if (vals is None): return None return vals.get(addr) except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex: raise WebIOConnectionError(ex)
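The two read paths trade freshness for cost; a sketch contrasting them, where the address string is a placeholder and module is the assumed instance from above:

    # read() is a memory lookup (None before the first hardware read);
    # read_immediate() makes a live HTTP call and should be used sparingly.
    cached = module.read('relay1')                        # placeholder address
    fresh = module.read_immediate('relay1', timeout=2.0)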
def to_hardware(self, timeout: float=None): ' Same as send_changes_to_hardware' return self.send_changes_to_hardware(timeout)
-4,038,051,524,453,115,400
Same as send_changes_to_hardware
controlpyweb/reader_writer.py
to_hardware
washad/ControlPyWeb
python
def to_hardware(self, timeout: float=None): ' ' return self.send_changes_to_hardware(timeout)
def send_changes_to_hardware(self, timeout: float=None): ' Takes the collection of changes made using the write command and\n sends them all to the hardware collectively. ' try: with lock: if ((self._changes is None) or (len(self._changes) == 0)): return timeout = (self.timeout if (timeout is None) else timeout) self._req.get(self._url, params=self._changes, timeout=timeout) self.flush_changes() except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex: raise WebIOConnectionError(ex)
-4,451,905,811,028,989,000
Takes the collection of changes made using the write command and sends them all to the hardware collectively.
controlpyweb/reader_writer.py
send_changes_to_hardware
washad/ControlPyWeb
python
def send_changes_to_hardware(self, timeout: float=None): ' Takes the collection of changes made using the write command and\n sends them all to the hardware collectively. ' try: with lock: if ((self._changes is None) or (len(self._changes) == 0)): return timeout = (self.timeout if (timeout is None) else timeout) self._req.get(self._url, params=self._changes, timeout=timeout) self.flush_changes() except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex: raise WebIOConnectionError(ex)
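The write/send split exists so that several writes collapse into one HTTP request; a sketch of that flow with placeholder addresses:

    # Queue changes locally, then push them in a single request; on success
    # send_changes_to_hardware() clears the pending change set.
    module.write('relay1', True)
    module.write('relay2', False)
    module.send_changes_to_hardware()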
def from_hardware(self, timeout: float=None): ' Same as update_from_hardware' self.update_from_hardware(timeout)
5,353,684,869,602,444,000
Same as update_from_hardware
controlpyweb/reader_writer.py
from_hardware
washad/ControlPyWeb
python
def from_hardware(self, timeout: float=None): ' ' self.update_from_hardware(timeout)
def update_from_hardware(self, timeout: float=None): 'Makes a hardware call to the base module to retrieve the value of all IOs, storing their\n results in memory.' try: timeout = (self.timeout if (timeout is None) else timeout) with lock: vals = self._get(timeout) self._last_hardware_read_time = time.time() if (vals is not None): self._io = vals self.flush_changes() except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex: raise WebIOConnectionError(ex)
7,733,040,249,127,075,000
Makes a hardware call to the base module to retrieve the value of all IOs, storing their results in memory.
controlpyweb/reader_writer.py
update_from_hardware
washad/ControlPyWeb
python
def update_from_hardware(self, timeout: float=None): 'Makes a hardware call to the base module to retrieve the value of all IOs, storing their\n results in memory.' try: timeout = (self.timeout if (timeout is None) else timeout) with lock: vals = self._get(timeout) self._last_hardware_read_time = time.time() if (vals is not None): self._io = vals self.flush_changes() except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex: raise WebIOConnectionError(ex)
def write(self, addr: str, value: object) -> None: '\n Stores the write value in memory to be written as part of a group write when changes are sent to\n hardware.' with lock: to_str = self._value_to_str(value) if self.update_reads_on_write: self._io[addr] = value self._changes[addr] = to_str
-2,172,726,099,156,868,600
Stores the write value in memory to be written as part of a group write when changes are sent to hardware.
controlpyweb/reader_writer.py
write
washad/ControlPyWeb
python
def write(self, addr: str, value: object) -> None: '\n Stores the write value in memory to be written as part of a group write when changes are sent to\n hardware.' with lock: to_str = self._value_to_str(value) if self.update_reads_on_write: self._io[addr] = value self._changes[addr] = to_str
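Closing the loop, a hedged end-to-end read-modify-write cycle over the methods and aliases above; the IO addresses are placeholders and the exception import path is assumed:

    # Refresh the IO map, branch on an input, queue an output, push it back.
    from controlpyweb.errors import WebIOConnectionError  # assumed import path

    try:
        module.from_hardware()              # alias for update_from_hardware()
        if module.read('digitalInput1'):    # placeholder input address
            module.write('relay1', True)    # placeholder output address
        module.to_hardware()                # alias for send_changes_to_hardware()
    except WebIOConnectionError:
        pass  # device unreachable or timed out; retry or log as appropriate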