[Dataset of Python functions with docstrings; columns: body, body_hash, docstring, path, name, repository_name, lang, body_without_docstring]
# sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py · tags · sebtelko/pulumi-azure-native · python
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """Resource tags."""
    return pulumi.get(self, 'tags')
# sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py · __init__ · sebtelko/pulumi-azure-native · python
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             location: Optional[pulumi.Input[str]] = None,
             resource_group_name: Optional[pulumi.Input[str]] = None,
             sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
             sql_pool_name: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             workspace_name: Optional[pulumi.Input[str]] = None,
             __props__=None):
    """A SQL pool resource.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] location: The geo-location where the resource lives.
    :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
    :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The SQL pool SKU. The list of SKUs may vary by region and support offer.
    :param pulumi.Input[str] sql_pool_name: The name of the SQL pool.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
    :param pulumi.Input[str] workspace_name: The name of the workspace.
    """
    ...
# sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py · __init__ · sebtelko/pulumi-azure-native · python
@overload
def __init__(__self__, resource_name: str, args: SqlPoolsV3Args, opts: Optional[pulumi.ResourceOptions] = None):
    """A SQL pool resource.

    :param str resource_name: The name of the resource.
    :param SqlPoolsV3Args args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
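For illustration, a minimal sketch of creating a pool with the keyword-argument overload; the versioned module path and the SKU name are assumptions about this provider version, and all resource names are placeholders:

import pulumi
import pulumi_azure_native as azure_native

# Hypothetical values; the resource group and workspace must already exist.
pool = azure_native.synapse.v20200401preview.SqlPoolsV3(
    'example-pool',
    resource_group_name='example-rg',
    workspace_name='example-ws',
    location='westus2',
    sku=azure_native.synapse.v20200401preview.SkuArgs(name='DW100c'),
    tags={'env': 'dev'})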
# sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py · get · sebtelko/pulumi-azure-native · python
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlPoolsV3':
    """Get an existing SqlPoolsV3 resource's state with the given name, id, and
    optional extra properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to look up.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)
    __props__.__dict__['current_service_objective_name'] = None
    __props__.__dict__['kind'] = None
    __props__.__dict__['location'] = None
    __props__.__dict__['name'] = None
    __props__.__dict__['requested_service_objective_name'] = None
    __props__.__dict__['sku'] = None
    __props__.__dict__['sql_pool_guid'] = None
    __props__.__dict__['status'] = None
    __props__.__dict__['system_data'] = None
    __props__.__dict__['tags'] = None
    __props__.__dict__['type'] = None
    return SqlPoolsV3(resource_name, opts=opts, __props__=__props__)
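Likewise, a sketch of adopting an existing pool by its provider ID via get(); the ID segments below are placeholders following the usual Azure resource-ID shape:

import pulumi

existing = SqlPoolsV3.get(
    'imported-pool',
    id='/subscriptions/<sub>/resourceGroups/<rg>/providers/'
       'Microsoft.Synapse/workspaces/<ws>/sqlPools/<pool>')
pulumi.export('poolStatus', existing.status)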
# sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py · output properties · sebtelko/pulumi-azure-native · python
@property
@pulumi.getter(name='currentServiceObjectiveName')
def current_service_objective_name(self) -> pulumi.Output[str]:
    """The current service level objective name of the sql pool."""
    return pulumi.get(self, 'current_service_objective_name')

@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
    """Kind of SqlPool."""
    return pulumi.get(self, 'kind')

@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """The geo-location where the resource lives."""
    return pulumi.get(self, 'location')

@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """The name of the resource."""
    return pulumi.get(self, 'name')

@property
@pulumi.getter(name='requestedServiceObjectiveName')
def requested_service_objective_name(self) -> pulumi.Output[str]:
    """The requested service level objective name of the sql pool."""
    return pulumi.get(self, 'requested_service_objective_name')

@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
    """The sql pool SKU. The list of SKUs may vary by region and support offer."""
    return pulumi.get(self, 'sku')

@property
@pulumi.getter(name='sqlPoolGuid')
def sql_pool_guid(self) -> pulumi.Output[str]:
    """The Guid of the sql pool."""
    return pulumi.get(self, 'sql_pool_guid')

@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
    """The status of the sql pool."""
    return pulumi.get(self, 'status')

@property
@pulumi.getter(name='systemData')
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
    """SystemData of SqlPool."""
    return pulumi.get(self, 'system_data')

@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """Resource tags."""
    return pulumi.get(self, 'tags')

@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """The type of the resource. E.g. "Microsoft.Compute/virtualMachines"
    or "Microsoft.Storage/storageAccounts"
    """
    return pulumi.get(self, 'type')
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · afficher · stormi/tsunami · python
def afficher(self):
    """Method to override, returning the objective's display string."""
    if self.doit_reculer:
        return 'Doit reculer'
    navire = self.navire
    distance = self.get_distance()
    direction = (distance.direction + 90) % 360
    msg_dist = get_nom_distance(distance)
    return 'Cap sur {}° ({}), à {}'.format(round(direction), distance.nom_direction, msg_dist)
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · get_distance · stormi/tsunami · python
def get_distance(self):
    """Return the distance (a Vecteur) between the ship and the destination.

    This method creates a vector (the Vecteur class defined in the
    primary module vehicule) representing the distance between the
    ship's position and the destination.
    """
    navire = self.navire
    position = navire.opt_position
    o_x = position.x
    o_y = position.y
    d_x = self.x
    d_y = self.y
    distance = Vecteur(d_x - o_x, d_y - o_y, 0)
    return distance
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · trouver_distance_min · stormi/tsunami · python
def trouver_distance_min(self, cible):
    """Find the minimum distance.

    This distance is the minimum distance between a room of the
    origin ship and a room of the target ship.
    """
    navire = self.navire
    etendue = navire.etendue
    altitude = etendue.altitude
    salle_cible = None
    distance = None
    for salle in navire.salles.values():
        if salle.coords.z != altitude:
            continue
        x, y = salle.coords.x, salle.coords.y
        for t_salle in cible.salles.values():
            if t_salle.coords.z != altitude:
                continue
            t_x, t_y = t_salle.coords.x, t_salle.coords.y
            t_distance = sqrt((t_x - x) ** 2 + (t_y - y) ** 2)
            if distance is None or t_distance < distance:
                distance = t_distance
                salle_cible = t_salle
    return (distance, salle_cible)
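As a standalone illustration of the same brute-force closest-pair search, here is a sketch using plain coordinate tuples in place of the game's salle objects (the tuple layout is an assumption made for the example):

from math import sqrt

def min_distance(rooms_a, rooms_b, altitude):
    """Closest pair between two room sets, restricted to one altitude."""
    best = None  # (distance, target_room)
    for x, y, z in rooms_a:
        if z != altitude:
            continue
        for room in rooms_b:
            t_x, t_y, t_z = room
            if t_z != altitude:
                continue
            d = sqrt((t_x - x) ** 2 + (t_y - y) ** 2)
            if best is None or d < best[0]:
                best = (d, room)
    return best

print(min_distance([(0, 0, 0)], [(3, 4, 0), (1, 1, 0)], altitude=0))
# -> (1.4142135623730951, (1, 1, 0))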
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · transmettre_controles · stormi/tsunami · python
def transmettre_controles(self):
    """Set the requested controls (speed and heading)."""
    equipage = self.equipage
    navire = self.navire
    distance = self.get_distance()
    if self.autre_direction:
        direction = round(self.autre_direction)
    else:
        direction = round(distance.direction)
    if equipage.controles.get('direction'):
        equipage.controles['direction'].direction = direction
    else:
        equipage.controler('direction', direction)
    vitesse = self.vitesse
    if equipage.controles.get('vitesse'):
        ancienne_vitesse = equipage.controles['vitesse'].vitesse
        equipage.controles['vitesse'].vitesse = vitesse
        if vitesse != ancienne_vitesse:
            equipage.controles['vitesse'].calculer_vitesse()
    else:
        equipage.controler('vitesse', self.vitesse, self.autoriser_vitesse_sup)
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · trouver_cap · stormi/tsunami · python
def trouver_cap(self):
    """Find the heading, taking obstacles into account."""
    equipage = self.equipage
    navire = self.navire
    if self.doit_reculer:
        x, y = self.doit_reculer
        p_x = navire.position.x
        p_y = navire.position.y
        max_distance = navire.get_max_distance_au_centre()
        if sqrt((x - p_x) ** 2 + (y - p_y) ** 2) > max_distance + 1:
            self.doit_reculer = ()
        else:
            return
    tries = equipage.vigie_tries
    if not tries:
        self.autre_direction = None
        self.transmettre_controles()
        return
    obstacles = tries.get('obstacle', {}).copy()
    obstacles.update(tries.get('salle', {}))
    obstacles.update(tries.get('sallenavire', {}))
    dangereux = obstacles.copy()
    for angle in obstacles.keys():
        if angle < -90 or angle > 90:
            del dangereux[angle]
    if not dangereux:
        self.ancienne_vitesse = None
        self.autre_direction = None
        self.transmettre_controles()
        return
    min_angle = None
    min_distance = None
    for angle, (vecteur, point) in dangereux.items():
        if min_distance is None or vecteur.mag < min_distance:
            min_distance = vecteur.mag
            min_angle = angle
    if -45 <= min_angle <= 45:
        if min_distance <= 2:
            self.vitesse = 0.05
        elif min_distance < 10:
            self.vitesse = 0.2
        elif min_distance < 25:
            self.vitesse = 0.6
    distance = 30
    angles = [i * 5 for i in range(0, 35)]
    for i in range(1, 35):
        angles.append(i * -5)
    o_distance = self.get_distance()
    if o_distance.norme < 30:
        distance = o_distance.norme
    relative = o_distance.direction - navire.direction.direction
    angles = sorted(angles, key=lambda a: fabs(a - relative))
    position = navire.opt_position
    while distance > 0:
        for angle in angles:
            vecteur = navire.opt_direction
            vecteur.mag = distance
            vecteur.around_z(radians(angle))
            if not navire.controller_collision(vecteur, collision=False, marge=0.8):
                if angle != 0:
                    self.info('Cap libre sur {}°'.format(angle))
                self.autre_direction = round((navire.direction.direction + angle) % 360)
                if fabs(angle) > 30:
                    self.vitesse = 0
                self.transmettre_controles()
                return
        distance -= 5
    self.transmettre_controles()
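The heart of trouver_cap is an angle sweep: candidate headings are tried in order of closeness to the desired relative bearing, at shrinking ranges, until one is collision-free. A minimal standalone sketch of that ordering, with a stand-in collision test:

from math import fabs

def first_clear_heading(desired_relative, is_blocked, max_range=30, step=5):
    """Return the first (angle, distance) whose ray is not blocked."""
    angles = sorted(range(-170, 175, 5), key=lambda a: fabs(a - desired_relative))
    distance = max_range
    while distance > 0:
        for angle in angles:
            if not is_blocked(angle, distance):
                return angle, distance
        distance -= step
    return None

# Example: everything within ±20° of straight ahead is blocked.
print(first_clear_heading(0, lambda a, d: fabs(a) < 20))  # -> (-20, 30)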
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · creer · stormi/tsunami · python
def creer(self):
    """Called when the objective is created.

    Create the associated controls to reach the target, i.e. join
    the point (x, y), trying to detect the relevant obstacles and a
    replacement heading if necessary.
    """
    equipage = self.equipage
    commandant = self.commandant
    if commandant is None:
        return
    self.trouver_cap()
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · verifier · stormi/tsunami · python
def verifier(self, prioritaire):
    """Check that the objective is still valid.

    This method checks that there is no obstacle on the assigned
    course.
    """
    equipage = self.equipage
    navire = self.navire
    commandant = self.commandant
    if commandant is None:
        return
    if prioritaire:
        self.trouver_cap()
# src/secondaires/navigation/equipage/objectifs/rejoindre.py · reagir_collision · stormi/tsunami · python
def reagir_collision(self, salle, contre):
    """React to a collision."""
    if not self.doit_reculer:
        commandant = self.commandant
        if commandant is None:
            return
        personnage = commandant.personnage
        navire = self.navire
        equipage = self.equipage
        p_x = navire.position.x
        p_y = navire.position.y
        self.warning('Essaye de faire reculer le navire')
        self.doit_reculer = (p_x, p_y)
        equipage.retirer_controle('direction')
        if navire.gouvernail:
            equipage.demander('relacher_gouvernail', personnage=personnage)
        if any(v.hissee for v in navire.voiles):
            equipage.demander('plier_voiles', None, personnage=personnage)
        rames = navire.rames
        if rames:
            if any(r.orientation != 0 for r in rames):
                equipage.demander('ramer', 'centre', personnage=personnage)
            equipage.demander('ramer', 'arrière', personnage=personnage)
# netbox/secrets/forms.py · validate_rsa_key · Megzo/netbox · python
def validate_rsa_key(key, is_secret=True):
    """Validate the format and type of an RSA key."""
    if key.startswith('ssh-rsa '):
        raise forms.ValidationError('OpenSSH line format is not supported. Please ensure that your public key is in PEM (base64) format.')
    try:
        key = RSA.importKey(key)
    except ValueError:
        raise forms.ValidationError('Invalid RSA key. Please ensure that your key is in PEM (base64) format.')
    except Exception as e:
        raise forms.ValidationError('Invalid key detected: {}'.format(e))
    if is_secret and not key.has_private():
        raise forms.ValidationError('This looks like a public key. Please provide your private RSA key.')
    elif not is_secret and key.has_private():
        raise forms.ValidationError('This looks like a private key. Please provide your public RSA key.')
    try:
        PKCS1_OAEP.new(key)
    except Exception:
        raise forms.ValidationError('Error validating RSA key. Please ensure that your key supports PKCS#1 OAEP.')
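A quick sketch of exercising the validator with a freshly generated key; this assumes the pycrypto-style API the function itself uses, where exportKey() yields a PEM-encoded key:

from Crypto.PublicKey import RSA

private_pem = RSA.generate(2048).exportKey().decode()
validate_rsa_key(private_pem, is_secret=True)      # passes: PEM, private, OAEP-capable
public_pem = RSA.importKey(private_pem).publickey().exportKey().decode()
validate_rsa_key(public_pem, is_secret=False)      # passes for the public half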
# ssim.py · _tf_fspecial_gauss · 97chenxa/Multiview2Novelview · python
def _tf_fspecial_gauss(size, sigma, ch=1):
    """Function to mimic the 'fspecial' Gaussian MATLAB function."""
    x_data, y_data = np.mgrid[-size // 2 + 1:size // 2 + 1,
                              -size // 2 + 1:size // 2 + 1]
    x_data = np.expand_dims(x_data, axis=-1)
    x_data = np.expand_dims(x_data, axis=-1)
    y_data = np.expand_dims(y_data, axis=-1)
    y_data = np.expand_dims(y_data, axis=-1)
    x = tf.constant(x_data, dtype=tf.float32)
    y = tf.constant(y_data, dtype=tf.float32)
    g = tf.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))
    g = tf.tile(g, [1, 1, ch, 1])
    return g / tf.reduce_sum(g)
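For reference, SSIM conventionally uses an 11×11 window with sigma = 1.5; a minimal sketch of building and inspecting one (applying it as a depthwise convolution filter is an assumption about its intended use):

import numpy as np
import tensorflow as tf

window = _tf_fspecial_gauss(size=11, sigma=1.5)
print(window.shape)  # (11, 11, 1, 1): shaped like a tf.nn.depthwise_conv2d filter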
# pommerman/forward_model.py · run · psyoblade/playground · python
def run(self, num_times, board, agents, bombs, items, flames,
        is_partially_observable, agent_view_size, action_space,
        game_type, max_steps, training_agent=None, is_communicative=False):
    """Run the forward model.

    Args:
      num_times: The number of times to run it for. This is a maximum and
        it will stop early if we reach a done.
      board: The board state to run it from.
      agents: The agents to use to run it.
      bombs: The starting bombs.
      items: The starting items.
      flames: The starting flames.
      is_partially_observable: Whether the board is partially observable or
        not. Only applies to TeamRadio.
      agent_view_size: If it's partially observable, then the size of the
        square that the agent can view.
      action_space: The actions that each agent can take.
      game_type: The type of game being played.
      max_steps: The maximum number of steps in an episode.
      training_agent: The training agent to pass to done.
      is_communicative: Whether the action depends on communication
        observations as well.

    Returns:
      steps: The list of step results, which are each a dict of "obs",
        "next_obs", "reward", "action".
      board: Updated board.
      agents: Updated agents, same models though.
      bombs: Updated bombs.
      items: Updated items.
      flames: Updated flames.
      done: Whether we completed the game in these steps.
      info: The result of the game if it's completed.
    """
    # game_type and max_steps were referenced but never defined in the original
    # snippet; taking them as parameters, and using the loop index as
    # step_count, is an assumed repair.
    steps = []
    for step_count in range(num_times):
        obs = self.get_observations(board, agents, bombs,
                                    is_partially_observable, agent_view_size)
        actions = self.act(agents, obs, action_space,
                           is_communicative=is_communicative)
        board, agents, bombs, items, flames = self.step(
            actions, board, agents, bombs, items, flames)
        next_obs = self.get_observations(board, agents, bombs,
                                         is_partially_observable, agent_view_size)
        reward = self.get_rewards(agents, game_type, step_count, max_steps)
        done = self.get_done(agents, game_type, step_count, max_steps,
                             training_agent)
        info = self.get_info(done, reward, game_type, agents)
        steps.append({'obs': obs, 'next_obs': next_obs, 'reward': reward,
                      'actions': actions})
        if done:
            for agent in agents:
                agent.episode_end(reward[agent.agent_id])
            break
    return steps, board, agents, bombs, items, flames, done, info
# pommerman/forward_model.py · act · psyoblade/playground · python
@staticmethod
def act(agents, obs, action_space, is_communicative=False):
    """Returns actions for each agent in this list.

    Args:
      agents: A list of agent objects.
      obs: A list of matching observations per agent.
      action_space: The action space for the environment using this model.
      is_communicative: Whether the action depends on communication
        observations as well.

    Returns a list of actions.
    """

    def act_ex_communication(agent):
        """Handles agent's move without communication."""
        if agent.is_alive:
            return agent.act(obs[agent.agent_id], action_space=action_space)
        else:
            return constants.Action.Stop.value

    def act_with_communication(agent):
        """Handles agent's move with communication."""
        if agent.is_alive:
            action = agent.act(obs[agent.agent_id], action_space=action_space)
            if type(action) == int:
                action = [action] + [0, 0]
            assert type(action) == list
            return action
        else:
            return [constants.Action.Stop.value, 0, 0]

    ret = []
    for agent in agents:
        if is_communicative:
            ret.append(act_with_communication(agent))
        else:
            ret.append(act_ex_communication(agent))
    return ret
# pommerman/forward_model.py · get_observations · psyoblade/playground · python
def get_observations(self, curr_board, agents, bombs, is_partially_observable,
                     agent_view_size, game_type, game_env):
    """Gets the observations as an np.array of the visible squares.

    The agent gets to choose whether it wants to keep the fogged part in
    memory.
    """
    board_size = len(curr_board)

    def make_bomb_maps(position):
        """Makes an array of an agent's bombs and the bombs' attributes."""
        blast_strengths = np.zeros((board_size, board_size))
        life = np.zeros((board_size, board_size))
        for bomb in bombs:
            x, y = bomb.position
            if not is_partially_observable or in_view_range(position, x, y):
                blast_strengths[x, y] = bomb.blast_strength
                life[x, y] = bomb.life
        return blast_strengths, life

    def in_view_range(position, v_row, v_col):
        """Checks to see if a tile is in an agent's viewing area."""
        row, col = position
        return all([row >= v_row - agent_view_size,
                    row <= v_row + agent_view_size,
                    col >= v_col - agent_view_size,
                    col <= v_col + agent_view_size])

    attrs = ['position', 'blast_strength', 'can_kick', 'teammate', 'ammo',
             'enemies']
    alive_agents = [utility.agent_value(agent.agent_id)
                    for agent in agents if agent.is_alive]
    observations = []
    for agent in agents:
        agent_obs = {'alive': alive_agents}
        board = curr_board
        if is_partially_observable:
            board = board.copy()
            for row in range(board_size):
                for col in range(board_size):
                    if not in_view_range(agent.position, row, col):
                        board[row, col] = constants.Item.Fog.value
        agent_obs['board'] = board
        bomb_blast_strengths, bomb_life = make_bomb_maps(agent.position)
        agent_obs['bomb_blast_strength'] = bomb_blast_strengths
        agent_obs['bomb_life'] = bomb_life
        agent_obs['game_type'] = game_type.value
        agent_obs['game_env'] = game_env
        for attr in attrs:
            assert hasattr(agent, attr)
            agent_obs[attr] = getattr(agent, attr)
        observations.append(agent_obs)
    return observations
# pommerman/forward_model.py · crossing · psyoblade/playground · python
def crossing(current, desired):
    """Checks to see if an agent is crossing paths."""
    current_x, current_y = current
    desired_x, desired_y = desired
    if current_x != desired_x:
        assert current_y == desired_y
        return ('X', min(current_x, desired_x), current_y)
    assert current_x == desired_x
    return ('Y', current_x, min(current_y, desired_y))
# pommerman/forward_model.py · any_lst_equal · psyoblade/playground · python
def any_lst_equal(lst, values):
    """Checks whether the list equals any of the given values."""
    return any(lst == v for v in values)
# kotlin-website.py · get_index_page · Chinay-Domitrix/kotlin-web-site · python
@app.route('/<path:page_path>')
def get_index_page(page_path):
    """Handle requests whose URLs don't end with '.html' (for example, '/doc/').

    We don't need any generator here, because such URLs are equivalent to the
    same URLs with 'index.html' at the end.

    :param page_path: str
    :return: str
    """
    if not page_path.endswith('/'):
        page_path += '/'
    return process_page(page_path + 'index')
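So '/docs' and '/docs/' both resolve to the page 'docs/index'; a quick sketch of just the path normalization, pulled out on its own:

def normalize(page_path):
    # Mirrors get_index_page: ensure a trailing slash, then append 'index'.
    if not page_path.endswith('/'):
        page_path += '/'
    return page_path + 'index'

assert normalize('docs') == normalize('docs/') == 'docs/index'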
# voc_classifier/metrics_for_multilabel.py · mean_precision_k · myeonghak/kobert-multi-label-VOC-classifier · python
def mean_precision_k(y_true, y_score, k=10):
    """Mean precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    mean precision @k : float
    """
    p_ks = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            p_ks.append(ranking_precision_score(y_t, y_s, k=k))
    return np.mean(p_ks)
# voc_classifier/metrics_for_multilabel.py · mean_recall_k · myeonghak/kobert-multi-label-VOC-classifier · python
def mean_recall_k(y_true, y_score, k=10):
    """Mean recall at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    mean recall @k : float
    """
    r_ks = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            r_ks.append(ranking_recall_score(y_t, y_s, k=k))
    return np.mean(r_ks)
# voc_classifier/metrics_for_multilabel.py · mean_ndcg_score · myeonghak/kobert-multi-label-VOC-classifier · python
def mean_ndcg_score(y_true, y_score, k=10, gains='exponential'):
    """Mean normalized discounted cumulative gain (NDCG) at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    Mean NDCG @k : float
    """
    ndcg_s = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains))
    return np.mean(ndcg_s)
# voc_classifier/metrics_for_multilabel.py · mean_rprecision_k · myeonghak/kobert-multi-label-VOC-classifier · python
def mean_rprecision_k(y_true, y_score, k=10):
    """Mean R-precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    mean R-precision @k : float
    """
    p_ks = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            p_ks.append(ranking_rprecision_score(y_t, y_s, k=k))
    return np.mean(p_ks)
# voc_classifier/metrics_for_multilabel.py · ranking_recall_score · myeonghak/kobert-multi-label-VOC-classifier · python
def ranking_recall_score(y_true, y_score, k=10):
    """Recall at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    recall @k : float
    """
    unique_y = np.unique(y_true)
    if len(unique_y) == 1:
        raise ValueError('The score cannot be approximated.')
    elif len(unique_y) > 2:
        raise ValueError('Only supported for two relevance levels.')
    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)
    return float(n_relevant) / n_pos
# voc_classifier/metrics_for_multilabel.py · ranking_precision_score · myeonghak/kobert-multi-label-VOC-classifier · python
def ranking_precision_score(y_true, y_score, k=10):
    """Precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    precision @k : float
    """
    unique_y = np.unique(y_true)
    if len(unique_y) == 1:
        raise ValueError('The score cannot be approximated.')
    elif len(unique_y) > 2:
        raise ValueError('Only supported for two relevance levels.')
    pos_label = unique_y[1]
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)
    return float(n_relevant) / k
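A quick sanity check with a toy ranking, worked out by hand (the top-2 items by score are items 3 and 1, of which one is relevant):

import numpy as np

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
print(ranking_precision_score(y_true, y_score, k=2))  # 1 relevant of top 2 -> 0.5
print(ranking_recall_score(y_true, y_score, k=2))     # 1 of 2 positives    -> 0.5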
def ranking_rprecision_score(y_true, y_score, k=10):
'Precision at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n y_score : array-like, shape = [n_samples]\n Predicted scores.\n k : int\n Rank.\n Returns\n -------\n precision @k : float\n '
unique_y = np.unique(y_true)
if (len(unique_y) == 1):
return ValueError('The score cannot be approximated.')
elif (len(unique_y) > 2):
raise ValueError('Only supported for two relevance levels.')
pos_label = unique_y[1]
n_pos = np.sum((y_true == pos_label))
order = np.argsort(y_score)[::(- 1)]
y_true = np.take(y_true, order[:k])
n_relevant = np.sum((y_true == pos_label))
return (float(n_relevant) / min(k, n_pos)) | -2,170,543,630,146,516,200 | Precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float | voc_classifier/metrics_for_multilabel.py | ranking_rprecision_score | myeonghak/kobert-multi-label-VOC-classifier | python | def ranking_rprecision_score(y_true, y_score, k=10):
'R-precision at rank k\n    Parameters\n    ----------\n    y_true : array-like, shape = [n_samples]\n        Ground truth (true relevance labels).\n    y_score : array-like, shape = [n_samples]\n        Predicted scores.\n    k : int\n        Rank.\n    Returns\n    -------\n    precision @k : float\n    '
unique_y = np.unique(y_true)
if (len(unique_y) == 1):
raise ValueError('The score cannot be approximated.')
elif (len(unique_y) > 2):
raise ValueError('Only supported for two relevance levels.')
pos_label = unique_y[1]
n_pos = np.sum((y_true == pos_label))
order = np.argsort(y_score)[::(- 1)]
y_true = np.take(y_true, order[:k])
n_relevant = np.sum((y_true == pos_label))
return (float(n_relevant) / min(k, n_pos)) |
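
An illustrative call for ranking_rprecision_score showing how the min(k, n_pos) denominator differs from plain precision@k once k exceeds the number of relevant items (data invented; numpy as np and the function above assumed in scope).

import numpy as np

y_true = np.array([1, 0, 0, 1, 1, 0])
y_score = np.array([0.9, 0.8, 0.1, 0.7, 0.3, 0.2])

# all 3 relevant items fall inside the top 5 and min(5, 3) = 3,
# so R-precision is 1.0 where plain precision@5 would be 3 / 5
print(ranking_rprecision_score(y_true, y_score, k=5))
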
def average_precision_score(y_true, y_score, k=10):
'Average precision at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n y_score : array-like, shape = [n_samples]\n Predicted scores.\n k : int\n Rank.\n Returns\n -------\n average precision @k : float\n '
unique_y = np.unique(y_true)
if (len(unique_y) == 1):
raise ValueError('The score cannot be approximated.')
elif (len(unique_y) > 2):
raise ValueError('Only supported for two relevance levels.')
pos_label = unique_y[1]
n_pos = np.sum((y_true == pos_label))
order = np.argsort(y_score)[::(- 1)][:min(n_pos, k)]
y_true = np.asarray(y_true)[order]
score = 0
for i in range(len(y_true)):
if (y_true[i] == pos_label):
prec = 0
for j in range(0, (i + 1)):
if (y_true[j] == pos_label):
prec += 1.0
prec /= (i + 1.0)
score += prec
if (n_pos == 0):
return 0
return (score / n_pos) | -1,121,636,309,767,000,700 | Average precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
average precision @k : float | voc_classifier/metrics_for_multilabel.py | average_precision_score | myeonghak/kobert-multi-label-VOC-classifier | python | def average_precision_score(y_true, y_score, k=10):
'Average precision at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n y_score : array-like, shape = [n_samples]\n Predicted scores.\n k : int\n Rank.\n Returns\n -------\n average precision @k : float\n '
unique_y = np.unique(y_true)
if (len(unique_y) == 1):
raise ValueError('The score cannot be approximated.')
elif (len(unique_y) > 2):
raise ValueError('Only supported for two relevance levels.')
pos_label = unique_y[1]
n_pos = np.sum((y_true == pos_label))
order = np.argsort(y_score)[::(- 1)][:min(n_pos, k)]
y_true = np.asarray(y_true)[order]
score = 0
for i in range(len(y_true)):
if (y_true[i] == pos_label):
prec = 0
for j in range(0, (i + 1)):
if (y_true[j] == pos_label):
prec += 1.0
prec /= (i + 1.0)
score += prec
if (n_pos == 0):
return 0
return (score / n_pos) |
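
A worked call for average_precision_score (invented data; numpy as np and the function above assumed in scope): AP averages the precision at each rank where a relevant item appears, over the number of relevant items.

import numpy as np

y_true = np.array([1, 0, 0, 1, 1, 0])
y_score = np.array([0.9, 0.8, 0.1, 0.7, 0.3, 0.2])

# relevant items sit at ranks 1 and 3 of the min(n_pos, k) = 3 deep list,
# so AP = (1/1 + 2/3) / 3 = 0.555...
print(average_precision_score(y_true, y_score, k=3))
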
def dcg_score(y_true, y_score, k=10, gains='exponential'):
'Discounted cumulative gain (DCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n y_score : array-like, shape = [n_samples]\n Predicted scores.\n k : int\n Rank.\n gains : str\n Whether gains should be "exponential" (default) or "linear".\n Returns\n -------\n DCG @k : float\n '
order = np.argsort(y_score)[::(- 1)]
y_true = np.take(y_true, order[:k])
if (gains == 'exponential'):
gains = ((2 ** y_true) - 1)
elif (gains == 'linear'):
gains = y_true
else:
raise ValueError('Invalid gains option.')
discounts = np.log2((np.arange(len(y_true)) + 2))
return np.sum((gains / discounts)) | 1,590,550,218,463,529,700 | Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
DCG @k : float | voc_classifier/metrics_for_multilabel.py | dcg_score | myeonghak/kobert-multi-label-VOC-classifier | python | def dcg_score(y_true, y_score, k=10, gains='exponential'):
'Discounted cumulative gain (DCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n y_score : array-like, shape = [n_samples]\n Predicted scores.\n k : int\n Rank.\n gains : str\n Whether gains should be "exponential" (default) or "linear".\n Returns\n -------\n DCG @k : float\n '
order = np.argsort(y_score)[::(- 1)]
y_true = np.take(y_true, order[:k])
if (gains == 'exponential'):
gains = ((2 ** y_true) - 1)
elif (gains == 'linear'):
gains = y_true
else:
raise ValueError('Invalid gains option.')
discounts = np.log2((np.arange(len(y_true)) + 2))
return np.sum((gains / discounts)) |
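
A dcg_score sketch with graded (not just binary) relevance, which is where the exponential-versus-linear gains option matters (values invented; numpy as np and the function above assumed in scope).

import numpy as np

y_true = np.array([3, 2, 0, 1])           # graded relevance judgments
y_score = np.array([0.6, 0.9, 0.3, 0.5])

# ranked relevances are [2, 3, 1]; exponential gains (2**rel - 1) reward
# the highly relevant document far more than linear gains do
print(dcg_score(y_true, y_score, k=3, gains='exponential'))
print(dcg_score(y_true, y_score, k=3, gains='linear'))
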
def ndcg_score(y_true, y_score, k=10, gains='exponential'):
'Normalized discounted cumulative gain (NDCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n y_score : array-like, shape = [n_samples]\n Predicted scores.\n k : int\n Rank.\n gains : str\n Whether gains should be "exponential" (default) or "linear".\n Returns\n -------\n NDCG @k : float\n '
best = dcg_score(y_true, y_true, k, gains)
actual = dcg_score(y_true, y_score, k, gains)
return (actual / best) | 5,496,232,716,610,578,000 | Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
NDCG @k : float | voc_classifier/metrics_for_multilabel.py | ndcg_score | myeonghak/kobert-multi-label-VOC-classifier | python | def ndcg_score(y_true, y_score, k=10, gains='exponential'):
'Normalized discounted cumulative gain (NDCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n y_score : array-like, shape = [n_samples]\n Predicted scores.\n k : int\n Rank.\n gains : str\n Whether gains should be "exponential" (default) or "linear".\n Returns\n -------\n NDCG @k : float\n '
best = dcg_score(y_true, y_true, k, gains)
actual = dcg_score(y_true, y_score, k, gains)
return (actual / best) |
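
An illustrative check of the normalization in ndcg_score: when the scores reproduce the ideal ordering, the result is exactly 1.0 (invented data; numpy as np and the functions above assumed in scope).

import numpy as np

y_true = np.array([3, 2, 0, 1])

print(ndcg_score(y_true, y_true, k=4))                           # 1.0
print(ndcg_score(y_true, np.array([0.1, 0.9, 0.8, 0.2]), k=4))   # < 1.0
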
def dcg_from_ranking(y_true, ranking):
'Discounted cumulative gain (DCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n ranking : array-like, shape = [k]\n Document indices, i.e.,\n ranking[0] is the index of top-ranked document,\n ranking[1] is the index of second-ranked document,\n ...\n k : int\n Rank.\n Returns\n -------\n DCG @k : float\n '
y_true = np.asarray(y_true)
ranking = np.asarray(ranking)
rel = y_true[ranking]
gains = ((2 ** rel) - 1)
discounts = np.log2((np.arange(len(ranking)) + 2))
return np.sum((gains / discounts)) | 837,021,588,487,954,000 | Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
k : int
Rank.
Returns
-------
DCG @k : float | voc_classifier/metrics_for_multilabel.py | dcg_from_ranking | myeonghak/kobert-multi-label-VOC-classifier | python | def dcg_from_ranking(y_true, ranking):
'Discounted cumulative gain (DCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n ranking : array-like, shape = [k]\n Document indices, i.e.,\n ranking[0] is the index of top-ranked document,\n ranking[1] is the index of second-ranked document,\n ...\n k : int\n Rank.\n Returns\n -------\n DCG @k : float\n '
y_true = np.asarray(y_true)
ranking = np.asarray(ranking)
rel = y_true[ranking]
gains = ((2 ** rel) - 1)
discounts = np.log2((np.arange(len(ranking)) + 2))
return np.sum((gains / discounts)) |
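
The ranking-based variant takes explicit document indices instead of scores; this invented call reproduces the score-induced order from the dcg_score sketch above (numpy as np and the function above assumed in scope).

import numpy as np

y_true = np.array([3, 2, 0, 1])
# documents in ranked order: doc 1 first, then doc 0, then doc 3
print(dcg_from_ranking(y_true, [1, 0, 3]))
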
def ndcg_from_ranking(y_true, ranking):
'Normalized discounted cumulative gain (NDCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n ranking : array-like, shape = [k]\n Document indices, i.e.,\n ranking[0] is the index of top-ranked document,\n ranking[1] is the index of second-ranked document,\n ...\n k : int\n Rank.\n Returns\n -------\n NDCG @k : float\n '
k = len(ranking)
best_ranking = np.argsort(y_true)[::(- 1)]
best = dcg_from_ranking(y_true, best_ranking[:k])
return (dcg_from_ranking(y_true, ranking) / best) | 4,945,442,521,592,618,000 | Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
k : int
Rank.
Returns
-------
NDCG @k : float | voc_classifier/metrics_for_multilabel.py | ndcg_from_ranking | myeonghak/kobert-multi-label-VOC-classifier | python | def ndcg_from_ranking(y_true, ranking):
'Normalized discounted cumulative gain (NDCG) at rank k\n Parameters\n ----------\n y_true : array-like, shape = [n_samples]\n Ground truth (true relevance labels).\n ranking : array-like, shape = [k]\n Document indices, i.e.,\n ranking[0] is the index of top-ranked document,\n ranking[1] is the index of second-ranked document,\n ...\n k : int\n Rank.\n Returns\n -------\n NDCG @k : float\n '
k = len(ranking)
best_ranking = np.argsort(y_true)[::(- 1)]
best = dcg_from_ranking(y_true, best_ranking[:k])
return (dcg_from_ranking(y_true, ranking) / best) |
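
A final invented check for ndcg_from_ranking: k is inferred from the ranking length, and passing the ideal prefix (documents sorted by true relevance) yields 1.0 (numpy as np and the functions above assumed in scope).

import numpy as np

y_true = np.array([3, 2, 0, 1])
ideal_prefix = np.argsort(y_true)[::-1][:3]    # [0, 1, 3]

print(ndcg_from_ranking(y_true, ideal_prefix))   # 1.0
print(ndcg_from_ranking(y_true, [3, 2, 1]))      # worse ordering, < 1.0
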
def __init__(self):
'Noise, system setting and x0 settings'
super(NarendraLiBenchmark, self).__init__(nx=2) | 3,262,071,650,773,849,600 | Noise, system setting and x0 settings | deepSI/systems/narendra_li_benchmark.py | __init__ | csutakbalazs/deepSI | python | def __init__(self):
super(NarendraLiBenchmark, self).__init__(nx=2) |
@pytest.fixture()
def test_filename(change_to_resources_dir, storage, request) -> Generator[(str, None, None)]:
'Pushes a file to remote storage, yields its filename and then deletes it from remote storage'
filename = request.param
storage.push_file(filename)
(yield filename)
storage.delete(filename) | -5,960,721,702,347,764,000 | Pushes a file to remote storage, yields its filename and then deletes it from remote storage | tests/accsr/test_remote_storage.py | test_filename | AnesBenmerzoug/accsr | python | @pytest.fixture()
def test_filename(change_to_resources_dir, storage, request) -> Generator[(str, None, None)]:
filename = request.param
storage.push_file(filename)
(yield filename)
storage.delete(filename) |
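
A hedged sketch of how such a push/yield/delete fixture is typically consumed; the indirect parametrization wiring and the file name below are illustrative assumptions, not taken from the accsr test suite.

import pytest

# the fixture receives "sample_file.txt" via request.param, pushes it to
# remote storage before the test body runs, and deletes it afterwards
@pytest.mark.parametrize("test_filename", ["sample_file.txt"], indirect=True)
def test_roundtrip(test_filename):
    # a real test would pull the file back here and compare contents
    assert test_filename == "sample_file.txt"
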
@pytest.fixture()
def setup_name_collision(change_to_resources_dir, storage):
'\n Pushes files and dirs with colliding names to remote storage, yields files pushed\n and deletes everything at cleanup\n '
pushed_objects = storage.push(NAME_COLLISIONS_DIR_NAME)
(yield pushed_objects)
storage.delete(NAME_COLLISIONS_DIR_NAME) | 7,052,228,047,297,320,000 | Pushes files and dirs with colliding names to remote storage, yields files pushed
and deletes everything at cleanup | tests/accsr/test_remote_storage.py | setup_name_collision | AnesBenmerzoug/accsr | python | @pytest.fixture()
def setup_name_collision(change_to_resources_dir, storage):
'\n Pushes files and dirs with colliding names to remote storage, yields files pushed\n and deletes everything at cleanup\n '
pushed_objects = storage.push(NAME_COLLISIONS_DIR_NAME)
(yield pushed_objects)
storage.delete(NAME_COLLISIONS_DIR_NAME) |
@pytest.fixture()
def test_dirname(change_to_resources_dir, storage, request) -> Generator[(str, None, None)]:
'Pushes a directory to remote storage, yields its name and then deletes it from remote storage'
dirname = request.param
storage.push_directory(dirname)
(yield dirname)
storage.delete(dirname) | 5,710,146,440,443,977,000 | Pushes a directory to remote storage, yields its name and then deletes it from remote storage | tests/accsr/test_remote_storage.py | test_dirname | AnesBenmerzoug/accsr | python | @pytest.fixture()
def test_dirname(change_to_resources_dir, storage, request) -> Generator[(str, None, None)]:
dirname = request.param
storage.push_directory(dirname)
(yield dirname)
storage.delete(dirname) |
def read_infile(infile):
'STUB'
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',') | 2,087,596,246,220,939,300 | STUB | scripts/csv_xml.py | read_infile | CRobeck/RAJAPerf | python | def read_infile(infile):
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',') |
def get_date():
'STUB'
date = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
return date | -4,645,876,791,713,465,000 | STUB | scripts/csv_xml.py | get_date | CRobeck/RAJAPerf | python | def get_date():
date = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
return date |
def associate_timings_with_xml(xml_element, timing_dict, suite_or_test_name):
'STUB -- xml_element will be an element of perf_report;\n timing_dict = a map of variant names to test run times\n '
for (key, value) in timing_dict.items():
xml_element.set(key.lower(), str(value))
xml_element.set('name', suite_or_test_name.strip()) | -7,996,876,121,615,786,000 | STUB -- xml_element will be an element of perf_report;
timing_dict = a map of variant names to test run times | scripts/csv_xml.py | associate_timings_with_xml | CRobeck/RAJAPerf | python | def associate_timings_with_xml(xml_element, timing_dict, suite_or_test_name):
'STUB -- xml_element will be an element of perf_report;\n timing_dict = a map of variant names to test run times\n '
for (key, value) in timing_dict.items():
xml_element.set(key.lower(), str(value))
xml_element.set('name', suite_or_test_name.strip()) |
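
A minimal sketch of what associate_timings_with_xml produces; the element, variant names, and timing values are invented, and the function above is assumed to be in scope.

import xml.etree.ElementTree as ET

timings = {'Base_Seq': 12.5, 'RAJA_Seq': 11.9}
el = ET.Element('timing')
associate_timings_with_xml(el, timings, 'Basic ')

# variant names become lower-cased attributes and the name is stripped,
# roughly: <timing base_seq="12.5" raja_seq="11.9" name="Basic" />
print(ET.tostring(el).decode())
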
def create_RPS_xml_report(suite_name, suite_data_list):
'STUB - suite_name is a string = Basic, KokkosMechanics, etc.;\n suite_data_list will be the values for a key, Basic or KokkosMechanics\n '
aggregate_results_dict = dict()
for list_item in suite_data_list:
for (index, timing) in enumerate(list_item[1:]):
if ('Not run' in timing):
continue
variant_name = col_meanings_dict[(index + 1)]
if (variant_name not in aggregate_results_dict):
aggregate_results_dict[variant_name] = 0.0
aggregate_results_dict[variant_name] += float(timing)
suite_root = ET.SubElement(perf_root, 'timing')
associate_timings_with_xml(suite_root, aggregate_results_dict, suite_name)
for list_item in suite_data_list:
test_timings_dict = dict()
for (index, timing) in enumerate(list_item[1:]):
if ('Not run' in timing):
continue
variant_name = col_meanings_dict[(index + 1)]
test_timings_dict[variant_name] = float(timing)
xml_element_for_a_kernel_test = ET.SubElement(suite_root, 'timing')
associate_timings_with_xml(xml_element_for_a_kernel_test, test_timings_dict, list_item[0]) | -2,429,547,739,994,376,700 | STUB - suite_name is a string = Basic, KokkosMechanics, etc.;
suite_data_list will be the values for a key, Basic or KokkosMechanics | scripts/csv_xml.py | create_RPS_xml_report | CRobeck/RAJAPerf | python | def create_RPS_xml_report(suite_name, suite_data_list):
'STUB - suite_name is a string = Basic, KokkosMechanics, etc.;\n suite_data_list will be the values for a key, Basic or KokkosMechanics\n '
aggregate_results_dict = dict()
for list_item in suite_data_list:
for (index, timing) in enumerate(list_item[1:]):
if ('Not run' in timing):
continue
variant_name = col_meanings_dict[(index + 1)]
if (variant_name not in aggregate_results_dict):
aggregate_results_dict[variant_name] = 0.0
aggregate_results_dict[variant_name] += float(timing)
suite_root = ET.SubElement(perf_root, 'timing')
associate_timings_with_xml(suite_root, aggregate_results_dict, suite_name)
for list_item in suite_data_list:
test_timings_dict = dict()
for (index, timing) in enumerate(list_item[1:]):
if ('Not run' in timing):
continue
variant_name = col_meanings_dict[(index + 1)]
test_timings_dict[variant_name] = float(timing)
xml_element_for_a_kernel_test = ET.SubElement(suite_root, 'timing')
associate_timings_with_xml(xml_element_for_a_kernel_test, test_timings_dict, list_item[0]) |
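
A hedged sketch of the inputs create_RPS_xml_report expects; the module-level names below (col_meanings_dict, perf_root) mirror what the script appears to set up while reading the CSV, and the kernel names, variants, and timings are invented. It assumes the functions above live in the same namespace as these globals.

import xml.etree.ElementTree as ET

# assumed module-level state: CSV column index -> variant name, plus XML root
col_meanings_dict = {0: 'Kernel', 1: 'Base_Seq', 2: 'RAJA_Seq'}
perf_root = ET.Element('performance-report')

# each row: a kernel name followed by one timing string per variant
suite_data = [['DAXPY', '0.12', '0.11'],
              ['IF_QUAD', 'Not run', '0.30']]

create_RPS_xml_report('Basic', suite_data)
print(ET.tostring(perf_root).decode())
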
def run():
'STUB'
read_infile(infile)
for key in heirarch_dict.keys():
create_RPS_xml_report(key, heirarch_dict[key])
ET.dump(perf_report) | 873,157,699,713,065,100 | STUB | scripts/csv_xml.py | run | CRobeck/RAJAPerf | python | def run():
read_infile(infile)
for key in heirarch_dict.keys():
create_RPS_xml_report(key, heirarch_dict[key])
ET.dump(perf_report) |
def choose_device(self, window, keystore):
'This dialog box should be usable even if the user has\n forgotten their PIN or it is in bootloader mode.'
device_id = self.device_manager().xpub_id(keystore.xpub)
if (not device_id):
try:
info = self.device_manager().select_device(self, keystore.handler, keystore)
except UserCancelled:
return
device_id = info.device.id_
return device_id | -8,359,277,091,128,003,000 | This dialog box should be usable even if the user has
forgotten their PIN or it is in bootloader mode. | qtum_electrum/plugins/hw_wallet/qt.py | choose_device | mikehash/qtum-electrum | python | def choose_device(self, window, keystore):
'This dialog box should be usable even if the user has\n forgotten their PIN or it is in bootloader mode.'
device_id = self.device_manager().xpub_id(keystore.xpub)
if (not device_id):
try:
info = self.device_manager().select_device(self, keystore.handler, keystore)
except UserCancelled:
return
device_id = info.device.id_
return device_id |
def executeJob(sc=None, app: PyCryptoBot=None, state: AppState=None, trading_data=pd.DataFrame()):
'Trading bot job which runs at a scheduled interval'
global technical_analysis
if (app.isLive() and (app.getTime() is None)):
Logger.warning('Your connection to the exchange has gone down, will retry in 5 minutes!')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
return
state.iterations = (state.iterations + 1)
if (not app.isSimulation()):
trading_data = app.getHistoricalData(app.getMarket(), app.getGranularity())
elif (len(trading_data) == 0):
return None
if (app.isSimulation() and (len(trading_data.columns) > 8)):
df = trading_data
else:
trading_dataCopy = trading_data.copy()
technical_analysis = TechnicalAnalysis(trading_dataCopy)
technical_analysis.addAll()
df = technical_analysis.getDataFrame()
if app.isSimulation():
df_last = app.getInterval(df, state.iterations)
else:
df_last = app.getInterval(df)
if (len(df_last.index.format()) > 0):
current_df_index = str(df_last.index.format()[0])
else:
current_df_index = state.last_df_index
formatted_current_df_index = (f'{current_df_index} 00:00:00' if (len(current_df_index) == 10) else current_df_index)
if ((app.getSmartSwitch() == 1) and (app.getGranularity() == 3600) and (app.is1hEMA1226Bull() is True) and (app.is6hEMA1226Bull() is True)):
Logger.info('*** smart switch from granularity 3600 (1 hour) to 900 (15 min) ***')
app.notifyTelegram((app.getMarket() + ' smart switch from granularity 3600 (1 hour) to 900 (15 min)'))
app.setGranularity(900)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if ((app.getSmartSwitch() == 1) and (app.getGranularity() == 900) and (app.is1hEMA1226Bull() is False) and (app.is6hEMA1226Bull() is False)):
Logger.info('*** smart switch from granularity 900 (15 min) to 3600 (1 hour) ***')
app.notifyTelegram((app.getMarket() + ' smart switch from granularity 900 (15 min) to 3600 (1 hour)'))
app.setGranularity(3600)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if ((app.getExchange() == 'binance') and (app.getGranularity() == 86400)):
if (len(df) < 250):
Logger.error((('error: data frame length is < 250 (' + str(len(df))) + ')'))
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
elif (len(df) < 300):
if (not app.isSimulation()):
Logger.error((('error: data frame length is < 300 (' + str(len(df))) + ')'))
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
if (len(df_last) > 0):
now = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
if (not app.isSimulation()):
ticker = app.getTicker(app.getMarket())
now = ticker[0]
price = ticker[1]
if ((price < df_last['low'].values[0]) or (price == 0)):
price = float(df_last['close'].values[0])
else:
price = float(df_last['close'].values[0])
if (price < 0.0001):
raise Exception((app.getMarket() + ' is unsuitable for trading, quote price is less than 0.0001!'))
ema12gtema26 = bool(df_last['ema12gtema26'].values[0])
ema12gtema26co = bool(df_last['ema12gtema26co'].values[0])
goldencross = bool(df_last['goldencross'].values[0])
macdgtsignal = bool(df_last['macdgtsignal'].values[0])
macdgtsignalco = bool(df_last['macdgtsignalco'].values[0])
ema12ltema26 = bool(df_last['ema12ltema26'].values[0])
ema12ltema26co = bool(df_last['ema12ltema26co'].values[0])
macdltsignal = bool(df_last['macdltsignal'].values[0])
macdltsignalco = bool(df_last['macdltsignalco'].values[0])
obv = float(df_last['obv'].values[0])
obv_pc = float(df_last['obv_pc'].values[0])
elder_ray_buy = bool(df_last['eri_buy'].values[0])
elder_ray_sell = bool(df_last['eri_sell'].values[0])
if (app.isSimulation() and (state.iterations < 200)):
goldencross = True
hammer = bool(df_last['hammer'].values[0])
inverted_hammer = bool(df_last['inverted_hammer'].values[0])
hanging_man = bool(df_last['hanging_man'].values[0])
shooting_star = bool(df_last['shooting_star'].values[0])
three_white_soldiers = bool(df_last['three_white_soldiers'].values[0])
three_black_crows = bool(df_last['three_black_crows'].values[0])
morning_star = bool(df_last['morning_star'].values[0])
evening_star = bool(df_last['evening_star'].values[0])
three_line_strike = bool(df_last['three_line_strike'].values[0])
abandoned_baby = bool(df_last['abandoned_baby'].values[0])
morning_doji_star = bool(df_last['morning_doji_star'].values[0])
evening_doji_star = bool(df_last['evening_doji_star'].values[0])
two_black_gapping = bool(df_last['two_black_gapping'].values[0])
strategy = Strategy(app, state, df, state.iterations)
state.action = strategy.getAction()
immediate_action = False
(margin, profit, sell_fee) = (0, 0, 0)
if ((state.last_buy_size > 0) and (state.last_buy_price > 0) and (price > 0) and (state.last_action == 'BUY')):
if (price > state.last_buy_high):
state.last_buy_high = price
if (state.last_buy_high > 0):
change_pcnt_high = (((price / state.last_buy_high) - 1) * 100)
else:
change_pcnt_high = 0
state.last_buy_fee = round((state.last_buy_size * app.getTakerFee()), 8)
state.last_buy_filled = round(((state.last_buy_size - state.last_buy_fee) / state.last_buy_price), 8)
if (not app.isSimulation()):
exchange_last_buy = app.getLastBuy()
if (exchange_last_buy is not None):
if (state.last_buy_size != exchange_last_buy['size']):
state.last_buy_size = exchange_last_buy['size']
if (state.last_buy_filled != exchange_last_buy['filled']):
state.last_buy_filled = exchange_last_buy['filled']
if (state.last_buy_price != exchange_last_buy['price']):
state.last_buy_price = exchange_last_buy['price']
if (app.getExchange() == 'coinbasepro'):
if (state.last_buy_fee != exchange_last_buy['fee']):
state.last_buy_fee = exchange_last_buy['fee']
(margin, profit, sell_fee) = calculate_margin(buy_size=state.last_buy_size, buy_filled=state.last_buy_filled, buy_price=state.last_buy_price, buy_fee=state.last_buy_fee, sell_percent=app.getSellPercent(), sell_price=price, sell_taker_fee=app.getTakerFee())
if strategy.isSellTrigger(price, technical_analysis.getTradeExit(price), margin, change_pcnt_high, obv_pc, macdltsignal):
state.action = 'SELL'
state.last_action = 'BUY'
immediate_action = True
if strategy.isWaitTrigger(margin):
state.action = 'WAIT'
state.last_action = 'BUY'
immediate_action = False
bullbeartext = ''
if ((app.disableBullOnly() is True) or (df_last['sma50'].values[0] == df_last['sma200'].values[0])):
bullbeartext = ''
elif (goldencross is True):
bullbeartext = ' (BULL)'
elif (goldencross is False):
bullbeartext = ' (BEAR)'
if ((immediate_action is True) or (state.last_df_index != current_df_index)):
precision = 4
if (price < 0.01):
precision = 8
truncate = functools.partial(_truncate, n=precision)
price_text = ('Close: ' + truncate(price))
ema_text = app.compare(df_last['ema12'].values[0], df_last['ema26'].values[0], 'EMA12/26', precision)
macd_text = ''
if (app.disableBuyMACD() is False):
macd_text = app.compare(df_last['macd'].values[0], df_last['signal'].values[0], 'MACD', precision)
obv_text = ''
if (app.disableBuyOBV() is False):
obv_text = (((('OBV: ' + truncate(df_last['obv'].values[0])) + ' (') + str(truncate(df_last['obv_pc'].values[0]))) + '%)')
state.eri_text = ''
if (app.disableBuyElderRay() is False):
if (elder_ray_buy is True):
state.eri_text = 'ERI: buy | '
elif (elder_ray_sell is True):
state.eri_text = 'ERI: sell | '
else:
state.eri_text = 'ERI: | '
if (hammer is True):
log_text = '* Candlestick Detected: Hammer ("Weak - Reversal - Bullish Signal - Up")'
Logger.info(log_text)
if (shooting_star is True):
log_text = '* Candlestick Detected: Shooting Star ("Weak - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
if (hanging_man is True):
log_text = '* Candlestick Detected: Hanging Man ("Weak - Continuation - Bearish Pattern - Down")'
Logger.info(log_text)
if (inverted_hammer is True):
log_text = '* Candlestick Detected: Inverted Hammer ("Weak - Continuation - Bullish Pattern - Up")'
Logger.info(log_text)
if (three_white_soldiers is True):
log_text = '*** Candlestick Detected: Three White Soldiers ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (three_black_crows is True):
log_text = '* Candlestick Detected: Three Black Crows ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (morning_star is True):
log_text = '*** Candlestick Detected: Morning Star ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (evening_star is True):
log_text = '*** Candlestick Detected: Evening Star ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (three_line_strike is True):
log_text = '** Candlestick Detected: Three Line Strike ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (abandoned_baby is True):
log_text = '** Candlestick Detected: Abandoned Baby ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (morning_doji_star is True):
log_text = '** Candlestick Detected: Morning Doji Star ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (evening_doji_star is True):
log_text = '** Candlestick Detected: Evening Doji Star ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (two_black_gapping is True):
log_text = '*** Candlestick Detected: Two Black Gapping ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
ema_co_prefix = ''
ema_co_suffix = ''
if (ema12gtema26co is True):
ema_co_prefix = '*^ '
ema_co_suffix = ' ^*'
elif (ema12ltema26co is True):
ema_co_prefix = '*v '
ema_co_suffix = ' v*'
elif (ema12gtema26 is True):
ema_co_prefix = '^ '
ema_co_suffix = ' ^'
elif (ema12ltema26 is True):
ema_co_prefix = 'v '
ema_co_suffix = ' v'
macd_co_prefix = ''
macd_co_suffix = ''
if (app.disableBuyMACD() is False):
if (macdgtsignalco is True):
macd_co_prefix = '*^ '
macd_co_suffix = ' ^*'
elif (macdltsignalco is True):
macd_co_prefix = '*v '
macd_co_suffix = ' v*'
elif (macdgtsignal is True):
macd_co_prefix = '^ '
macd_co_suffix = ' ^'
elif (macdltsignal is True):
macd_co_prefix = 'v '
macd_co_suffix = ' v'
obv_prefix = ''
obv_suffix = ''
if (app.disableBuyOBV() is False):
if (float(obv_pc) > 0):
obv_prefix = '^ '
obv_suffix = ' ^ | '
elif (float(obv_pc) < 0):
obv_prefix = 'v '
obv_suffix = ' v | '
if (not app.isVerbose()):
if (state.last_action != ''):
output_text = (((((((((((((((((((((((formatted_current_df_index + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | ') + ema_co_prefix) + ema_text) + ema_co_suffix) + ' | ') + macd_co_prefix) + macd_text) + macd_co_suffix) + obv_prefix) + obv_text) + obv_suffix) + state.eri_text) + ' | ') + state.action) + ' | Last Action: ') + state.last_action)
else:
output_text = ((((((((((((((((((((((formatted_current_df_index + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | ') + ema_co_prefix) + ema_text) + ema_co_suffix) + ' | ') + macd_co_prefix) + macd_text) + macd_co_suffix) + obv_prefix) + obv_text) + obv_suffix) + state.eri_text) + ' | ') + state.action) + ' ')
if (state.last_action == 'BUY'):
if (state.last_buy_size > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
output_text += ((((' | ' + margin_text) + ' (delta: ') + str(round((price - state.last_buy_price), precision))) + ')')
Logger.info(output_text)
if (not app.isSimulation()):
try:
prediction = technical_analysis.seasonalARIMAModelPrediction((int((app.getGranularity() / 60)) * 3))
Logger.info(f'Seasonal ARIMA model predicts the closing price will be {str(round(prediction[1], 2))} at {prediction[0]} (delta: {round((prediction[1] - price), 2)})')
except:
pass
if (state.last_action == 'BUY'):
Logger.info(technical_analysis.printSupportResistanceFibonacciLevels(price))
else:
Logger.debug(((('-- Iteration: ' + str(state.iterations)) + ' --') + bullbeartext))
if (state.last_action == 'BUY'):
if (state.last_buy_size > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
Logger.debug((('-- Margin: ' + margin_text) + ' --'))
Logger.debug(('price: ' + truncate(price)))
Logger.debug(('ema12: ' + truncate(float(df_last['ema12'].values[0]))))
Logger.debug(('ema26: ' + truncate(float(df_last['ema26'].values[0]))))
Logger.debug(('ema12gtema26co: ' + str(ema12gtema26co)))
Logger.debug(('ema12gtema26: ' + str(ema12gtema26)))
Logger.debug(('ema12ltema26co: ' + str(ema12ltema26co)))
Logger.debug(('ema12ltema26: ' + str(ema12ltema26)))
Logger.debug(('sma50: ' + truncate(float(df_last['sma50'].values[0]))))
Logger.debug(('sma200: ' + truncate(float(df_last['sma200'].values[0]))))
Logger.debug(('macd: ' + truncate(float(df_last['macd'].values[0]))))
Logger.debug(('signal: ' + truncate(float(df_last['signal'].values[0]))))
Logger.debug(('macdgtsignal: ' + str(macdgtsignal)))
Logger.debug(('macdltsignal: ' + str(macdltsignal)))
Logger.debug(('obv: ' + str(obv)))
Logger.debug(('obv_pc: ' + str(obv_pc)))
Logger.debug(('action: ' + state.action))
Logger.info('')
Logger.info('================================================================================')
txt = ((' Iteration : ' + str(state.iterations)) + bullbeartext)
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Timestamp : ' + str(df_last.index.format()[0]))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('--------------------------------------------------------------------------------')
txt = (' Close : ' + truncate(price))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' EMA12 : ' + truncate(float(df_last['ema12'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' EMA26 : ' + truncate(float(df_last['ema26'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Crossing Above : ' + str(ema12gtema26co))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Above : ' + str(ema12gtema26))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Crossing Below : ' + str(ema12ltema26co))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Below : ' + str(ema12ltema26))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
if ((ema12gtema26 is True) and (ema12gtema26co is True)):
txt = ' Condition : EMA12 is currently crossing above EMA26'
elif ((ema12gtema26 is True) and (ema12gtema26co is False)):
txt = ' Condition : EMA12 is currently above EMA26 and has crossed over'
elif ((ema12ltema26 is True) and (ema12ltema26co is True)):
txt = ' Condition : EMA12 is currently crossing below EMA26'
elif ((ema12ltema26 is True) and (ema12ltema26co is False)):
txt = ' Condition : EMA12 is currently below EMA26 and has crossed over'
else:
txt = ' Condition : -'
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' SMA20 : ' + truncate(float(df_last['sma20'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' SMA200 : ' + truncate(float(df_last['sma200'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('--------------------------------------------------------------------------------')
txt = (' MACD : ' + truncate(float(df_last['macd'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Signal : ' + truncate(float(df_last['signal'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Above : ' + str(macdgtsignal))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Below : ' + str(macdltsignal))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
if ((macdgtsignal is True) and (macdgtsignalco is True)):
txt = ' Condition : MACD is currently crossing above Signal'
elif ((macdgtsignal is True) and (macdgtsignalco is False)):
txt = ' Condition : MACD is currently above Signal and has crossed over'
elif ((macdltsignal is True) and (macdltsignalco is True)):
txt = ' Condition : MACD is currently crossing below Signal'
elif ((macdltsignal is True) and (macdltsignalco is False)):
txt = ' Condition : MACD is currently below Signal and has crossed over'
else:
txt = ' Condition : -'
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('--------------------------------------------------------------------------------')
txt = (' Action : ' + state.action)
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('================================================================================')
if (state.last_action == 'BUY'):
txt = (' Margin : ' + margin_text)
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('================================================================================')
if (state.action == 'BUY'):
state.last_buy_price = price
state.last_buy_high = state.last_buy_price
if app.isLive():
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') BUY at ') + price_text))
if (not app.isVerbose()):
Logger.info((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | BUY'))
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
Logger.info(((app.getBaseCurrency() + ' balance before order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance before order: ') + str(account.getBalance(app.getQuoteCurrency()))))
state.last_buy_size = float(account.getBalance(app.getQuoteCurrency()))
if (app.getBuyMaxSize() and (state.last_buy_size > app.getBuyMaxSize())):
state.last_buy_size = app.getBuyMaxSize()
resp = app.marketBuy(app.getMarket(), state.last_buy_size, app.getBuyPercent())
Logger.debug(resp)
Logger.info(((app.getBaseCurrency() + ' balance after order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance after order: ') + str(account.getBalance(app.getQuoteCurrency()))))
else:
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') TEST BUY at ') + price_text))
if ((state.last_buy_size == 0) and (state.last_buy_filled == 0)):
state.last_buy_size = 1000
state.first_buy_size = 1000
state.buy_count = (state.buy_count + 1)
state.buy_sum = (state.buy_sum + state.last_buy_size)
if (not app.isVerbose()):
Logger.info((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | BUY'))
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info((' Fibonacci Retracement Levels:' + str(bands)))
technical_analysis.printSupportResistanceLevel(float(price))
if ((len(bands) >= 1) and (len(bands) <= 2)):
if (len(bands) == 1):
first_key = list(bands.keys())[0]
if (first_key == 'ratio1'):
state.fib_low = 0
state.fib_high = bands[first_key]
if (first_key == 'ratio1_618'):
state.fib_low = bands[first_key]
state.fib_high = (bands[first_key] * 2)
else:
state.fib_low = bands[first_key]
elif (len(bands) == 2):
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = (((((app.getMarket() + '_') + app.printGranularity()) + '_buy_') + str(ts)) + '.png')
tradinggraphs.renderEMAandMACD(len(trading_data), ('graphs/' + filename), True)
elif (state.action == 'SELL'):
if app.isLive():
app.notifyTelegram((((((((((app.getMarket() + ' (') + app.printGranularity()) + ') SELL at ') + price_text) + ' (margin: ') + margin_text) + ', (delta: ') + str(round((price - state.last_buy_price), precision))) + ')'))
if (not app.isVerbose()):
Logger.info((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | SELL'))
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info((' Fibonacci Retracement Levels:' + str(bands)))
if ((len(bands) >= 1) and (len(bands) <= 2)):
if (len(bands) == 1):
first_key = list(bands.keys())[0]
if (first_key == 'ratio1'):
state.fib_low = 0
state.fib_high = bands[first_key]
if (first_key == 'ratio1_618'):
state.fib_low = bands[first_key]
state.fib_high = (bands[first_key] * 2)
else:
state.fib_low = bands[first_key]
elif (len(bands) == 2):
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
Logger.info(((app.getBaseCurrency() + ' balance before order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance before order: ') + str(account.getBalance(app.getQuoteCurrency()))))
resp = app.marketSell(app.getMarket(), float(account.getBalance(app.getBaseCurrency())), app.getSellPercent())
Logger.debug(resp)
Logger.info(((app.getBaseCurrency() + ' balance after order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance after order: ') + str(account.getBalance(app.getQuoteCurrency()))))
else:
(margin, profit, sell_fee) = calculate_margin(buy_size=state.last_buy_size, buy_filled=state.last_buy_filled, buy_price=state.last_buy_price, buy_fee=state.last_buy_fee, sell_percent=app.getSellPercent(), sell_price=price, sell_taker_fee=app.getTakerFee())
if (state.last_buy_size > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
app.notifyTelegram((((((((((app.getMarket() + ' (') + app.printGranularity()) + ') TEST SELL at ') + price_text) + ' (margin: ') + margin_text) + ', (delta: ') + str(round((price - state.last_buy_price), precision))) + ')'))
state.sell_count = (state.sell_count + 1)
buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = (buy_size - sell_fee)
state.sell_sum = (state.sell_sum + state.last_buy_size)
if (not app.isVerbose()):
if (price > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
Logger.info(((((((((((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | SELL | ') + str(price)) + ' | BUY | ') + str(state.last_buy_price)) + ' | DIFF | ') + str((price - state.last_buy_price))) + ' | DIFF | ') + str(profit)) + ' | MARGIN NO FEES | ') + margin_text) + ' | MARGIN FEES | ') + str(round(sell_fee, precision))))
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = (((((app.getMarket() + '_') + app.printGranularity()) + '_sell_') + str(ts)) + '.png')
tradinggraphs.renderEMAandMACD(len(trading_data), ('graphs/' + filename), True)
if (state.action in ['BUY', 'SELL']):
state.last_action = state.action
state.last_df_index = str(df_last.index.format()[0])
if ((not app.isLive()) and (state.iterations == len(df))):
Logger.info('\nSimulation Summary: ')
if ((state.buy_count > state.sell_count) and app.allowSellAtLoss()):
state.last_buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = (state.last_buy_size - (state.last_buy_price * app.getTakerFee()))
state.sell_sum = (state.sell_sum + state.last_buy_size)
state.sell_count = (state.sell_count + 1)
elif ((state.buy_count > state.sell_count) and (not app.allowSellAtLoss())):
Logger.info('\n')
Logger.info(' Note : "sell at loss" is disabled and you have an open trade, if the margin')
Logger.info(' result below is negative it will assume you sold at the end of the')
Logger.info(' simulation which may not be ideal. Try setting --sellatloss 1')
Logger.info('\n')
Logger.info((' Buy Count : ' + str(state.buy_count)))
Logger.info((' Sell Count : ' + str(state.sell_count)))
Logger.info((' First Buy : ' + str(state.first_buy_size)))
Logger.info((' Last Sell : ' + str(state.last_buy_size)))
app.notifyTelegram(f'''Simulation Summary
Buy Count: {state.buy_count}
Sell Count: {state.sell_count}
First Buy: {state.first_buy_size}
Last Sell: {state.last_buy_size}
''')
if (state.sell_count > 0):
Logger.info('\n')
Logger.info(((' Margin : ' + _truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)) + '%'))
Logger.info('\n')
Logger.info(' ** non-live simulation, assuming highest fees')
app.notifyTelegram(f''' Margin: {_truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)}%
** non-live simulation, assuming highest fees
''')
else:
if ((state.last_buy_size > 0) and (state.last_buy_price > 0) and (price > 0) and (state.last_action == 'BUY')):
Logger.info((((((((((((now + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | Current Price: ') + str(price)) + ' | Margin: ') + str(margin)) + ' | Profit: ') + str(profit)))
else:
Logger.info((((((((now + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | Current Price: ') + str(price)))
state.iterations = (state.iterations - 1)
if ((not app.disableTracker()) and app.isLive()):
if (app.getExchange() == 'binance'):
account.saveTrackerCSV(app.getMarket())
elif (app.getExchange() == 'coinbasepro'):
account.saveTrackerCSV()
if app.isSimulation():
if (state.iterations < 300):
if (app.simuluationSpeed() in ['fast', 'fast-sample']):
list(map(s.cancel, s.queue))
s.enter(0, 1, executeJob, (sc, app, state, df))
else:
list(map(s.cancel, s.queue))
s.enter(1, 1, executeJob, (sc, app, state, df))
else:
list(map(s.cancel, s.queue))
s.enter(60, 1, executeJob, (sc, app, state)) | 1,999,010,670,596,664,600 | Trading bot job which runs at a scheduled interval | pycryptobot.py | executeJob | treggit/pycryptobot | python | def executeJob(sc=None, app: PyCryptoBot=None, state: AppState=None, trading_data=pd.DataFrame()):
global technical_analysis
if (app.isLive() and (app.getTime() is None)):
Logger.warning('Your connection to the exchange has gone down, will retry in 5 minutes!')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
return
state.iterations = (state.iterations + 1)
if (not app.isSimulation()):
trading_data = app.getHistoricalData(app.getMarket(), app.getGranularity())
elif (len(trading_data) == 0):
return None
if (app.isSimulation() and (len(trading_data.columns) > 8)):
df = trading_data
else:
trading_dataCopy = trading_data.copy()
technical_analysis = TechnicalAnalysis(trading_dataCopy)
technical_analysis.addAll()
df = technical_analysis.getDataFrame()
if app.isSimulation():
df_last = app.getInterval(df, state.iterations)
else:
df_last = app.getInterval(df)
if (len(df_last.index.format()) > 0):
current_df_index = str(df_last.index.format()[0])
else:
current_df_index = state.last_df_index
formatted_current_df_index = (f'{current_df_index} 00:00:00' if (len(current_df_index) == 10) else current_df_index)
if ((app.getSmartSwitch() == 1) and (app.getGranularity() == 3600) and (app.is1hEMA1226Bull() is True) and (app.is6hEMA1226Bull() is True)):
Logger.info('*** smart switch from granularity 3600 (1 hour) to 900 (15 min) ***')
app.notifyTelegram((app.getMarket() + ' smart switch from granularity 3600 (1 hour) to 900 (15 min)'))
app.setGranularity(900)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if ((app.getSmartSwitch() == 1) and (app.getGranularity() == 900) and (app.is1hEMA1226Bull() is False) and (app.is6hEMA1226Bull() is False)):
Logger.info('*** smart switch from granularity 900 (15 min) to 3600 (1 hour) ***')
app.notifyTelegram((app.getMarket() + ' smart switch from granularity 900 (15 min) to 3600 (1 hour)'))
app.setGranularity(3600)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if ((app.getExchange() == 'binance') and (app.getGranularity() == 86400)):
if (len(df) < 250):
Logger.error((('error: data frame length is < 250 (' + str(len(df))) + ')'))
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
elif (len(df) < 300):
if (not app.isSimulation()):
Logger.error((('error: data frame length is < 300 (' + str(len(df))) + ')'))
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
if (len(df_last) > 0):
now = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
if (not app.isSimulation()):
ticker = app.getTicker(app.getMarket())
now = ticker[0]
price = ticker[1]
if ((price < df_last['low'].values[0]) or (price == 0)):
price = float(df_last['close'].values[0])
else:
price = float(df_last['close'].values[0])
if (price < 0.0001):
raise Exception((app.getMarket() + ' is unsuitable for trading, quote price is less than 0.0001!'))
ema12gtema26 = bool(df_last['ema12gtema26'].values[0])
ema12gtema26co = bool(df_last['ema12gtema26co'].values[0])
goldencross = bool(df_last['goldencross'].values[0])
macdgtsignal = bool(df_last['macdgtsignal'].values[0])
macdgtsignalco = bool(df_last['macdgtsignalco'].values[0])
ema12ltema26 = bool(df_last['ema12ltema26'].values[0])
ema12ltema26co = bool(df_last['ema12ltema26co'].values[0])
macdltsignal = bool(df_last['macdltsignal'].values[0])
macdltsignalco = bool(df_last['macdltsignalco'].values[0])
obv = float(df_last['obv'].values[0])
obv_pc = float(df_last['obv_pc'].values[0])
elder_ray_buy = bool(df_last['eri_buy'].values[0])
elder_ray_sell = bool(df_last['eri_sell'].values[0])
if (app.isSimulation() and (state.iterations < 200)):
goldencross = True
hammer = bool(df_last['hammer'].values[0])
inverted_hammer = bool(df_last['inverted_hammer'].values[0])
hanging_man = bool(df_last['hanging_man'].values[0])
shooting_star = bool(df_last['shooting_star'].values[0])
three_white_soldiers = bool(df_last['three_white_soldiers'].values[0])
three_black_crows = bool(df_last['three_black_crows'].values[0])
morning_star = bool(df_last['morning_star'].values[0])
evening_star = bool(df_last['evening_star'].values[0])
three_line_strike = bool(df_last['three_line_strike'].values[0])
abandoned_baby = bool(df_last['abandoned_baby'].values[0])
morning_doji_star = bool(df_last['morning_doji_star'].values[0])
evening_doji_star = bool(df_last['evening_doji_star'].values[0])
two_black_gapping = bool(df_last['two_black_gapping'].values[0])
strategy = Strategy(app, state, df, state.iterations)
state.action = strategy.getAction()
immediate_action = False
(margin, profit, sell_fee) = (0, 0, 0)
if ((state.last_buy_size > 0) and (state.last_buy_price > 0) and (price > 0) and (state.last_action == 'BUY')):
if (price > state.last_buy_high):
state.last_buy_high = price
if (state.last_buy_high > 0):
change_pcnt_high = (((price / state.last_buy_high) - 1) * 100)
else:
change_pcnt_high = 0
state.last_buy_fee = round((state.last_buy_size * app.getTakerFee()), 8)
state.last_buy_filled = round(((state.last_buy_size - state.last_buy_fee) / state.last_buy_price), 8)
if (not app.isSimulation()):
exchange_last_buy = app.getLastBuy()
if (exchange_last_buy is not None):
if (state.last_buy_size != exchange_last_buy['size']):
state.last_buy_size = exchange_last_buy['size']
if (state.last_buy_filled != exchange_last_buy['filled']):
state.last_buy_filled = exchange_last_buy['filled']
if (state.last_buy_price != exchange_last_buy['price']):
state.last_buy_price = exchange_last_buy['price']
if (app.getExchange() == 'coinbasepro'):
if (state.last_buy_fee != exchange_last_buy['fee']):
state.last_buy_fee = exchange_last_buy['fee']
(margin, profit, sell_fee) = calculate_margin(buy_size=state.last_buy_size, buy_filled=state.last_buy_filled, buy_price=state.last_buy_price, buy_fee=state.last_buy_fee, sell_percent=app.getSellPercent(), sell_price=price, sell_taker_fee=app.getTakerFee())
if strategy.isSellTrigger(price, technical_analysis.getTradeExit(price), margin, change_pcnt_high, obv_pc, macdltsignal):
state.action = 'SELL'
state.last_action = 'BUY'
immediate_action = True
if strategy.isWaitTrigger(margin):
state.action = 'WAIT'
state.last_action = 'BUY'
immediate_action = False
bullbeartext = ''
if ((app.disableBullOnly() is True) or (df_last['sma50'].values[0] == df_last['sma200'].values[0])):
bullbeartext = ''
elif (goldencross is True):
bullbeartext = ' (BULL)'
elif (goldencross is False):
bullbeartext = ' (BEAR)'
if ((immediate_action is True) or (state.last_df_index != current_df_index)):
precision = 4
if (price < 0.01):
precision = 8
truncate = functools.partial(_truncate, n=precision)
price_text = ('Close: ' + truncate(price))
ema_text = app.compare(df_last['ema12'].values[0], df_last['ema26'].values[0], 'EMA12/26', precision)
macd_text = ''
if (app.disableBuyMACD() is False):
macd_text = app.compare(df_last['macd'].values[0], df_last['signal'].values[0], 'MACD', precision)
obv_text = ''
if (app.disableBuyOBV() is False):
obv_text = (((('OBV: ' + truncate(df_last['obv'].values[0])) + ' (') + str(truncate(df_last['obv_pc'].values[0]))) + '%)')
state.eri_text = ''
if (app.disableBuyElderRay() is False):
if (elder_ray_buy is True):
state.eri_text = 'ERI: buy | '
elif (elder_ray_sell is True):
state.eri_text = 'ERI: sell | '
else:
state.eri_text = 'ERI: | '
if (hammer is True):
log_text = '* Candlestick Detected: Hammer ("Weak - Reversal - Bullish Signal - Up")'
Logger.info(log_text)
if (shooting_star is True):
log_text = '* Candlestick Detected: Shooting Star ("Weak - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
if (hanging_man is True):
log_text = '* Candlestick Detected: Hanging Man ("Weak - Continuation - Bearish Pattern - Down")'
Logger.info(log_text)
if (inverted_hammer is True):
log_text = '* Candlestick Detected: Inverted Hammer ("Weak - Continuation - Bullish Pattern - Up")'
Logger.info(log_text)
if (three_white_soldiers is True):
log_text = '*** Candlestick Detected: Three White Soldiers ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (three_black_crows is True):
log_text = '* Candlestick Detected: Three Black Crows ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (morning_star is True):
log_text = '*** Candlestick Detected: Morning Star ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (evening_star is True):
log_text = '*** Candlestick Detected: Evening Star ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (three_line_strike is True):
log_text = '** Candlestick Detected: Three Line Strike ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (abandoned_baby is True):
log_text = '** Candlestick Detected: Abandoned Baby ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (morning_doji_star is True):
log_text = '** Candlestick Detected: Morning Doji Star ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (evening_doji_star is True):
log_text = '** Candlestick Detected: Evening Doji Star ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
if (two_black_gapping is True):
log_text = '*** Candlestick Detected: Two Black Gapping ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') ') + log_text))
ema_co_prefix = ''
ema_co_suffix = ''
if (ema12gtema26co is True):
ema_co_prefix = '*^ '
ema_co_suffix = ' ^*'
elif (ema12ltema26co is True):
ema_co_prefix = '*v '
ema_co_suffix = ' v*'
elif (ema12gtema26 is True):
ema_co_prefix = '^ '
ema_co_suffix = ' ^'
elif (ema12ltema26 is True):
ema_co_prefix = 'v '
ema_co_suffix = ' v'
macd_co_prefix = ''
macd_co_suffix = ''
if (app.disableBuyMACD() is False):
if (macdgtsignalco is True):
macd_co_prefix = '*^ '
macd_co_suffix = ' ^*'
elif (macdltsignalco is True):
macd_co_prefix = '*v '
macd_co_suffix = ' v*'
elif (macdgtsignal is True):
macd_co_prefix = '^ '
macd_co_suffix = ' ^'
elif (macdltsignal is True):
macd_co_prefix = 'v '
macd_co_suffix = ' v'
obv_prefix = ''
obv_suffix = ''
if (app.disableBuyOBV() is False):
if (float(obv_pc) > 0):
obv_prefix = '^ '
obv_suffix = ' ^ | '
elif (float(obv_pc) < 0):
obv_prefix = 'v '
obv_suffix = ' v | '
if (not app.isVerbose()):
if (state.last_action != ''):
output_text = (((((((((((((((((((((((formatted_current_df_index + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | ') + ema_co_prefix) + ema_text) + ema_co_suffix) + ' | ') + macd_co_prefix) + macd_text) + macd_co_suffix) + obv_prefix) + obv_text) + obv_suffix) + state.eri_text) + ' | ') + state.action) + ' | Last Action: ') + state.last_action)
else:
output_text = ((((((((((((((((((((((formatted_current_df_index + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | ') + ema_co_prefix) + ema_text) + ema_co_suffix) + ' | ') + macd_co_prefix) + macd_text) + macd_co_suffix) + obv_prefix) + obv_text) + obv_suffix) + state.eri_text) + ' | ') + state.action) + ' ')
if (state.last_action == 'BUY'):
if (state.last_buy_size > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
output_text += ((((' | ' + margin_text) + ' (delta: ') + str(round((price - state.last_buy_price), precision))) + ')')
Logger.info(output_text)
if (not app.isSimulation()):
try:
prediction = technical_analysis.seasonalARIMAModelPrediction((int((app.getGranularity() / 60)) * 3))
Logger.info(f'Seasonal ARIMA model predicts the closing price will be {str(round(prediction[1], 2))} at {prediction[0]} (delta: {round((prediction[1] - price), 2)})')
except Exception:
pass
if (state.last_action == 'BUY'):
Logger.info(technical_analysis.printSupportResistanceFibonacciLevels(price))
else:
Logger.debug(((('-- Iteration: ' + str(state.iterations)) + ' --') + bullbeartext))
if (state.last_action == 'BUY'):
if (state.last_buy_size > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
Logger.debug((('-- Margin: ' + margin_text) + ' --'))
Logger.debug(('price: ' + truncate(price)))
Logger.debug(('ema12: ' + truncate(float(df_last['ema12'].values[0]))))
Logger.debug(('ema26: ' + truncate(float(df_last['ema26'].values[0]))))
Logger.debug(('ema12gtema26co: ' + str(ema12gtema26co)))
Logger.debug(('ema12gtema26: ' + str(ema12gtema26)))
Logger.debug(('ema12ltema26co: ' + str(ema12ltema26co)))
Logger.debug(('ema12ltema26: ' + str(ema12ltema26)))
Logger.debug(('sma50: ' + truncate(float(df_last['sma50'].values[0]))))
Logger.debug(('sma200: ' + truncate(float(df_last['sma200'].values[0]))))
Logger.debug(('macd: ' + truncate(float(df_last['macd'].values[0]))))
Logger.debug(('signal: ' + truncate(float(df_last['signal'].values[0]))))
Logger.debug(('macdgtsignal: ' + str(macdgtsignal)))
Logger.debug(('macdltsignal: ' + str(macdltsignal)))
Logger.debug(('obv: ' + str(obv)))
Logger.debug(('obv_pc: ' + str(obv_pc)))
Logger.debug(('action: ' + state.action))
Logger.info('')
Logger.info('================================================================================')
txt = ((' Iteration : ' + str(state.iterations)) + bullbeartext)
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Timestamp : ' + str(df_last.index.format()[0]))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('--------------------------------------------------------------------------------')
txt = (' Close : ' + truncate(price))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' EMA12 : ' + truncate(float(df_last['ema12'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' EMA26 : ' + truncate(float(df_last['ema26'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Crossing Above : ' + str(ema12gtema26co))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Above : ' + str(ema12gtema26))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Crossing Below : ' + str(ema12ltema26co))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Below : ' + str(ema12ltema26))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
if ((ema12gtema26 is True) and (ema12gtema26co is True)):
txt = ' Condition : EMA12 is currently crossing above EMA26'
elif ((ema12gtema26 is True) and (ema12gtema26co is False)):
txt = ' Condition : EMA12 is currently above EMA26 and has crossed over'
elif ((ema12ltema26 is True) and (ema12ltema26co is True)):
txt = ' Condition : EMA12 is currently crossing below EMA26'
elif ((ema12ltema26 is True) and (ema12ltema26co is False)):
txt = ' Condition : EMA12 is currently below EMA26 and has crossed over'
else:
txt = ' Condition : -'
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' SMA20 : ' + truncate(float(df_last['sma20'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' SMA200 : ' + truncate(float(df_last['sma200'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('--------------------------------------------------------------------------------')
txt = (' MACD : ' + truncate(float(df_last['macd'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Signal : ' + truncate(float(df_last['signal'].values[0])))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Above : ' + str(macdgtsignal))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
txt = (' Currently Below : ' + str(macdltsignal))
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
if ((macdgtsignal is True) and (macdgtsignalco is True)):
txt = ' Condition : MACD is currently crossing above Signal'
elif ((macdgtsignal is True) and (macdgtsignalco is False)):
txt = ' Condition : MACD is currently above Signal and has crossed over'
elif ((macdltsignal is True) and (macdltsignalco is True)):
txt = ' Condition : MACD is currently crossing below Signal'
elif ((macdltsignal is True) and (macdltsignalco is False)):
txt = ' Condition : MACD is currently below Signal and has crossed over'
else:
txt = ' Condition : -'
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('--------------------------------------------------------------------------------')
txt = (' Action : ' + state.action)
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('================================================================================')
if (state.last_action == 'BUY'):
txt = (' Margin : ' + margin_text)
Logger.info((((' | ' + txt) + (' ' * (75 - len(txt)))) + ' | '))
Logger.info('================================================================================')
if (state.action == 'BUY'):
state.last_buy_price = price
state.last_buy_high = state.last_buy_price
if app.isLive():
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') BUY at ') + price_text))
if (not app.isVerbose()):
Logger.info((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | BUY'))
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
Logger.info(((app.getBaseCurrency() + ' balance before order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance before order: ') + str(account.getBalance(app.getQuoteCurrency()))))
state.last_buy_size = float(account.getBalance(app.getQuoteCurrency()))
if (app.getBuyMaxSize() and (state.last_buy_size > app.getBuyMaxSize())):
state.last_buy_size = app.getBuyMaxSize()
resp = app.marketBuy(app.getMarket(), state.last_buy_size, app.getBuyPercent())
Logger.debug(resp)
Logger.info(((app.getBaseCurrency() + ' balance after order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance after order: ') + str(account.getBalance(app.getQuoteCurrency()))))
else:
app.notifyTelegram(((((app.getMarket() + ' (') + app.printGranularity()) + ') TEST BUY at ') + price_text))
if ((state.last_buy_size == 0) and (state.last_buy_filled == 0)):
state.last_buy_size = 1000
state.first_buy_size = 1000
state.buy_count = (state.buy_count + 1)
state.buy_sum = (state.buy_sum + state.last_buy_size)
if (not app.isVerbose()):
Logger.info((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | BUY'))
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info((' Fibonacci Retracement Levels:' + str(bands)))
technical_analysis.printSupportResistanceLevel(float(price))
if ((len(bands) >= 1) and (len(bands) <= 2)):
if (len(bands) == 1):
first_key = list(bands.keys())[0]
if (first_key == 'ratio1'):
state.fib_low = 0
state.fib_high = bands[first_key]
if (first_key == 'ratio1_618'):
state.fib_low = bands[first_key]
state.fib_high = (bands[first_key] * 2)
else:
state.fib_low = bands[first_key]
elif (len(bands) == 2):
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = (((((app.getMarket() + '_') + app.printGranularity()) + '_buy_') + str(ts)) + '.png')
tradinggraphs.renderEMAandMACD(len(trading_data), ('graphs/' + filename), True)
elif (state.action == 'SELL'):
if app.isLive():
app.notifyTelegram((((((((((app.getMarket() + ' (') + app.printGranularity()) + ') SELL at ') + price_text) + ' (margin: ') + margin_text) + ', (delta: ') + str(round((price - state.last_buy_price), precision))) + ')'))
if (not app.isVerbose()):
Logger.info((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | ') + price_text) + ' | SELL'))
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info((' Fibonacci Retracement Levels:' + str(bands)))
if ((len(bands) >= 1) and (len(bands) <= 2)):
if (len(bands) == 1):
first_key = list(bands.keys())[0]
if (first_key == 'ratio1'):
state.fib_low = 0
state.fib_high = bands[first_key]
if (first_key == 'ratio1_618'):
state.fib_low = bands[first_key]
state.fib_high = (bands[first_key] * 2)
else:
state.fib_low = bands[first_key]
elif (len(bands) == 2):
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
Logger.info(((app.getBaseCurrency() + ' balance before order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance before order: ') + str(account.getBalance(app.getQuoteCurrency()))))
resp = app.marketSell(app.getMarket(), float(account.getBalance(app.getBaseCurrency())), app.getSellPercent())
Logger.debug(resp)
Logger.info(((app.getBaseCurrency() + ' balance after order: ') + str(account.getBalance(app.getBaseCurrency()))))
Logger.info(((app.getQuoteCurrency() + ' balance after order: ') + str(account.getBalance(app.getQuoteCurrency()))))
else:
(margin, profit, sell_fee) = calculate_margin(buy_size=state.last_buy_size, buy_filled=state.last_buy_filled, buy_price=state.last_buy_price, buy_fee=state.last_buy_fee, sell_percent=app.getSellPercent(), sell_price=price, sell_taker_fee=app.getTakerFee())
if (state.last_buy_size > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
app.notifyTelegram((((((((((app.getMarket() + ' (') + app.printGranularity()) + ') TEST SELL at ') + price_text) + ' (margin: ') + margin_text) + ', (delta: ') + str(round((price - state.last_buy_price), precision))) + ')'))
state.sell_count = (state.sell_count + 1)
buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = (buy_size - sell_fee)
state.sell_sum = (state.sell_sum + state.last_buy_size)
if (not app.isVerbose()):
if (price > 0):
margin_text = (truncate(margin) + '%')
else:
margin_text = '0%'
Logger.info(((((((((((((((((formatted_current_df_index + ' | ') + app.getMarket()) + ' | ') + app.printGranularity()) + ' | SELL | ') + str(price)) + ' | BUY | ') + str(state.last_buy_price)) + ' | DIFF | ') + str((price - state.last_buy_price))) + ' | DIFF | ') + str(profit)) + ' | MARGIN NO FEES | ') + margin_text) + ' | MARGIN FEES | ') + str(round(sell_fee, precision))))
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = (((((app.getMarket() + '_') + app.printGranularity()) + '_sell_') + str(ts)) + '.png')
tradinggraphs.renderEMAandMACD(len(trading_data), ('graphs/' + filename), True)
if (state.action in ['BUY', 'SELL']):
state.last_action = state.action
state.last_df_index = str(df_last.index.format()[0])
if ((not app.isLive()) and (state.iterations == len(df))):
Logger.info('\nSimulation Summary: ')
if ((state.buy_count > state.sell_count) and app.allowSellAtLoss()):
state.last_buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = (state.last_buy_size - (state.last_buy_price * app.getTakerFee()))
state.sell_sum = (state.sell_sum + state.last_buy_size)
state.sell_count = (state.sell_count + 1)
elif ((state.buy_count > state.sell_count) and (not app.allowSellAtLoss())):
Logger.info('\n')
Logger.info(' Note : "sell at loss" is disabled and you have an open trade, if the margin')
Logger.info(' result below is negative it will assume you sold at the end of the')
Logger.info(' simulation which may not be ideal. Try setting --sellatloss 1')
Logger.info('\n')
Logger.info((' Buy Count : ' + str(state.buy_count)))
Logger.info((' Sell Count : ' + str(state.sell_count)))
Logger.info((' First Buy : ' + str(state.first_buy_size)))
Logger.info((' Last Sell : ' + str(state.last_buy_size)))
app.notifyTelegram(f'Simulation Summary\n Buy Count: {state.buy_count}\n Sell Count: {state.sell_count}\n First Buy: {state.first_buy_size}\n Last Sell: {state.last_buy_size}\n')
if (state.sell_count > 0):
Logger.info('\n')
Logger.info(((' Margin : ' + _truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)) + '%'))
Logger.info('\n')
Logger.info(' ** non-live simulation, assuming highest fees')
app.notifyTelegram(f' Margin: {_truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)}%\n ** non-live simulation, assuming highest fees\n')
else:
if ((state.last_buy_size > 0) and (state.last_buy_price > 0) and (price > 0) and (state.last_action == 'BUY')):
Logger.info((((((((((((now + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | Current Price: ') + str(price)) + ' | Margin: ') + str(margin)) + ' | Profit: ') + str(profit)))
else:
Logger.info((((((((now + ' | ') + app.getMarket()) + bullbeartext) + ' | ') + app.printGranularity()) + ' | Current Price: ') + str(price)))
state.iterations = (state.iterations - 1)
if ((not app.disableTracker()) and app.isLive()):
if (app.getExchange() == 'binance'):
account.saveTrackerCSV(app.getMarket())
elif (app.getExchange() == 'coinbasepro'):
account.saveTrackerCSV()
if app.isSimulation():
if (state.iterations < 300):
if (app.simuluationSpeed() in ['fast', 'fast-sample']):
list(map(s.cancel, s.queue))
s.enter(0, 1, executeJob, (sc, app, state, df))
else:
list(map(s.cancel, s.queue))
s.enter(1, 1, executeJob, (sc, app, state, df))
else:
list(map(s.cancel, s.queue))
s.enter(60, 1, executeJob, (sc, app, state)) |
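# A worked sketch (not part of the row above) of the simulated-SELL accounting
# used in the TEST SELL branch of executeJob; every number below is assumed
# purely for illustration.
def simulated_sell_value(last_buy_size, last_buy_fee, last_buy_price, price,
                         sell_percent=100.0, sell_fee=0.0):
    # Position value scales with price / last_buy_price; the buy fee is removed
    # before scaling and the sell fee after, mirroring the code above.
    proceeds = (sell_percent / 100) * ((price / last_buy_price) * (last_buy_size - last_buy_fee))
    return proceeds - sell_fee

# Buying 1000 quote units at price 100 with a 5-unit fee, then selling at 110:
print(simulated_sell_value(1000, 5, 100.0, 110.0))  # -> 1094.5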
def get_parser(parser=None, required=True):
'Get default arguments.'
if (parser is None):
parser = configargparse.ArgumentParser(description='Train an automatic speech recognition (ASR) model on one CPU, one or multiple GPUs', config_file_parser_class=configargparse.YAMLConfigFileParser, formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
parser.add('--config', is_config_file=True, help='config file path')
parser.add('--config2', is_config_file=True, help='second config file path that overwrites the settings in `--config`.')
parser.add('--config3', is_config_file=True, help='third config file path that overwrites the settings in `--config` and `--config2`.')
parser.add_argument('--ngpu', default=None, type=int, help='Number of GPUs. If not given, use all visible devices')
parser.add_argument('--train-dtype', default='float32', choices=['float16', 'float32', 'float64', 'O0', 'O1', 'O2', 'O3'], help='Data type for training (only pytorch backend). O0,O1,.. flags require apex. See https://nvidia.github.io/apex/amp.html#opt-levels')
parser.add_argument('--backend', default='chainer', type=str, choices=['chainer', 'pytorch'], help='Backend library')
parser.add_argument('--outdir', type=str, required=required, help='Output directory')
parser.add_argument('--debugmode', default=1, type=int, help='Debugmode')
parser.add_argument('--dict', required=required, help='Dictionary')
parser.add_argument('--seed', default=1, type=int, help='Random seed')
parser.add_argument('--debugdir', type=str, help='Output directory for debugging')
parser.add_argument('--resume', '-r', default='', nargs='?', help='Resume the training from snapshot')
parser.add_argument('--minibatches', '-N', type=int, default='-1', help='Process only N minibatches (for debug)')
parser.add_argument('--verbose', '-V', default=0, type=int, help='Verbose option')
parser.add_argument('--tensorboard-dir', default=None, type=str, nargs='?', help='Tensorboard log dir path')
parser.add_argument('--report-interval-iters', default=100, type=int, help='Report interval iterations')
parser.add_argument('--save-interval-iters', default=0, type=int, help='Save snapshot interval iterations')
parser.add_argument('--train-json', type=str, default=None, help='Filename of train label data (json)')
parser.add_argument('--valid-json', type=str, default=None, help='Filename of validation label data (json)')
parser.add_argument('--model-module', type=str, default=None, help='model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)')
parser.add_argument('--num-encs', default=1, type=int, help='Number of encoders in the model.')
parser.add_argument('--ctc_type', default='warpctc', type=str, choices=['builtin', 'warpctc'], help='Type of CTC implementation to calculate loss.')
parser.add_argument('--mtlalpha', default=0.5, type=float, help='Multitask learning coefficient, alpha: alpha*ctc_loss + (1-alpha)*att_loss ')
parser.add_argument('--lsm-weight', default=0.0, type=float, help='Label smoothing weight')
parser.add_argument('--report-cer', default=False, action='store_true', help='Compute CER on development set')
parser.add_argument('--report-wer', default=False, action='store_true', help='Compute WER on development set')
parser.add_argument('--nbest', type=int, default=1, help='Output N-best hypotheses')
parser.add_argument('--beam-size', type=int, default=4, help='Beam size')
parser.add_argument('--penalty', default=0.0, type=float, help='Insertion penalty')
parser.add_argument('--maxlenratio', default=0.0, type=float, help='Input length ratio to obtain max output length.\n If maxlenratio=0.0 (default), it uses an end-detect function\n to automatically find maximum hypothesis lengths')
parser.add_argument('--minlenratio', default=0.0, type=float, help='Input length ratio to obtain min output length')
parser.add_argument('--ctc-weight', default=0.3, type=float, help='CTC weight in joint decoding')
parser.add_argument('--rnnlm', type=str, default=None, help='RNNLM model file to read')
parser.add_argument('--rnnlm-conf', type=str, default=None, help='RNNLM model config file to read')
parser.add_argument('--lm-weight', default=0.1, type=float, help='RNNLM weight.')
parser.add_argument('--sym-space', default='<space>', type=str, help='Space symbol')
parser.add_argument('--sym-blank', default='<blank>', type=str, help='Blank symbol')
parser.add_argument('--sortagrad', default=0, type=int, nargs='?', help='How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs')
parser.add_argument('--batch-count', default='auto', choices=BATCH_COUNT_CHOICES, help='How to count batch_size. The default (auto) will find how to count by args.')
parser.add_argument('--batch-size', '--batch-seqs', '-b', default=0, type=int, help='Maximum seqs in a minibatch (0 to disable)')
parser.add_argument('--batch-bins', default=0, type=int, help='Maximum bins in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-in', default=0, type=int, help='Maximum input frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-out', default=0, type=int, help='Maximum output frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-inout', default=0, type=int, help='Maximum input+output frames in a minibatch (0 to disable)')
parser.add_argument('--maxlen-in', '--batch-seq-maxlen-in', default=800, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the input sequence length > ML.')
parser.add_argument('--maxlen-out', '--batch-seq-maxlen-out', default=150, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the output sequence length > ML')
parser.add_argument('--n-iter-processes', default=0, type=int, help='Number of processes of iterator')
parser.add_argument('--preprocess-conf', type=str, default=None, nargs='?', help='The configuration file for the pre-processing')
parser.add_argument('--opt', default='adadelta', type=str, choices=['adadelta', 'adam', 'noam'], help='Optimizer')
parser.add_argument('--accum-grad', default=1, type=int, help='Number of gradient accumulation steps')
parser.add_argument('--eps', default=1e-08, type=float, help='Epsilon constant for optimizer')
parser.add_argument('--eps-decay', default=0.01, type=float, help='Decaying ratio of epsilon')
parser.add_argument('--weight-decay', default=0.0, type=float, help='Weight decay ratio')
parser.add_argument('--criterion', default='acc', type=str, choices=['loss', 'acc'], help='Criterion to perform epsilon decay')
parser.add_argument('--threshold', default=0.0001, type=float, help='Threshold to stop iteration')
parser.add_argument('--epochs', '-e', default=30, type=int, help='Maximum number of epochs')
parser.add_argument('--early-stop-criterion', default='validation/main/acc', type=str, nargs='?', help='Value to monitor to trigger an early stopping of the training')
parser.add_argument('--patience', default=3, type=int, nargs='?', help='Number of epochs to wait without improvement before stopping the training')
parser.add_argument('--grad-clip', default=5, type=float, help='Gradient norm threshold to clip')
parser.add_argument('--num-save-attention', default=3, type=int, help='Number of samples of attention to be saved')
parser.add_argument('--num-save-ctc', default=3, type=int, help='Number of samples of CTC probability to be saved')
parser.add_argument('--grad-noise', type=strtobool, default=False, help='The flag to switch to use noise injection to gradients during training')
parser.add_argument('--num-spkrs', default=1, type=int, choices=[1, 2], help='Number of speakers in the speech.')
parser.add_argument('--context-residual', default=False, type=strtobool, nargs='?', help='The flag to switch to use context vector residual in the decoder network')
parser.add_argument('--enc-init', default=None, type=str, help='Pre-trained ASR model to initialize encoder.')
parser.add_argument('--enc-init-mods', default='enc.enc.', type=(lambda s: [str(mod) for mod in s.split(',') if (s != '')]), help='List of encoder modules to initialize, separated by a comma.')
parser.add_argument('--dec-init', default=None, type=str, help='Pre-trained ASR, MT or LM model to initialize decoder.')
parser.add_argument('--dec-init-mods', default='att., dec.', type=(lambda s: [str(mod) for mod in s.split(',') if (s != '')]), help='List of decoder modules to initialize, separated by a comma.')
parser.add_argument('--freeze-mods', default=None, type=(lambda s: [str(mod) for mod in s.split(',') if (s != '')]), help='List of modules to freeze, separated by a comma.')
parser.add_argument('--use-frontend', type=strtobool, default=False, help='The flag to switch to use frontend system.')
parser.add_argument('--use-wpe', type=strtobool, default=False, help='Apply Weighted Prediction Error')
parser.add_argument('--wtype', default='blstmp', type=str, choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm', 'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'], help='Type of encoder network architecture of the mask estimator for WPE. ')
parser.add_argument('--wlayers', type=int, default=2, help='')
parser.add_argument('--wunits', type=int, default=300, help='')
parser.add_argument('--wprojs', type=int, default=300, help='')
parser.add_argument('--wdropout-rate', type=float, default=0.0, help='')
parser.add_argument('--wpe-taps', type=int, default=5, help='')
parser.add_argument('--wpe-delay', type=int, default=3, help='')
parser.add_argument('--use-dnn-mask-for-wpe', type=strtobool, default=False, help='Use DNN to estimate the power spectrogram. This option is experimental.')
parser.add_argument('--use-beamformer', type=strtobool, default=True, help='')
parser.add_argument('--btype', default='blstmp', type=str, choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm', 'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'], help='Type of encoder network architecture of the mask estimator for Beamformer.')
parser.add_argument('--blayers', type=int, default=2, help='')
parser.add_argument('--bunits', type=int, default=300, help='')
parser.add_argument('--bprojs', type=int, default=300, help='')
parser.add_argument('--badim', type=int, default=320, help='')
parser.add_argument('--bnmask', type=int, default=2, help='Number of beamforming masks, default is 2 for [speech, noise].')
parser.add_argument('--ref-channel', type=int, default=(- 1), help='The reference channel used for beamformer. By default, the channel is estimated by DNN.')
parser.add_argument('--bdropout-rate', type=float, default=0.0, help='')
parser.add_argument('--stats-file', type=str, default=None, help='The stats file for the feature normalization')
parser.add_argument('--apply-uttmvn', type=strtobool, default=True, help='Apply utterance level mean variance normalization.')
parser.add_argument('--uttmvn-norm-means', type=strtobool, default=True, help='')
parser.add_argument('--uttmvn-norm-vars', type=strtobool, default=False, help='')
parser.add_argument('--fbank-fs', type=int, default=16000, help='The sample frequency used for the mel-fbank creation.')
parser.add_argument('--n-mels', type=int, default=80, help='The number of mel-frequency bins.')
parser.add_argument('--fbank-fmin', type=float, default=0.0, help='')
parser.add_argument('--fbank-fmax', type=float, default=None, help='')
return parser | -3,972,185,739,843,250,000 | Get default arguments. | espnet/bin/asr_train.py | get_parser | Advanjef/espnet | python | def get_parser(parser=None, required=True):
if (parser is None):
parser = configargparse.ArgumentParser(description='Train an automatic speech recognition (ASR) model on one CPU, one or multiple GPUs', config_file_parser_class=configargparse.YAMLConfigFileParser, formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
parser.add('--config', is_config_file=True, help='config file path')
parser.add('--config2', is_config_file=True, help='second config file path that overwrites the settings in `--config`.')
parser.add('--config3', is_config_file=True, help='third config file path that overwrites the settings in `--config` and `--config2`.')
parser.add_argument('--ngpu', default=None, type=int, help='Number of GPUs. If not given, use all visible devices')
parser.add_argument('--train-dtype', default='float32', choices=['float16', 'float32', 'float64', 'O0', 'O1', 'O2', 'O3'], help='Data type for training (only pytorch backend). O0,O1,.. flags require apex. See https://nvidia.github.io/apex/amp.html#opt-levels')
parser.add_argument('--backend', default='chainer', type=str, choices=['chainer', 'pytorch'], help='Backend library')
parser.add_argument('--outdir', type=str, required=required, help='Output directory')
parser.add_argument('--debugmode', default=1, type=int, help='Debugmode')
parser.add_argument('--dict', required=required, help='Dictionary')
parser.add_argument('--seed', default=1, type=int, help='Random seed')
parser.add_argument('--debugdir', type=str, help='Output directory for debugging')
parser.add_argument('--resume', '-r', default='', nargs='?', help='Resume the training from snapshot')
parser.add_argument('--minibatches', '-N', type=int, default='-1', help='Process only N minibatches (for debug)')
parser.add_argument('--verbose', '-V', default=0, type=int, help='Verbose option')
parser.add_argument('--tensorboard-dir', default=None, type=str, nargs='?', help='Tensorboard log dir path')
parser.add_argument('--report-interval-iters', default=100, type=int, help='Report interval iterations')
parser.add_argument('--save-interval-iters', default=0, type=int, help='Save snapshot interval iterations')
parser.add_argument('--train-json', type=str, default=None, help='Filename of train label data (json)')
parser.add_argument('--valid-json', type=str, default=None, help='Filename of validation label data (json)')
parser.add_argument('--model-module', type=str, default=None, help='model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)')
parser.add_argument('--num-encs', default=1, type=int, help='Number of encoders in the model.')
parser.add_argument('--ctc_type', default='warpctc', type=str, choices=['builtin', 'warpctc'], help='Type of CTC implementation to calculate loss.')
parser.add_argument('--mtlalpha', default=0.5, type=float, help='Multitask learning coefficient, alpha: alpha*ctc_loss + (1-alpha)*att_loss ')
parser.add_argument('--lsm-weight', default=0.0, type=float, help='Label smoothing weight')
parser.add_argument('--report-cer', default=False, action='store_true', help='Compute CER on development set')
parser.add_argument('--report-wer', default=False, action='store_true', help='Compute WER on development set')
parser.add_argument('--nbest', type=int, default=1, help='Output N-best hypotheses')
parser.add_argument('--beam-size', type=int, default=4, help='Beam size')
parser.add_argument('--penalty', default=0.0, type=float, help='Insertion penalty')
parser.add_argument('--maxlenratio', default=0.0, type=float, help='Input length ratio to obtain max output length.\n If maxlenratio=0.0 (default), it uses an end-detect function\n to automatically find maximum hypothesis lengths')
parser.add_argument('--minlenratio', default=0.0, type=float, help='Input length ratio to obtain min output length')
parser.add_argument('--ctc-weight', default=0.3, type=float, help='CTC weight in joint decoding')
parser.add_argument('--rnnlm', type=str, default=None, help='RNNLM model file to read')
parser.add_argument('--rnnlm-conf', type=str, default=None, help='RNNLM model config file to read')
parser.add_argument('--lm-weight', default=0.1, type=float, help='RNNLM weight.')
parser.add_argument('--sym-space', default='<space>', type=str, help='Space symbol')
parser.add_argument('--sym-blank', default='<blank>', type=str, help='Blank symbol')
parser.add_argument('--sortagrad', default=0, type=int, nargs='?', help='How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs')
parser.add_argument('--batch-count', default='auto', choices=BATCH_COUNT_CHOICES, help='How to count batch_size. The default (auto) will find how to count by args.')
parser.add_argument('--batch-size', '--batch-seqs', '-b', default=0, type=int, help='Maximum seqs in a minibatch (0 to disable)')
parser.add_argument('--batch-bins', default=0, type=int, help='Maximum bins in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-in', default=0, type=int, help='Maximum input frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-out', default=0, type=int, help='Maximum output frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-inout', default=0, type=int, help='Maximum input+output frames in a minibatch (0 to disable)')
parser.add_argument('--maxlen-in', '--batch-seq-maxlen-in', default=800, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the input sequence length > ML.')
parser.add_argument('--maxlen-out', '--batch-seq-maxlen-out', default=150, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the output sequence length > ML')
parser.add_argument('--n-iter-processes', default=0, type=int, help='Number of processes of iterator')
parser.add_argument('--preprocess-conf', type=str, default=None, nargs='?', help='The configuration file for the pre-processing')
parser.add_argument('--opt', default='adadelta', type=str, choices=['adadelta', 'adam', 'noam'], help='Optimizer')
parser.add_argument('--accum-grad', default=1, type=int, help='Number of gradient accumulation steps')
parser.add_argument('--eps', default=1e-08, type=float, help='Epsilon constant for optimizer')
parser.add_argument('--eps-decay', default=0.01, type=float, help='Decaying ratio of epsilon')
parser.add_argument('--weight-decay', default=0.0, type=float, help='Weight decay ratio')
parser.add_argument('--criterion', default='acc', type=str, choices=['loss', 'acc'], help='Criterion to perform epsilon decay')
parser.add_argument('--threshold', default=0.0001, type=float, help='Threshold to stop iteration')
parser.add_argument('--epochs', '-e', default=30, type=int, help='Maximum number of epochs')
parser.add_argument('--early-stop-criterion', default='validation/main/acc', type=str, nargs='?', help='Value to monitor to trigger an early stopping of the training')
parser.add_argument('--patience', default=3, type=int, nargs='?', help='Number of epochs to wait without improvement before stopping the training')
parser.add_argument('--grad-clip', default=5, type=float, help='Gradient norm threshold to clip')
parser.add_argument('--num-save-attention', default=3, type=int, help='Number of samples of attention to be saved')
parser.add_argument('--num-save-ctc', default=3, type=int, help='Number of samples of CTC probability to be saved')
parser.add_argument('--grad-noise', type=strtobool, default=False, help='The flag to switch to use noise injection to gradients during training')
parser.add_argument('--num-spkrs', default=1, type=int, choices=[1, 2], help='Number of speakers in the speech.')
parser.add_argument('--context-residual', default=False, type=strtobool, nargs='?', help='The flag to switch to use context vector residual in the decoder network')
parser.add_argument('--enc-init', default=None, type=str, help='Pre-trained ASR model to initialize encoder.')
parser.add_argument('--enc-init-mods', default='enc.enc.', type=(lambda s: [str(mod) for mod in s.split(',') if (s != '')]), help='List of encoder modules to initialize, separated by a comma.')
parser.add_argument('--dec-init', default=None, type=str, help='Pre-trained ASR, MT or LM model to initialize decoder.')
parser.add_argument('--dec-init-mods', default='att., dec.', type=(lambda s: [str(mod) for mod in s.split(',') if (s != )]), help='List of decoder modules to initialize, separated by a comma.')
parser.add_argument('--freeze-mods', default=None, type=(lambda s: [str(mod) for mod in s.split(',') if (s != )]), help='List of modules to freeze, separated by a comma.')
parser.add_argument('--use-frontend', type=strtobool, default=False, help='The flag to switch to use frontend system.')
parser.add_argument('--use-wpe', type=strtobool, default=False, help='Apply Weighted Prediction Error')
parser.add_argument('--wtype', default='blstmp', type=str, choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm', 'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'], help='Type of encoder network architecture of the mask estimator for WPE. ')
parser.add_argument('--wlayers', type=int, default=2, help='')
parser.add_argument('--wunits', type=int, default=300, help='')
parser.add_argument('--wprojs', type=int, default=300, help='')
parser.add_argument('--wdropout-rate', type=float, default=0.0, help='')
parser.add_argument('--wpe-taps', type=int, default=5, help='')
parser.add_argument('--wpe-delay', type=int, default=3, help='')
parser.add_argument('--use-dnn-mask-for-wpe', type=strtobool, default=False, help='Use DNN to estimate the power spectrogram. This option is experimental.')
parser.add_argument('--use-beamformer', type=strtobool, default=True, help='')
parser.add_argument('--btype', default='blstmp', type=str, choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm', 'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'], help='Type of encoder network architecture of the mask estimator for Beamformer.')
parser.add_argument('--blayers', type=int, default=2, help='')
parser.add_argument('--bunits', type=int, default=300, help='')
parser.add_argument('--bprojs', type=int, default=300, help='')
parser.add_argument('--badim', type=int, default=320, help='')
parser.add_argument('--bnmask', type=int, default=2, help='Number of beamforming masks, default is 2 for [speech, noise].')
parser.add_argument('--ref-channel', type=int, default=(- 1), help='The reference channel used for beamformer. By default, the channel is estimated by DNN.')
parser.add_argument('--bdropout-rate', type=float, default=0.0, help='')
parser.add_argument('--stats-file', type=str, default=None, help='The stats file for the feature normalization')
parser.add_argument('--apply-uttmvn', type=strtobool, default=True, help='Apply utterance level mean variance normalization.')
parser.add_argument('--uttmvn-norm-means', type=strtobool, default=True, help='')
parser.add_argument('--uttmvn-norm-vars', type=strtobool, default=False, help='')
parser.add_argument('--fbank-fs', type=int, default=16000, help='The sample frequency used for the mel-fbank creation.')
parser.add_argument('--n-mels', type=int, default=80, help='The number of mel-frequency bins.')
parser.add_argument('--fbank-fmin', type=float, default=0.0, help='')
parser.add_argument('--fbank-fmax', type=float, default=None, help='')
return parser |
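# A minimal usage sketch for the parser above; the flag values are assumptions
# chosen only to satisfy the two required options.
parser = get_parser(required=True)
args = parser.parse_args(['--outdir', 'exp/demo', '--dict', 'data/lang/units.txt', '--ngpu', '1'])
print(args.outdir, args.ngpu, args.batch_count)  # exp/demo 1 auto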
def main(cmd_args):
'Run the main training function.'
parser = get_parser()
(args, _) = parser.parse_known_args(cmd_args)
if ((args.backend == 'chainer') and (args.train_dtype != 'float32')):
raise NotImplementedError(f'chainer backend does not support --train-dtype {args.train_dtype}. Use --train-dtype float32.')
if ((args.ngpu == 0) and (args.train_dtype in ('O0', 'O1', 'O2', 'O3', 'float16'))):
raise ValueError(f'--train-dtype {args.train_dtype} does not support the CPU backend.')
from espnet.utils.dynamic_import import dynamic_import
if (args.model_module is None):
model_module = (('espnet.nets.' + args.backend) + '_backend.e2e_asr:E2E')
else:
model_module = args.model_module
model_class = dynamic_import(model_module)
model_class.add_arguments(parser)
args = parser.parse_args(cmd_args)
args.model_module = model_module
if ('chainer_backend' in args.model_module):
args.backend = 'chainer'
if ('pytorch_backend' in args.model_module):
args.backend = 'pytorch'
if (args.verbose > 0):
logging.basicConfig(level=logging.INFO, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
else:
logging.basicConfig(level=logging.WARN, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
logging.warning('Skip DEBUG/INFO messages')
if (args.ngpu is None):
cvd = os.environ.get('CUDA_VISIBLE_DEVICES')
if (cvd is not None):
ngpu = len(cvd.split(','))
else:
logging.warning('CUDA_VISIBLE_DEVICES is not set.')
try:
p = subprocess.run(['nvidia-smi', '-L'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except (subprocess.CalledProcessError, FileNotFoundError):
ngpu = 0
else:
ngpu = (len(p.stdout.decode().split('\n')) - 1)
else:
if (is_torch_1_2_plus and (args.ngpu != 1)):
logging.debug(('There are some bugs with multi-GPU processing in PyTorch 1.2+' + ' (see https://github.com/pytorch/pytorch/issues/21108)'))
ngpu = args.ngpu
logging.info(f'ngpu: {ngpu}')
logging.info(('python path = ' + os.environ.get('PYTHONPATH', '(None)')))
logging.info(('random seed = %d' % args.seed))
random.seed(args.seed)
np.random.seed(args.seed)
if (args.dict is not None):
with open(args.dict, 'rb') as f:
dictionary = f.readlines()
char_list = [entry.decode('utf-8').split(' ')[0] for entry in dictionary]
char_list.insert(0, '<blank>')
char_list.append('<eos>')
if (hasattr(args, 'decoder_mode') and (args.decoder_mode == 'maskctc')):
char_list.append('<mask>')
args.char_list = char_list
else:
args.char_list = None
logging.info(('backend = ' + args.backend))
if (args.num_spkrs == 1):
if (args.backend == 'chainer'):
from espnet.asr.chainer_backend.asr import train
train(args)
elif (args.backend == 'pytorch'):
from espnet.asr.pytorch_backend.asr import train
train(args)
else:
raise ValueError('Only chainer and pytorch are supported.')
elif (args.backend == 'pytorch'):
from espnet.asr.pytorch_backend.asr_mix import train
train(args)
else:
raise ValueError('Only pytorch is supported.') | -4,138,780,070,864,323,600 | Run the main training function. | espnet/bin/asr_train.py | main | Advanjef/espnet | python | def main(cmd_args):
parser = get_parser()
(args, _) = parser.parse_known_args(cmd_args)
if ((args.backend == 'chainer') and (args.train_dtype != 'float32')):
raise NotImplementedError(f'chainer backend does not support --train-dtype {args.train_dtype}. Use --train-dtype float32.')
if ((args.ngpu == 0) and (args.train_dtype in ('O0', 'O1', 'O2', 'O3', 'float16'))):
raise ValueError(f'--train-dtype {args.train_dtype} does not support the CPU backend.')
from espnet.utils.dynamic_import import dynamic_import
if (args.model_module is None):
model_module = (('espnet.nets.' + args.backend) + '_backend.e2e_asr:E2E')
else:
model_module = args.model_module
model_class = dynamic_import(model_module)
model_class.add_arguments(parser)
args = parser.parse_args(cmd_args)
args.model_module = model_module
if ('chainer_backend' in args.model_module):
args.backend = 'chainer'
if ('pytorch_backend' in args.model_module):
args.backend = 'pytorch'
if (args.verbose > 0):
logging.basicConfig(level=logging.INFO, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
else:
logging.basicConfig(level=logging.WARN, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
logging.warning('Skip DEBUG/INFO messages')
if (args.ngpu is None):
cvd = os.environ.get('CUDA_VISIBLE_DEVICES')
if (cvd is not None):
ngpu = len(cvd.split(','))
else:
logging.warning('CUDA_VISIBLE_DEVICES is not set.')
try:
p = subprocess.run(['nvidia-smi', '-L'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except (subprocess.CalledProcessError, FileNotFoundError):
ngpu = 0
else:
ngpu = (len(p.stdout.decode().split('\n')) - 1)
else:
if (is_torch_1_2_plus and (args.ngpu != 1)):
logging.debug(('There are some bugs with multi-GPU processing in PyTorch 1.2+' + ' (see https://github.com/pytorch/pytorch/issues/21108)'))
ngpu = args.ngpu
logging.info(f'ngpu: {ngpu}')
logging.info(('python path = ' + os.environ.get('PYTHONPATH', '(None)')))
logging.info(('random seed = %d' % args.seed))
random.seed(args.seed)
np.random.seed(args.seed)
if (args.dict is not None):
with open(args.dict, 'rb') as f:
dictionary = f.readlines()
char_list = [entry.decode('utf-8').split(' ')[0] for entry in dictionary]
char_list.insert(0, '<blank>')
char_list.append('<eos>')
if (hasattr(args, 'decoder_mode') and (args.decoder_mode == 'maskctc')):
char_list.append('<mask>')
args.char_list = char_list
else:
args.char_list = None
logging.info(('backend = ' + args.backend))
if (args.num_spkrs == 1):
if (args.backend == 'chainer'):
from espnet.asr.chainer_backend.asr import train
train(args)
elif (args.backend == 'pytorch'):
from espnet.asr.pytorch_backend.asr import train
train(args)
else:
raise ValueError('Only chainer and pytorch are supported.')
elif (args.backend == 'pytorch'):
from espnet.asr.pytorch_backend.asr_mix import train
train(args)
else:
raise ValueError('Only pytorch is supported.') |
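# A standalone sketch of the GPU-count fallback used in main(): prefer
# CUDA_VISIBLE_DEVICES, otherwise count the device lines that `nvidia-smi -L`
# prints to stdout. The function name is an assumption for illustration.
import os
import subprocess

def count_visible_gpus():
    cvd = os.environ.get('CUDA_VISIBLE_DEVICES')
    if cvd is not None:
        return len(cvd.split(','))
    try:
        p = subprocess.run(['nvidia-smi', '-L'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except (subprocess.CalledProcessError, FileNotFoundError):
        return 0
    # `nvidia-smi -L` emits one 'GPU n: ...' line per device.
    return len([ln for ln in p.stdout.decode().splitlines() if ln.strip()])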
def hextriplet(s):
'\n Wrap clldutils.color.rgb_as_hex to provide unified error handling.\n '
if (s in BASE_COLORS):
return rgb_as_hex([float(d) for d in BASE_COLORS[s]])
if (s in CSS4_COLORS):
return CSS4_COLORS[s]
try:
return rgb_as_hex(s)
except (AssertionError, ValueError) as e:
raise ValueError('Invalid color spec: "{}" ({})'.format(s, str(e))) | 7,425,507,542,396,912,000 | Wrap clldutils.color.rgb_as_hex to provide unified error handling. | src/cldfviz/colormap.py | hextriplet | cldf/cldfviz | python | def hextriplet(s):
'\n \n '
if (s in BASE_COLORS):
return rgb_as_hex([float(d) for d in BASE_COLORS[s]])
if (s in CSS4_COLORS):
return CSS4_COLORS[s]
try:
return rgb_as_hex(s)
except (AssertionError, ValueError) as e:
raise ValueError('Invalid color spec: "{}" ({})'.format(s, str(e))) |
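# Usage sketch for hextriplet: it accepts matplotlib base-color names, CSS4
# names, or RGB specs and normalizes them to a hex triplet. The exact output
# casing depends on clldutils.color.rgb_as_hex and is an assumption here.
print(hextriplet('r'))              # base color name -> '#FF0000' (case may vary)
print(hextriplet('rebeccapurple'))  # CSS4 name       -> '#663399'
try:
    hextriplet('not-a-color')
except ValueError as err:
    print(err)                      # unified error path for bad specs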
def fold_split(self, random_seed=None):
'\n Splitting the folds.\n\n Args:\n random_seed: Random seed for reproducibility\n\n Returns:\n tensor containing indices for folds, where dim=0 is the fold number\n\n '
if (random_seed is not None):
torch.manual_seed(random_seed)
fold_idx = torch.randperm(self.dataset.__len__())
fold_idx = fold_idx[:self.folded_size].view((- 1), self.fold_size)
return fold_idx | 8,683,108,462,610,148,000 | Splitting the folds.
Args:
random_seed: Random seed for reproducibility
Returns:
tensor containing indices for folds, where dim=0 is the fold number | pymatch/utils/KFold.py | fold_split | raharth/PyMatch | python | def fold_split(self, random_seed=None):
'\n Splitting the folds.\n\n Args:\n random_seed: Random seed for reproducibility\n\n Returns:\n tensor containing indices for folds, where dim=0 is the fold number\n\n '
if (random_seed is not None):
torch.manual_seed(random_seed)
fold_idx = torch.randperm(self.dataset.__len__())
fold_idx = fold_idx[:self.folded_size].view((- 1), self.fold_size)
return fold_idx |
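# Usage sketch for fold_split; the KFold constructor signature is an
# assumption (only the attributes used above are known from the source).
import torch
from torch.utils.data import TensorDataset

dataset = TensorDataset(torch.arange(10.0))
kfold = KFold(dataset, n_fold=5, batch_size=2)  # assumed constructor
fold_idx = kfold.fold_split(random_seed=42)     # reproducible permutation
print(fold_idx.shape)                           # e.g. torch.Size([5, 2])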
def fold_loaders(self, fold=(- 1)):
'\n Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of\n the original data set.\n\n Args:\n fold: fold number to return\n\n Returns:\n (train data loader, test data loader)\n\n '
if (fold == (- 1)):
fold = self.fold
test_fold_idx = self.fold_idx[fold]
train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if (i != fold)]].view((- 1))
train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
test_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))
self.fold = ((self.fold + 1) % self.n_fold)
return (train_loader, test_loader) | 2,172,173,981,719,047,200 | Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of
the original data set.
Args:
fold: fold number to return
Returns:
(train data loader, test data loader) | pymatch/utils/KFold.py | fold_loaders | raharth/PyMatch | python | def fold_loaders(self, fold=(- 1)):
'\n Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of\n the original data set.\n\n Args:\n fold: fold number to return\n\n Returns:\n (train data loader, test data loader)\n\n '
if (fold == (- 1)):
fold = self.fold
test_fold_idx = self.fold_idx[fold]
train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if (i != fold)]].view((- 1))
train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
test_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))
self.fold = ((self.fold + 1) % self.n_fold)
return (train_loader, test_loader) |
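# Cross-validation loop sketch: each fold_loaders call yields disjoint
# train/test samplers over the same dataset. build_model, train and evaluate
# are hypothetical helpers, named here only for illustration.
for fold in range(kfold.n_fold):                 # kfold as sketched above
    train_loader, test_loader = kfold.fold_loaders(fold)
    model = build_model()                        # hypothetical factory
    train(model, train_loader)                   # hypothetical training loop
    score = evaluate(model, test_loader)         # hypothetical metric
    print(f'fold {fold}: {score:.4f}')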
def __init__(self, num_visible, num_hidden, visible_unit_type='bin', main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models', model_name='rbm_model', gibbs_sampling_steps=1, learning_rate=0.01, momentum=0.9, l2=0.001, batch_size=10, num_epochs=10, stddev=0.1, verbose=0, plot_training_loss=True):
'"\n INPUT PARAMETER 1) num_visible: number of visible units in the RBM \n INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM\n INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories\n INPUT PARAMETER 4) model_name: name of the model you wanna save the data \n INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional)\n INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional) \n INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent \n INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional)\n INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional)\n INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional)\n INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not \'gauss\'\n INPUT PARAMETER 12) verbose: evel of verbosity. optional, default 0(for Regularization)\n INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True\n INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary)\n '
self.num_visible = num_visible
self.num_hidden = num_hidden
self.main_dir = main_dir
self.model_name = model_name
self.gibbs_sampling_steps = gibbs_sampling_steps
self.learning_rate = learning_rate
self.momentum = momentum
self.l2 = l2
self.batch_size = batch_size
self.num_epochs = num_epochs
self.stddev = stddev
self.verbose = verbose
self.plot_training_loss = plot_training_loss
self.visible_unit_type = visible_unit_type
self._create_model_directory()
self.model_path = os.path.join(self.main_dir, self.model_name)
self.W = None
self.bh_ = None
self.bv_ = None
self.dw = None
self.dbh_ = None
self.dbv_ = None
self.w_upd8 = None
self.bh_upd8 = None
self.bv_upd8 = None
self.encode = None
self.recontruct = None
self.loss_function = None
self.batch_cost = None
self.batch_free_energy = None
self.training_losses = []
self.input_data = None
self.hrand = None
self.validation_size = None
self.tf_session = None
self.tf_saver = None | 3,277,234,088,062,646,000 | "
INPUT PARAMETER 1) num_visible: number of visible units in the RBM
INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM
INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories
INPUT PARAMETER 4) model_name: name under which to save the model data
INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional)
INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional)
INPUT PARAMETER 7) momentum: Default 0.9 (Hence Optional) for Gradient Descent
INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001 (Hence Optional)
INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional)
INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional)
INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss'
INPUT PARAMETER 12) verbose: level of verbosity. optional, default 0 (for Regularization)
INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True
INPUT PARAMETER 14) visible_unit_type: Binary or Gaussian (Default Binary) | Unsupervised-Learning/rbm.py | __init__ | Phoebe0222/MLSA-workshops-2019-student | python | def __init__(self, num_visible, num_hidden, visible_unit_type='bin', main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models', model_name='rbm_model', gibbs_sampling_steps=1, learning_rate=0.01, momentum=0.9, l2=0.001, batch_size=10, num_epochs=10, stddev=0.1, verbose=0, plot_training_loss=True):
'"\n INPUT PARAMETER 1) num_visible: number of visible units in the RBM \n INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM\n INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories\n INPUT PARAMETER 4) model_name: name of the model you wanna save the data \n INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional)\n INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional) \n INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent \n INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional)\n INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional)\n INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional)\n INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not \'gauss\'\n INPUT PARAMETER 12) verbose: evel of verbosity. optional, default 0(for Regularization)\n INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True\n INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary)\n '
self.num_visible = num_visible
self.num_hidden = num_hidden
self.main_dir = main_dir
self.model_name = model_name
self.gibbs_sampling_steps = gibbs_sampling_steps
self.learning_rate = learning_rate
self.momentum = momentum
self.l2 = l2
self.batch_size = batch_size
self.num_epochs = num_epochs
self.stddev = stddev
self.verbose = verbose
self.plot_training_loss = plot_training_loss
self.visible_unit_type = visible_unit_type
self._create_model_directory()
self.model_path = os.path.join(self.main_dir, self.model_name)
self.W = None
self.bh_ = None
self.bv_ = None
self.dw = None
self.dbh_ = None
self.dbv_ = None
self.w_upd8 = None
self.bh_upd8 = None
self.bv_upd8 = None
self.encode = None
self.recontruct = None
self.loss_function = None
self.batch_cost = None
self.batch_free_energy = None
self.training_losses = []
self.input_data = None
self.hrand = None
self.validation_size = None
self.tf_session = None
self.tf_saver = None |
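# Instantiation sketch matching the documented parameters above, assuming the
# enclosing class is named RBM; all hyperparameter values are illustrative.
rbm = RBM(num_visible=784, num_hidden=64, visible_unit_type='bin',
          main_dir='models', model_name='demo_rbm',
          gibbs_sampling_steps=1, learning_rate=0.01,
          batch_size=32, num_epochs=5, verbose=1)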
def sample_prob(self, probs, rand):
' takes a tensor of probabilities as from a sigmoidal activation and samples from all \n the distributions. \n probs INPUT parameter: tensor of probabilities \n rand INPUT parameter: tensor (of same shape as probabilities) of random values \n :RETURN binary sample of probabilities \n '
return tf.nn.relu(tf.sign((probs - rand))) | 8,988,085,133,716,907,000 | takes a tensor of probabilities as from a sigmoidal activation and samples from all
the distributions.
probs INPUT parameter: tensor of probabilities
rand INPUT parameter: tensor (of same shape as probabilities) of random values
:RETURN binary sample of probabilities | Unsupervised-Learning/rbm.py | sample_prob | Phoebe0222/MLSA-workshops-2019-student | python | def sample_prob(self, probs, rand):
' takes a tensor of probabilities as from a sigmoidal activation and samples from all \n the distributions. \n probs INPUT parameter: tensor of probabilities \n rand INPUT parameter: tensor (of same shape as probabilities) of random values \n :RETURN binary sample of probabilities \n '
return tf.nn.relu(tf.sign((probs - rand))) |
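# Standalone numpy illustration of the relu(sign(probs - rand)) trick above:
# it returns 1 where probs > rand and 0 elsewhere, i.e. one Bernoulli draw per
# unit. Shown with numpy only to avoid a TensorFlow session.
import numpy as np
probs = np.array([0.9, 0.1, 0.5])
rand = np.random.uniform(size=probs.shape)
sample = np.maximum(np.sign(probs - rand), 0)  # equivalent to relu(sign(...))
print(sample)  # e.g. [1. 0. 1.]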
def gen_batches(self, data, batch_size):
' Divide input data into batches \n data INPUT parameter: input data( like a data frame)\n batch_size INPUT parameter: desired size of each batch\n :RETURN data divided in batches \n '
data = np.array(data)
for i in range(0, data.shape[0], batch_size):
(yield data[i:(i + batch_size)]) | -2,090,439,240,268,335,400 | Divide input data into batches
data INPUT parameter: input data( like a data frame)
batch_size INPUT parameter: desired size of each batch
:RETURN data divided in batches | Unsupervised-Learning/rbm.py | gen_batches | Phoebe0222/MLSA-workshops-2019-student | python | def gen_batches(self, data, batch_size):
' Divide input data into batches \n data INPUT parameter: input data( like a data frame)\n batch_size INPUT parameter: desired size of each batch\n :RETURN data divided in batches \n '
data = np.array(data)
for i in range(0, data.shape[0], batch_size):
(yield data[i:(i + batch_size)]) |
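# Usage sketch for gen_batches (a generator, so batches are produced lazily);
# `rbm` is the assumed instance from the sketch further above, and the last
# batch may be smaller than batch_size.
import numpy as np
data = np.arange(25).reshape(5, 5)
for batch in rbm.gen_batches(data, batch_size=2):
    print(batch.shape)  # (2, 5), (2, 5), (1, 5)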
def fit(self, train_set, validation_set=None, restore_previous_model=False):
'"\n fit the model to the training data \n INPUT PARAMETER train_set: training set\n INPUT PARAMETER validation set.default None (Hence Optional)\n INPUT PARAMETER restore_previous_model:\n if true, a previous trained model\n with the same name of this model is restored from disk to continue training.\n OUTPUT: self \n '
if (validation_set is not None):
self.validation_size = validation_set.shape[0]
tf.reset_default_graph()
self._build_model()
with tf.Session() as self.tf_session:
self._initialize_tf_utilities_and_ops(restore_previous_model)
self._train_model(train_set, validation_set)
self.tf_saver.save(self.tf_session, self.model_path)
if self.plot_training_loss:
plt.plot(self.training_losses)
plt.title('Training batch losses vs. iterations')
plt.xlabel('Num of training iterations')
plt.ylabel('Reconstruction error')
plt.show() | -8,941,818,064,905,708,000 | fit the model to the training data
INPUT PARAMETER train_set: training set
INPUT PARAMETER validation_set: validation set, default None (Hence Optional)
INPUT PARAMETER restore_previous_model:
if true, a previously trained model
with the same name as this model is restored from disk to continue training.
OUTPUT: self | Unsupervised-Learning/rbm.py | fit | Phoebe0222/MLSA-workshops-2019-student | python | def fit(self, train_set, validation_set=None, restore_previous_model=False):
'"\n fit the model to the training data \n INPUT PARAMETER train_set: training set\n INPUT PARAMETER validation set.default None (Hence Optional)\n INPUT PARAMETER restore_previous_model:\n if true, a previous trained model\n with the same name of this model is restored from disk to continue training.\n OUTPUT: self \n '
if (validation_set is not None):
self.validation_size = validation_set.shape[0]
tf.reset_default_graph()
self._build_model()
with tf.Session() as self.tf_session:
self._initialize_tf_utilities_and_ops(restore_previous_model)
self._train_model(train_set, validation_set)
self.tf_saver.save(self.tf_session, self.model_path)
if self.plot_training_loss:
plt.plot(self.training_losses)
plt.title('Training batch losses vs. iterations')
plt.xlabel('Num of training iterations')
plt.ylabel('Reconstruction error')
plt.show() |
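A minimal end-to-end sketch of constructing and fitting the model. Two assumptions are flagged in the comments: the class is importable as RBM (its name is not visible in this record), and synthetic random bits stand in for a real training set.

import numpy as np
from rbm import RBM  # assumption: the class in rbm.py is exported as RBM

train = (np.random.rand(500, 784) > 0.5).astype(np.float32)  # synthetic binary data
valid = (np.random.rand(100, 784) > 0.5).astype(np.float32)

model = RBM(num_visible=784, num_hidden=64,
            main_dir='models', model_name='rbm-demo',  # checkpoint: models/rbm-demo
            num_epochs=5, verbose=1)
model.fit(train, validation_set=valid)  # trains, saves the session, plots batch losses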
def _initialize_tf_utilities_and_ops(self, restore_previous_model):
'"\n Initialize TensorFlow operations: summaries, init operations, saver, summary_writer.\n Restore a previously trained model if the flag restore_previous_model is true.\n '
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
self.tf_session.run(init_op)
if restore_previous_model:
self.tf_saver.restore(self.tf_session, self.model_path) | 1,191,629,061,674,570,500 | Initialize TensorFlow operations: summaries, init operations, saver, summary_writer.
Restore a previously trained model if the flag restore_previous_model is true. | Unsupervised-Learning/rbm.py | _initialize_tf_utilities_and_ops | Phoebe0222/MLSA-workshops-2019-student | python | def _initialize_tf_utilities_and_ops(self, restore_previous_model):
'"\n Initialize TensorFlow operations: summaries, init operations, saver, summary_writer.\n Restore a previously trained model if the flag restore_previous_model is true.\n '
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
self.tf_session.run(init_op)
if restore_previous_model:
self.tf_saver.restore(self.tf_session, self.model_path) |
def _train_model(self, train_set, validation_set):
'" Train the Model \n \n INPUT PARAMETER train set: Training set \n INPUT PARAMETER validation_set: Validation set \n OUTPUT self\n '
for i in range(self.num_epochs):
self._run_train_step(train_set)
if (validation_set is not None):
self._run_validation_error(i, validation_set) | 4,000,757,518,716,573,000 | Train the Model
INPUT PARAMETER train_set: Training set
INPUT PARAMETER validation_set: Validation set
OUTPUT self | Unsupervised-Learning/rbm.py | _train_model | Phoebe0222/MLSA-workshops-2019-student | python | def _train_model(self, train_set, validation_set):
'" Train the Model \n \n INPUT PARAMETER train set: Training set \n INPUT PARAMETER validation_set: Validation set \n OUTPUT self\n '
for i in range(self.num_epochs):
self._run_train_step(train_set)
if (validation_set is not None):
self._run_validation_error(i, validation_set) |
def _run_train_step(self, train_set):
'"\n Run a training step. A training step is made by randomly shuffling the training set,\n divide into batches and run the variable update nodes for each batch. If self.plot_training_loss \n is true, will record training loss after each batch. \n INPUT PARAMETER train_set: training set\n OUTPUT self\n '
np.random.shuffle(train_set)
batches = [_ for _ in self.gen_batches(train_set, self.batch_size)]
updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]
for batch in batches:
if self.plot_training_loss:
(_, loss) = self.tf_session.run([updates, self.loss_function], feed_dict=self._create_feed_dict(batch))
self.training_losses.append(loss)
else:
self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch)) | 4,960,448,723,193,601,000 | Run a training step. A training step is made by randomly shuffling the training set,
dividing it into batches and running the variable update nodes for each batch. If self.plot_training_loss
is true, the training loss is recorded after each batch.
INPUT PARAMETER train_set: training set
OUTPUT self | Unsupervised-Learning/rbm.py | _run_train_step | Phoebe0222/MLSA-workshops-2019-student | python | def _run_train_step(self, train_set):
'"\n Run a training step. A training step is made by randomly shuffling the training set,\n divide into batches and run the variable update nodes for each batch. If self.plot_training_loss \n is true, will record training loss after each batch. \n INPUT PARAMETER train_set: training set\n OUTPUT self\n '
np.random.shuffle(train_set)
batches = [_ for _ in self.gen_batches(train_set, self.batch_size)]
updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]
for batch in batches:
if self.plot_training_loss:
(_, loss) = self.tf_session.run([updates, self.loss_function], feed_dict=self._create_feed_dict(batch))
self.training_losses.append(loss)
else:
self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch)) |
def _run_validation_error(self, epoch, validation_set):
' \n Run the error computation on the validation set and print it out for each epoch. \n INPUT PARAMETER epoch: current epoch\n INPUT PARAMETER validation_set: validation data\n OUTPUT: self\n '
loss = self.tf_session.run(self.loss_function, feed_dict=self._create_feed_dict(validation_set))
if (self.verbose == 1):
tqdm.write(('Validation cost at step %s: %s' % (epoch, loss))) | 1,609,787,130,270,361,600 | Run the error computation on the validation set and print it out for each epoch.
INPUT PARAMETER epoch: current epoch
INPUT PARAMETER validation_set: validation data
OUTPUT: self | Unsupervised-Learning/rbm.py | _run_validation_error | Phoebe0222/MLSA-workshops-2019-student | python | def _run_validation_error(self, epoch, validation_set):
' \n Run the error computation on the validation set and print it out for each epoch. \n INPUT PARAMETER epoch: current epoch\n INPUT PARAMETER validation_set: validation data\n OUTPUT: self\n '
loss = self.tf_session.run(self.loss_function, feed_dict=self._create_feed_dict(validation_set))
if (self.verbose == 1):
tqdm.write(('Validation cost at step %s: %s' % (epoch, loss))) |
def _create_feed_dict(self, data):
" Create the dictionary of data to feed to TensorFlow's session during training.\n :param data: training/validation set batch\n :return: dictionary(self.input_data: data, self.hrand: random_uniform)\n "
return {self.input_data: data, self.hrand: np.random.rand(data.shape[0], self.num_hidden)} | 1,993,604,828,718,735,400 | Create the dictionary of data to feed to TensorFlow's session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform) | Unsupervised-Learning/rbm.py | _create_feed_dict | Phoebe0222/MLSA-workshops-2019-student | python | def _create_feed_dict(self, data):
" Create the dictionary of data to feed to TensorFlow's session during training.\n :param data: training/validation set batch\n :return: dictionary(self.input_data: data, self.hrand: random_uniform)\n "
return {self.input_data: data, self.hrand: np.random.rand(data.shape[0], self.num_hidden)} |
def _build_model(self):
'\n Building the Restricted Boltzmann Machine in TensorFlow\n '
(self.input_data, self.hrand) = self._create_placeholders()
(self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_) = self._create_variables()
(hprobs0, hstates0, vprobs, hprobs1, hstates1) = self.gibbs_sampling_step(self.input_data)
positive = self.compute_positive_association(self.input_data, hprobs0, hstates0)
nn_input = vprobs
for step in range((self.gibbs_sampling_steps - 1)):
(hprobs, hstates, vprobs, hprobs1, hstates1) = self.gibbs_sampling_step(nn_input)
nn_input = vprobs
self.reconstruct = vprobs
negative = tf.matmul(tf.transpose(vprobs), hprobs1)
self.encode = hprobs1
dw = (positive - negative)
self.dw = ((self.momentum * self.dw) + ((1 - self.momentum) * dw))
self.w_upd8 = self.W.assign_add(((self.learning_rate * self.dw) - ((self.learning_rate * self.l2) * self.W)))
dbh_ = tf.reduce_mean((hprobs0 - hprobs1), 0)
self.dbh_ = ((self.momentum * self.dbh_) + (self.learning_rate * dbh_))
self.bh_upd8 = self.bh_.assign_add(self.dbh_)
dbv_ = tf.reduce_mean((self.input_data - vprobs), 0)
self.dbv_ = ((self.momentum * self.dbv_) + (self.learning_rate * dbv_))
self.bv_upd8 = self.bv_.assign_add(self.dbv_)
self.loss_function = tf.sqrt(tf.reduce_mean(tf.square((self.input_data - vprobs))))
self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square((self.input_data - vprobs)), 1))
self._create_free_energy_for_batch() | 5,571,932,781,441,398,000 | Building the Restricted Boltzmann Machine in TensorFlow | Unsupervised-Learning/rbm.py | _build_model | Phoebe0222/MLSA-workshops-2019-student | python | def _build_model(self):
'\n \n '
(self.input_data, self.hrand) = self._create_placeholders()
(self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_) = self._create_variables()
(hprobs0, hstates0, vprobs, hprobs1, hstates1) = self.gibbs_sampling_step(self.input_data)
positive = self.compute_positive_association(self.input_data, hprobs0, hstates0)
nn_input = vprobs
for step in range((self.gibbs_sampling_steps - 1)):
(hprobs, hstates, vprobs, hprobs1, hstates1) = self.gibbs_sampling_step(nn_input)
nn_input = vprobs
self.reconstruct = vprobs
negative = tf.matmul(tf.transpose(vprobs), hprobs1)
self.encode = hprobs1
dw = (positive - negative)
self.dw = ((self.momentum * self.dw) + ((1 - self.momentum) * dw))
self.w_upd8 = self.W.assign_add(((self.learning_rate * self.dw) - ((self.learning_rate * self.l2) * self.W)))
dbh_ = tf.reduce_mean((hprobs0 - hprobs1), 0)
self.dbh_ = ((self.momentum * self.dbh_) + (self.learning_rate * dbh_))
self.bh_upd8 = self.bh_.assign_add(self.dbh_)
dbv_ = tf.reduce_mean((self.input_data - vprobs), 0)
self.dbv_ = ((self.momentum * self.dbv_) + (self.learning_rate * dbv_))
self.bv_upd8 = self.bv_.assign_add(self.dbv_)
self.loss_function = tf.sqrt(tf.reduce_mean(tf.square((self.input_data - vprobs))))
self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square((self.input_data - vprobs)), 1))
self._create_free_energy_for_batch() |
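In equations, the contrastive-divergence update implemented above is, for learning rate \eta, momentum \mu, weight decay \lambda, data batch v_0 and k-step reconstruction v_k (a transcription of the code, with the batch mean written as an overline):

\Delta W \leftarrow \mu\,\Delta W + (1-\mu)\,(v_0^{\top} h_0 - v_k^{\top} h_k), \qquad W \leftarrow W + \eta\,\Delta W - \eta\lambda W
\Delta b_h \leftarrow \mu\,\Delta b_h + \eta\,\overline{(h_0 - h_k)}, \qquad b_h \leftarrow b_h + \Delta b_h
\Delta b_v \leftarrow \mu\,\Delta b_v + \eta\,\overline{(v_0 - v_k)}, \qquad b_v \leftarrow b_v + \Delta b_v

Note the asymmetry in the source: the weight gradient is blended with (1-\mu), while the bias updates fold \eta directly into the momentum accumulator; both are reproduced here as written.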
def _create_free_energy_for_batch(self):
' Create free energy ops to batch input data \n :return: self\n '
if (self.visible_unit_type == 'bin'):
self._create_free_energy_for_bin()
elif (self.visible_unit_type == 'gauss'):
self._create_free_energy_for_gauss()
else:
self.batch_free_energy = None | -7,953,426,236,799,308,000 | Create free energy ops to batch input data
:return: self | Unsupervised-Learning/rbm.py | _create_free_energy_for_batch | Phoebe0222/MLSA-workshops-2019-student | python | def _create_free_energy_for_batch(self):
' Create free energy ops to batch input data \n :return: self\n '
if (self.visible_unit_type == 'bin'):
self._create_free_energy_for_bin()
elif (self.visible_unit_type == 'gauss'):
self._create_free_energy_for_gauss()
else:
self.batch_free_energy = None |
def _create_free_energy_for_bin(self):
' Create free energy for model with Bin visible layer\n :return: self\n '
self.batch_free_energy = (- (tf.matmul(self.input_data, tf.reshape(self.bv_, [(- 1), 1])) + tf.reshape(tf.reduce_sum(tf.log((tf.exp((tf.matmul(self.input_data, self.W) + self.bh_)) + 1)), 1), [(- 1), 1]))) | -3,489,726,197,046,207,000 | Create free energy for model with Bin visible layer
:return: self | Unsupervised-Learning/rbm.py | _create_free_energy_for_bin | Phoebe0222/MLSA-workshops-2019-student | python | def _create_free_energy_for_bin(self):
' Create free energy for model with Bin visible layer\n :return: self\n '
self.batch_free_energy = (- (tf.matmul(self.input_data, tf.reshape(self.bv_, [(- 1), 1])) + tf.reshape(tf.reduce_sum(tf.log((tf.exp((tf.matmul(self.input_data, self.W) + self.bh_)) + 1)), 1), [(- 1), 1]))) |
def _create_free_energy_for_gauss(self):
' Create free energy for model with Gauss visible layer \n :return: self\n '
self.batch_free_energy = (- ((tf.matmul(self.input_data, tf.reshape(self.bv_, [(- 1), 1])) - tf.reshape(tf.reduce_sum(((0.5 * self.input_data) * self.input_data), 1), [(- 1), 1])) + tf.reshape(tf.reduce_sum(tf.log((tf.exp((tf.matmul(self.input_data, self.W) + self.bh_)) + 1)), 1), [(- 1), 1]))) | -4,266,481,182,452,475,400 | Create free energy for model with Gauss visible layer
:return: self | Unsupervised-Learning/rbm.py | _create_free_energy_for_gauss | Phoebe0222/MLSA-workshops-2019-student | python | def _create_free_energy_for_gauss(self):
' Create free energy for model with Gauss visible layer \n :return: self\n '
self.batch_free_energy = (- ((tf.matmul(self.input_data, tf.reshape(self.bv_, [(- 1), 1])) - tf.reshape(tf.reduce_sum(((0.5 * self.input_data) * self.input_data), 1), [(- 1), 1])) + tf.reshape(tf.reduce_sum(tf.log((tf.exp((tf.matmul(self.input_data, self.W) + self.bh_)) + 1)), 1), [(- 1), 1]))) |
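For reference, the two free-energy expressions implemented above are, for a visible row vector v:

F_{\text{bin}}(v) = -\,b_v^{\top} v \;-\; \sum_j \log\bigl(1 + e^{(vW + b_h)_j}\bigr)
F_{\text{gauss}}(v) = -\,b_v^{\top} v \;+\; \tfrac{1}{2}\, v^{\top} v \;-\; \sum_j \log\bigl(1 + e^{(vW + b_h)_j}\bigr)

so the Gaussian case differs only by the quadratic self-energy term \tfrac{1}{2}\lVert v\rVert^2.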
def _create_placeholders(self):
' Create the TensorFlow placeholders for the model.\n :return: tuple(input(shape(None, num_visible)), \n hrand(shape(None, num_hidden)))\n '
x = tf.placeholder('float', [None, self.num_visible], name='x-input')
hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand')
return (x, hrand) | -8,748,354,369,674,363,000 | Create the TensorFlow placeholders for the model.
:return: tuple(input(shape(None, num_visible)),
hrand(shape(None, num_hidden))) | Unsupervised-Learning/rbm.py | _create_placeholders | Phoebe0222/MLSA-workshops-2019-student | python | def _create_placeholders(self):
' Create the TensorFlow placeholders for the model.\n :return: tuple(input(shape(None, num_visible)), \n hrand(shape(None, num_hidden)))\n '
x = tf.placeholder('float', [None, self.num_visible], name='x-input')
hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand')
return (x, hrand) |
def _create_variables(self):
' Create the TensorFlow variables for the model.\n :return: tuple(weights(shape(num_visible, num_hidden),\n hidden bias(shape(num_hidden)),\n visible bias(shape(num_visible)))\n '
W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights')
dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name='derivative-weights')
bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias')
dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias')
bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias')
dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias')
return (W, bh_, bv_, dw, dbh_, dbv_) | -2,261,539,388,551,716,000 | Create the TensorFlow variables for the model.
:return: tuple(weights(shape(num_visible, num_hidden),
hidden bias(shape(num_hidden)),
visible bias(shape(num_visible))) | Unsupervised-Learning/rbm.py | _create_variables | Phoebe0222/MLSA-workshops-2019-student | python | def _create_variables(self):
' Create the TensorFlow variables for the model.\n :return: tuple(weights(shape(num_visible, num_hidden),\n hidden bias(shape(num_hidden)),\n visible bias(shape(num_visible)))\n '
W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights')
dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name='derivative-weights')
bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias')
dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias')
bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias')
dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias')
return (W, bh_, bv_, dw, dbh_, dbv_) |
def gibbs_sampling_step(self, visible):
' Performs one step of Gibbs sampling.\n :param visible: activations of the visible units\n :return: tuple(hidden probs, hidden states, visible probs,\n new hidden probs, new hidden states)\n '
(hprobs, hstates) = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs)
(hprobs1, hstates1) = self.sample_hidden_from_visible(vprobs)
return (hprobs, hstates, vprobs, hprobs1, hstates1) | 6,320,407,482,200,791,000 | Performs one step of Gibbs sampling.
:param visible: activations of the visible units
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states) | Unsupervised-Learning/rbm.py | gibbs_sampling_step | Phoebe0222/MLSA-workshops-2019-student | python | def gibbs_sampling_step(self, visible):
' Performs one step of Gibbs sampling.\n :param visible: activations of the visible units\n :return: tuple(hidden probs, hidden states, visible probs,\n new hidden probs, new hidden states)\n '
(hprobs, hstates) = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs)
(hprobs1, hstates1) = self.sample_hidden_from_visible(vprobs)
return (hprobs, hstates, vprobs, hprobs1, hstates1) |
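The same v -> h -> v' -> h' round trip in plain NumPy, to make the shapes and the positive/negative statistics concrete (an illustrative sketch, not code from the source):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(0)
batch, n_vis, n_hid = 4, 6, 3
v0 = (rng.random((batch, n_vis)) > 0.5).astype(float)   # toy binary batch
W = rng.normal(0.0, 0.01, size=(n_vis, n_hid))
bh = np.zeros(n_hid)
bv = np.zeros(n_vis)

h0_probs = sigmoid(v0 @ W + bh)                               # positive phase
h0_states = (h0_probs > rng.random(h0_probs.shape)).astype(float)
v1_probs = sigmoid(h0_states @ W.T + bv)                      # reconstruction ('bin' units)
h1_probs = sigmoid(v1_probs @ W + bh)                         # negative phase
grad_W = v0.T @ h0_states - v1_probs.T @ h1_probs             # positive - negative association
print(grad_W.shape)                                           # (6, 3)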
def sample_hidden_from_visible(self, visible):
' Sample the hidden units from the visible units.\n This is the Positive phase of the Contrastive Divergence algorithm.\n :param visible: activations of the visible units\n :return: tuple(hidden probabilities, hidden binary states)\n '
hprobs = tf.nn.sigmoid((tf.matmul(visible, self.W) + self.bh_))
hstates = self.sample_prob(hprobs, self.hrand)
return (hprobs, hstates) | -5,385,906,317,538,630,000 | Sample the hidden units from the visible units.
This is the Positive phase of the Contrastive Divergence algorithm.
:param visible: activations of the visible units
:return: tuple(hidden probabilities, hidden binary states) | Unsupervised-Learning/rbm.py | sample_hidden_from_visible | Phoebe0222/MLSA-workshops-2019-student | python | def sample_hidden_from_visible(self, visible):
' Sample the hidden units from the visible units.\n This is the Positive phase of the Contrastive Divergence algorithm.\n :param visible: activations of the visible units\n :return: tuple(hidden probabilities, hidden binary states)\n '
hprobs = tf.nn.sigmoid((tf.matmul(visible, self.W) + self.bh_))
hstates = self.sample_prob(hprobs, self.hrand)
return (hprobs, hstates) |
def sample_visible_from_hidden(self, hidden):
' Sample the visible units from the hidden units.\n This is the Negative phase of the Contrastive Divergence algorithm.\n :param hidden: activations of the hidden units\n :return: visible probabilities\n '
visible_activation = (tf.matmul(hidden, tf.transpose(self.W)) + self.bv_)
if (self.visible_unit_type == 'bin'):
vprobs = tf.nn.sigmoid(visible_activation)
elif (self.visible_unit_type == 'gauss'):
vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev)
else:
vprobs = None
return vprobs | -3,314,956,082,504,032,000 | Sample the visible units from the hidden units.
This is the Negative phase of the Contrastive Divergence algorithm.
:param hidden: activations of the hidden units
:return: visible probabilities | Unsupervised-Learning/rbm.py | sample_visible_from_hidden | Phoebe0222/MLSA-workshops-2019-student | python | def sample_visible_from_hidden(self, hidden):
' Sample the visible units from the hidden units.\n This is the Negative phase of the Contrastive Divergence algorithm.\n :param hidden: activations of the hidden units\n :return: visible probabilities\n '
visible_activation = (tf.matmul(hidden, tf.transpose(self.W)) + self.bv_)
if (self.visible_unit_type == 'bin'):
vprobs = tf.nn.sigmoid(visible_activation)
elif (self.visible_unit_type == 'gauss'):
vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev)
else:
vprobs = None
return vprobs |
def compute_positive_association(self, visible, hidden_probs, hidden_states):
' Compute positive associations between visible and hidden units.\n :param visible: visible units\n :param hidden_probs: hidden units probabilities\n :param hidden_states: hidden units states\n :return: positive association = dot(visible.T, hidden)\n '
if (self.visible_unit_type == 'bin'):
positive = tf.matmul(tf.transpose(visible), hidden_states)
elif (self.visible_unit_type == 'gauss'):
positive = tf.matmul(tf.transpose(visible), hidden_probs)
else:
positive = None
return positive | -2,116,715,240,932,941,800 | Compute positive associations between visible and hidden units.
:param visible: visible units
:param hidden_probs: hidden units probabilities
:param hidden_states: hidden units states
:return: positive association = dot(visible.T, hidden) | Unsupervised-Learning/rbm.py | compute_positive_association | Phoebe0222/MLSA-workshops-2019-student | python | def compute_positive_association(self, visible, hidden_probs, hidden_states):
' Compute positive associations between visible and hidden units.\n :param visible: visible units\n :param hidden_probs: hidden units probabilities\n :param hidden_states: hidden units states\n :return: positive association = dot(visible.T, hidden)\n '
if (self.visible_unit_type == 'bin'):
positive = tf.matmul(tf.transpose(visible), hidden_states)
elif (self.visible_unit_type == 'gauss'):
positive = tf.matmul(tf.transpose(visible), hidden_probs)
else:
positive = None
return positive |
def _create_model_directory(self):
' Create the directory for storing the model\n :return: self\n '
if (not os.path.isdir(self.main_dir)):
os.mkdir(self.main_dir)
print('Created dir: ', self.main_dir) | 2,877,616,471,642,086,400 | Create the directory for storing the model
:return: self | Unsupervised-Learning/rbm.py | _create_model_directory | Phoebe0222/MLSA-workshops-2019-student | python | def _create_model_directory(self):
' Create the directory for storing the model\n :return: self\n '
if (not os.path.isdir(self.main_dir)):
os.mkdir(self.main_dir)
print('Created dir: ', self.main_dir)
def getRecontructError(self, data):
' return Reconstruction Error (loss) from data in batch.\n :param data: input data of shape num_samples x visible_size\n :return: Reconstruction cost for each sample in the batch\n '
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_loss = self.tf_session.run(self.batch_cost, feed_dict=self._create_feed_dict(data))
return batch_loss | 554,381,575,356,357,100 | return Reconstruction Error (loss) from data in batch.
:param data: input data of shape num_samples x visible_size
:return: Reconstruction cost for each sample in the batch | Unsupervised-Learning/rbm.py | getRecontructError | Phoebe0222/MLSA-workshops-2019-student | python | def getRecontructError(self, data):
' return Reconstruction Error (loss) from data in batch.\n :param data: input data of shape num_samples x visible_size\n :return: Reconstruction cost for each sample in the batch\n '
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_loss = self.tf_session.run(self.batch_cost, feed_dict=self._create_feed_dict(data))
return batch_loss |
def getFreeEnergy(self, data):
' return Free Energy from data.\n :param data: input data of shape num_samples x visible_size\n :return: Free Energy for each sample: p(x)\n '
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_FE = self.tf_session.run(self.batch_free_energy, feed_dict=self._create_feed_dict(data))
return batch_FE | 2,393,992,164,699,182,600 | return Free Energy from data.
:param data: input data of shape num_samples x visible_size
:return: Free Energy for each sample: p(x) | Unsupervised-Learning/rbm.py | getFreeEnergy | Phoebe0222/MLSA-workshops-2019-student | python | def getFreeEnergy(self, data):
' return Free Energy from data.\n :param data: input data of shape num_samples x visible_size\n :return: Free Energy for each sample: p(x)\n '
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_FE = self.tf_session.run(self.batch_free_energy, feed_dict=self._create_feed_dict(data))
return batch_FE |
def load_model(self, shape, gibbs_sampling_steps, model_path):
' Load a trained model from disk. The shape of the model\n (num_visible, num_hidden) and the number of gibbs sampling steps\n must be known in order to restore the model.\n :param shape: tuple(num_visible, num_hidden)\n :param gibbs_sampling_steps:\n :param model_path:\n :return: self\n '
(self.num_visible, self.num_hidden) = (shape[0], shape[1])
self.gibbs_sampling_steps = gibbs_sampling_steps
tf.reset_default_graph()
self._build_model()
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
with tf.Session() as self.tf_session:
self.tf_session.run(init_op)
self.tf_saver.restore(self.tf_session, model_path) | -4,758,758,241,476,712,000 | Load a trained model from disk. The shape of the model
(num_visible, num_hidden) and the number of gibbs sampling steps
must be known in order to restore the model.
:param shape: tuple(num_visible, num_hidden)
:param gibbs_sampling_steps:
:param model_path:
:return: self | Unsupervised-Learning/rbm.py | load_model | Phoebe0222/MLSA-workshops-2019-student | python | def load_model(self, shape, gibbs_sampling_steps, model_path):
' Load a trained model from disk. The shape of the model\n (num_visible, num_hidden) and the number of gibbs sampling steps\n must be known in order to restore the model.\n :param shape: tuple(num_visible, num_hidden)\n :param gibbs_sampling_steps:\n :param model_path:\n :return: self\n '
(self.num_visible, self.num_hidden) = (shape[0], shape[1])
self.gibbs_sampling_steps = gibbs_sampling_steps
tf.reset_default_graph()
self._build_model()
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
with tf.Session() as self.tf_session:
self.tf_session.run(init_op)
self.tf_saver.restore(self.tf_session, model_path) |
def get_model_parameters(self):
' Return the model parameters in the form of numpy arrays.\n :return: model parameters\n '
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
return {'W': self.W.eval(), 'bh_': self.bh_.eval(), 'bv_': self.bv_.eval()} | -5,972,333,097,598,204,000 | Return the model parameters in the form of numpy arrays.
:return: model parameters | Unsupervised-Learning/rbm.py | get_model_parameters | Phoebe0222/MLSA-workshops-2019-student | python | def get_model_parameters(self):
' Return the model parameters in the form of numpy arrays.\n :return: model parameters\n '
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
return {'W': self.W.eval(), 'bh_': self.bh_.eval(), 'bv_': self.bv_.eval()} |
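Restoring later needs only the shape, the number of Gibbs steps, and the checkpoint path, since load_model rebuilds the graph before loading the weights. A sketch under the same assumed class name RBM:

model = RBM(num_visible=784, num_hidden=64,
            main_dir='models', model_name='rbm-demo')   # class name assumed, as above
model.load_model(shape=(784, 64), gibbs_sampling_steps=1,
                 model_path='models/rbm-demo')
params = model.get_model_parameters()   # {'W': ..., 'bh_': ..., 'bv_': ...}
print(params['W'].shape)                # (784, 64)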
def setup(hass, config):
'Set up the Smappee component.'
client_id = config.get(DOMAIN).get(CONF_CLIENT_ID)
client_secret = config.get(DOMAIN).get(CONF_CLIENT_SECRET)
username = config.get(DOMAIN).get(CONF_USERNAME)
password = config.get(DOMAIN).get(CONF_PASSWORD)
host = config.get(DOMAIN).get(CONF_HOST)
host_password = config.get(DOMAIN).get(CONF_HOST_PASSWORD)
smappee = Smappee(client_id, client_secret, username, password, host, host_password)
if ((not smappee.is_local_active) and (not smappee.is_remote_active)):
_LOGGER.error('Neither Smappee server nor local component enabled.')
return False
hass.data[DATA_SMAPPEE] = smappee
load_platform(hass, 'switch', DOMAIN)
load_platform(hass, 'sensor', DOMAIN)
return True | 7,854,840,177,363,320,000 | Set up the Smappee component. | homeassistant/components/smappee.py | setup | Arshrock/home-assistant | python | def setup(hass, config):
client_id = config.get(DOMAIN).get(CONF_CLIENT_ID)
client_secret = config.get(DOMAIN).get(CONF_CLIENT_SECRET)
username = config.get(DOMAIN).get(CONF_USERNAME)
password = config.get(DOMAIN).get(CONF_PASSWORD)
host = config.get(DOMAIN).get(CONF_HOST)
host_password = config.get(DOMAIN).get(CONF_HOST_PASSWORD)
smappee = Smappee(client_id, client_secret, username, password, host, host_password)
if ((not smappee.is_local_active) and (not smappee.is_remote_active)):
_LOGGER.error('Neither Smappee server nor local component enabled.')
return False
hass.data[DATA_SMAPPEE] = smappee
load_platform(hass, 'switch', DOMAIN)
load_platform(hass, 'sensor', DOMAIN)
return True |
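For orientation, the setup function above reads its keys from the DOMAIN entry of the Home Assistant configuration. Expressed as the Python structure it consumes (the domain string 'smappee' and all values are assumptions for illustration; the cloud and local blocks are each optional, but at least one must authenticate):

config = {
    'smappee': {                            # DOMAIN (assumed to be 'smappee')
        'client_id': 'my-client-id',        # cloud API credentials (optional)
        'client_secret': 'my-client-secret',
        'username': 'user@example.com',
        'password': 'correct-horse',
        'host': '192.168.1.50',             # local device (optional)
        'host_password': 'admin',
    }
}
# setup(hass, config) then builds Smappee(...) and loads the
# 'switch' and 'sensor' platforms via load_platform.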
def __init__(self, client_id, client_secret, username, password, host, host_password):
'Initialize the data.'
import smappy
self._remote_active = False
self._local_active = False
if (client_id is not None):
try:
self._smappy = smappy.Smappee(client_id, client_secret)
self._smappy.authenticate(username, password)
self._remote_active = True
except RequestException as error:
self._smappy = None
_LOGGER.exception('Smappee server authentication failed (%s)', error)
else:
_LOGGER.warning('Smappee server component init skipped.')
if (host is not None):
try:
self._localsmappy = smappy.LocalSmappee(host)
self._localsmappy.logon(host_password)
self._local_active = True
except RequestException as error:
self._localsmappy = None
_LOGGER.exception('Local Smappee device authentication failed (%s)', error)
else:
_LOGGER.warning('Smappee local component init skipped.')
self.locations = {}
self.info = {}
self.consumption = {}
self.sensor_consumption = {}
self.instantaneous = {}
if (self._remote_active or self._local_active):
self.update() | 1,914,008,224,257,149,400 | Initialize the data. | homeassistant/components/smappee.py | __init__ | Arshrock/home-assistant | python | def __init__(self, client_id, client_secret, username, password, host, host_password):
import smappy
self._remote_active = False
self._local_active = False
if (client_id is not None):
try:
self._smappy = smappy.Smappee(client_id, client_secret)
self._smappy.authenticate(username, password)
self._remote_active = True
except RequestException as error:
self._smappy = None
_LOGGER.exception('Smappee server authentication failed (%s)', error)
else:
_LOGGER.warning('Smappee server component init skipped.')
if (host is not None):
try:
self._localsmappy = smappy.LocalSmappee(host)
self._localsmappy.logon(host_password)
self._local_active = True
except RequestException as error:
self._localsmappy = None
_LOGGER.exception('Local Smappee device authentication failed (%s)', error)
else:
_LOGGER.warning('Smappee local component init skipped.')
self.locations = {}
self.info = {}
self.consumption = {}
self.sensor_consumption = {}
self.instantaneous = {}
if (self._remote_active or self._local_active):
self.update() |
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
'Update data from Smappee API.'
if self.is_remote_active:
service_locations = self._smappy.get_service_locations().get('serviceLocations')
for location in service_locations:
location_id = location.get('serviceLocationId')
if (location_id is not None):
self.sensor_consumption[location_id] = {}
self.locations[location_id] = location.get('name')
self.info[location_id] = self._smappy.get_service_location_info(location_id)
_LOGGER.debug('Remote info %s %s', self.locations, self.info[location_id])
for sensors in self.info[location_id].get('sensors'):
sensor_id = sensors.get('id')
self.sensor_consumption[location_id].update({sensor_id: self.get_sensor_consumption(location_id, sensor_id, aggregation=3, delta=1440)})
_LOGGER.debug('Remote sensors %s %s', self.locations, self.sensor_consumption[location_id])
self.consumption[location_id] = self.get_consumption(location_id, aggregation=3, delta=1440)
_LOGGER.debug('Remote consumption %s %s', self.locations, self.consumption[location_id])
if self.is_local_active:
self.local_devices = self.get_switches()
_LOGGER.debug('Local switches %s', self.local_devices)
self.instantaneous = self.load_instantaneous()
_LOGGER.debug('Local values %s', self.instantaneous) | -5,550,122,194,476,160,000 | Update data from Smappee API. | homeassistant/components/smappee.py | update | Arshrock/home-assistant | python | @Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
if self.is_remote_active:
service_locations = self._smappy.get_service_locations().get('serviceLocations')
for location in service_locations:
location_id = location.get('serviceLocationId')
if (location_id is not None):
self.sensor_consumption[location_id] = {}
self.locations[location_id] = location.get('name')
self.info[location_id] = self._smappy.get_service_location_info(location_id)
_LOGGER.debug('Remote info %s %s', self.locations, self.info[location_id])
for sensors in self.info[location_id].get('sensors'):
sensor_id = sensors.get('id')
self.sensor_consumption[location_id].update({sensor_id: self.get_sensor_consumption(location_id, sensor_id, aggregation=3, delta=1440)})
_LOGGER.debug('Remote sensors %s %s', self.locations, self.sensor_consumption[location_id])
self.consumption[location_id] = self.get_consumption(location_id, aggregation=3, delta=1440)
_LOGGER.debug('Remote consumption %s %s', self.locations, self.consumption[location_id])
if self.is_local_active:
self.local_devices = self.get_switches()
_LOGGER.debug('Local switches %s', self.local_devices)
self.instantaneous = self.load_instantaneous()
_LOGGER.debug('Local values %s', self.instantaneous) |
@property
def is_remote_active(self):
'Return true if Smappee server is configured and working.'
return self._remote_active | 7,469,132,084,847,460,000 | Return true if Smappee server is configured and working. | homeassistant/components/smappee.py | is_remote_active | Arshrock/home-assistant | python | @property
def is_remote_active(self):
return self._remote_active |
@property
def is_local_active(self):
'Return true if Smappee local device is configured and working.'
return self._local_active | -9,195,173,792,660,097,000 | Return true if Smappee local device is configured and working. | homeassistant/components/smappee.py | is_local_active | Arshrock/home-assistant | python | @property
def is_local_active(self):
return self._local_active |
def get_switches(self):
'Get switches from local Smappee.'
if (not self.is_local_active):
return
try:
return self._localsmappy.load_command_control_config()
except RequestException as error:
_LOGGER.error('Error getting switches from local Smappee. (%s)', error) | 3,160,238,743,753,751,600 | Get switches from local Smappee. | homeassistant/components/smappee.py | get_switches | Arshrock/home-assistant | python | def get_switches(self):
if (not self.is_local_active):
return
try:
return self._localsmappy.load_command_control_config()
except RequestException as error:
_LOGGER.error('Error getting switches from local Smappee. (%s)', error) |
def get_consumption(self, location_id, aggregation, delta):
'Fetch consumption data from the Smappee cloud.'
if (not self.is_remote_active):
return
end = datetime.utcnow()
start = (end - timedelta(minutes=delta))
try:
return self._smappy.get_consumption(location_id, start, end, aggregation)
except RequestException as error:
_LOGGER.error('Error getting consumption from Smappee cloud. (%s)', error) | -4,255,988,973,020,689,400 | Fetch consumption data from the Smappee cloud. | homeassistant/components/smappee.py | get_consumption | Arshrock/home-assistant | python | def get_consumption(self, location_id, aggregation, delta):
if (not self.is_remote_active):
return
end = datetime.utcnow()
start = (end - timedelta(minutes=delta))
try:
return self._smappy.get_consumption(location_id, start, end, aggregation)
except RequestException as error:
_LOGGER.error('Error getting consumption from Smappee cloud. (%s)', error)
def get_sensor_consumption(self, location_id, sensor_id, aggregation, delta):
'Fetch sensor consumption data from the Smappee cloud.'
if (not self.is_remote_active):
return
end = datetime.utcnow()
start = (end - timedelta(minutes=delta))
try:
return self._smappy.get_sensor_consumption(location_id, sensor_id, start, end, aggregation)
except RequestException as error:
_LOGGER.error('Error getting consumption from Smappee cloud. (%s)', error) | -1,799,282,535,104,980,000 | Fetch sensor consumption data from the Smappee cloud. | homeassistant/components/smappee.py | get_sensor_consumption | Arshrock/home-assistant | python | def get_sensor_consumption(self, location_id, sensor_id, aggregation, delta):
if (not self.is_remote_active):
return
end = datetime.utcnow()
start = (end - timedelta(minutes=delta))
try:
return self._smappy.get_sensor_consumption(location_id, sensor_id, start, end, aggregation)
except RequestException as error:
_LOGGER.error('Error getting consumption from Smappee cloud. (%s)', error)
def actuator_on(self, location_id, actuator_id, is_remote_switch, duration=None):
'Turn on actuator.'
try:
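# Note: the duplicated calls below are deliberate, not a typo -- the plugs
# tend to ignore a single on/off signal and their state cannot be read back,
# so the command is sent twice for reliability (assumed intent; any original
# explanatory comment was stripped from this record).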
if is_remote_switch:
self._smappy.actuator_on(location_id, actuator_id, duration)
self._smappy.actuator_on(location_id, actuator_id, duration)
else:
self._localsmappy.on_command_control(actuator_id)
self._localsmappy.on_command_control(actuator_id)
except RequestException as error:
_LOGGER.error('Error turning actuator on. (%s)', error)
return False
return True | 6,920,471,198,806,654,000 | Turn on actuator. | homeassistant/components/smappee.py | actuator_on | Arshrock/home-assistant | python | def actuator_on(self, location_id, actuator_id, is_remote_switch, duration=None):
try:
if is_remote_switch:
self._smappy.actuator_on(location_id, actuator_id, duration)
self._smappy.actuator_on(location_id, actuator_id, duration)
else:
self._localsmappy.on_command_control(actuator_id)
self._localsmappy.on_command_control(actuator_id)
except RequestException as error:
_LOGGER.error('Error turning actuator on. (%s)', error)
return False
return True |
def actuator_off(self, location_id, actuator_id, is_remote_switch, duration=None):
'Turn off actuator.'
try:
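# As in actuator_on above, the command is issued twice on purpose (assumed
# intent); the duplicate calls below are not a typo.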
if is_remote_switch:
self._smappy.actuator_off(location_id, actuator_id, duration)
self._smappy.actuator_off(location_id, actuator_id, duration)
else:
self._localsmappy.off_command_control(actuator_id)
self._localsmappy.off_command_control(actuator_id)
except RequestException as error:
_LOGGER.error('Error turning actuator off. (%s)', error)
return False
return True | 1,006,460,597,006,499,000 | Turn off actuator. | homeassistant/components/smappee.py | actuator_off | Arshrock/home-assistant | python | def actuator_off(self, location_id, actuator_id, is_remote_switch, duration=None):
try:
if is_remote_switch:
self._smappy.actuator_off(location_id, actuator_id, duration)
self._smappy.actuator_off(location_id, actuator_id, duration)
else:
self._localsmappy.off_command_control(actuator_id)
self._localsmappy.off_command_control(actuator_id)
except RequestException as error:
_LOGGER.error('Error turning actuator on. (%s)', error)
return False
return True |
def active_power(self):
'Get sum of all instantaneous active power values from local hub.'
if (not self.is_local_active):
return
try:
return self._localsmappy.active_power()
except RequestException as error:
_LOGGER.error('Error getting data from Local Smappee unit. (%s)', error) | 8,179,547,795,337,284,000 | Get sum of all instantaneous active power values from local hub. | homeassistant/components/smappee.py | active_power | Arshrock/home-assistant | python | def active_power(self):
if (not self.is_local_active):
return
try:
return self._localsmappy.active_power()
except RequestException as error:
_LOGGER.error('Error getting data from Local Smappee unit. (%s)', error) |
def active_cosfi(self):
'Get the average of all instantaneous cosfi values.'
if (not self.is_local_active):
return
try:
return self._localsmappy.active_cosfi()
except RequestException as error:
_LOGGER.error('Error getting data from Local Smappee unit. (%s)', error) | -4,409,512,030,781,574,700 | Get the average of all instantaneous cosfi values. | homeassistant/components/smappee.py | active_cosfi | Arshrock/home-assistant | python | def active_cosfi(self):
if (not self.is_local_active):
return
try:
return self._localsmappy.active_cosfi()
except RequestException as error:
_LOGGER.error('Error getting data from Local Smappee unit. (%s)', error) |
def instantaneous_values(self):
'ReportInstantaneousValues.'
if (not self.is_local_active):
return
report_instantaneous_values = self._localsmappy.report_instantaneous_values()
report_result = report_instantaneous_values['report'].split('<BR>')
properties = {}
for lines in report_result:
lines_result = lines.split(',')
for prop in lines_result:
match = _SENSOR_REGEX.search(prop)
if match:
properties[match.group('key')] = match.group('value')
_LOGGER.debug(properties)
return properties | 7,188,063,171,174,411,000 | ReportInstantaneousValues. | homeassistant/components/smappee.py | instantaneous_values | Arshrock/home-assistant | python | def instantaneous_values(self):
if (not self.is_local_active):
return
report_instantaneous_values = self._localsmappy.report_instantaneous_values()
report_result = report_instantaneous_values['report'].split('<BR>')
properties = {}
for lines in report_result:
lines_result = lines.split(',')
for prop in lines_result:
match = _SENSOR_REGEX.search(prop)
if match:
properties[match.group('key')] = match.group('value')
_LOGGER.debug(properties)
return properties |
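The body above references _SENSOR_REGEX, which is defined elsewhere in smappee.py. A hypothetical stand-in -- it only needs to expose the 'key' and 'value' named groups used above -- together with the comma/<BR>-separated report format it parses:

import re

_SENSOR_REGEX = re.compile(r'(?P<key>[A-Za-z]+)=(?P<value>[\d.]+)')  # hypothetical pattern

report = 'voltage=231.5,current=1.2<BR>activePower=276.4'
properties = {}
for line in report.split('<BR>'):
    for part in line.split(','):
        match = _SENSOR_REGEX.search(part)
        if match:
            properties[match.group('key')] = match.group('value')
print(properties)  # {'voltage': '231.5', 'current': '1.2', 'activePower': '276.4'}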