body (stringlengths 26–98.2k) | body_hash (int64) | docstring (stringlengths 1–16.8k) | path (stringlengths 5–230) | name (stringlengths 1–96) | repository_name (stringlengths 7–89) | lang (stringclasses 1) | body_without_docstring (stringlengths 20–98.2k)
---|---|---|---|---|---|---|---|
def build_model(self, norm=True, act='relu'):
'Build DCE using the initialized attributes\n\n Args:\n norm: boolean, whether to add a normalization layer at the beginning\n of the autoencoder\n act: string, keras activation function name for autoencoder\n '
autoencoder = DeepAutoEncoder(self.autoencoder_dims, act)
autoencoder.build_model(norm=norm)
embedding = autoencoder.model.get_layer(name='embedding_layer').output
clustering = KMeansLayer(self.n_clusters, alpha=self.alpha, name='clustering')(embedding)
self.model = Model(inputs=autoencoder.model.input, outputs=[clustering, autoencoder.model.output])
return | -1,438,000,602,470,540,500 | Build DCE using the initialized attributes
Args:
norm: boolean, whether to add a normalization layer at the beginning
of the autoencoder
act: string, keras activation function name for autoencoder | deepchembed/dce.py | build_model | chembed/DeepChEmbed | python | def build_model(self, norm=True, act='relu'):
'Build DCE using the initialized attributes\n\n Args:\n norm: boolean, whether to add a normalization layer at the beginning\n of the autoencoder\n act: string, keras activation function name for autoencoder\n '
autoencoder = DeepAutoEncoder(self.autoencoder_dims, act)
autoencoder.build_model(norm=norm)
embedding = autoencoder.model.get_layer(name='embedding_layer').output
clustering = KMeansLayer(self.n_clusters, alpha=self.alpha, name='clustering')(embedding)
self.model = Model(inputs=autoencoder.model.input, outputs=[clustering, autoencoder.model.output])
return |
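A hedged usage sketch for this row's `build_model`; the `DCE` constructor arguments and the import path are assumptions inferred from the attributes referenced above (`autoencoder_dims`, `n_clusters`, `alpha`), not a confirmed API.

```python
# Hypothetical sketch, assuming a DCE class in deepchembed/dce.py whose
# constructor sets the attributes that build_model reads; adjust to the repo.
from dce import DCE  # assumed import, based on this row's path deepchembed/dce.py

dce = DCE(autoencoder_dims=[166, 64, 8], n_clusters=5, alpha=1.0)  # assumed args
dce.build_model(norm=True, act='relu')
# The built Keras Model has two outputs: soft cluster assignments from the
# KMeansLayer and the autoencoder reconstruction.
dce.model.summary()
```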
def train_model(self, data_train, labels_train=None, data_test=None, labels_test=None, verbose=1, clustering_loss='kld', decoder_loss='mse', clustering_loss_weight=0.5, hardening_order=1, hardening_strength=2.0, compiled=False, optimizer='adam', lr=0.001, decay=0.0):
'Train DCE Model:\n\n If labels_train are not present, train DCE model in an unsupervised\n learning process; otherwise, train DCE model in a supervised learning\n process.\n\n Args:\n data_train: input training data\n labels_train: true labels of training data\n data_test: input test data\n labels_test: true labels of testing data\n verbose: 0, turn off the screen prints\n clustering_loss: string, clustering layer loss function\n decoder_loss: string, decoder loss function\n clustering_loss_weight: float in [0,1], w_c\n hardening_order: odd int, the order of the hardening function\n hardening_strength: float >= 1.0, the strength of the hardening\n compiled: boolean, indicating if the model is compiled or not\n optimizer: string, keras optimizers\n lr: learning rate\n decay: learning rate decay\n\n Returns:\n train_loss: training loss\n test_loss: only if data_test and labels_test are not None in\n supervised learning process\n '
if (not compiled):
assert ((clustering_loss_weight <= 1) and (clustering_loss_weight >= 0))
if (optimizer == 'adam'):
dce_optimizer = optimizers.Adam(lr=lr, decay=decay)
elif (optimizer == 'sgd'):
dce_optimizer = optimizers.SGD(lr=lr, decay=decay)
else:
raise Exception('Input optimizer was not found')
self.model.compile(loss={'clustering': clustering_loss, 'decoder_output': decoder_loss}, loss_weights=[clustering_loss_weight, (1 - clustering_loss_weight)], optimizer=dce_optimizer)
if (labels_train is not None):
supervised_learning = True
if (verbose >= 1):
print('Starting supervised learning')
else:
supervised_learning = False
if (verbose >= 1):
print('Starting unsupervised learning')
kmeans_init = KMeans(n_clusters=self.n_clusters)
kmeans_init.build_model()
encoder = Model(inputs=self.model.input, outputs=self.model.get_layer(name='embedding_layer').output)
kmeans_init.model.fit(encoder.predict(data_train))
y_pred_last = kmeans_init.model.labels_
self.model.get_layer(name='clustering').set_weights([kmeans_init.model.cluster_centers_])
if (not supervised_learning):
assert (hardening_order in DCE.HARDENING_FUNCS.keys())
assert (hardening_strength >= 1.0)
h_func = DCE.HARDENING_FUNCS[hardening_order]
else:
assert (len(labels_train) == len(data_train))
assert (len(np.unique(labels_train)) == self.n_clusters)
p = np.zeros(shape=(len(labels_train), self.n_clusters))
for i in range(len(labels_train)):
p[i][labels_train[i]] = 1.0
if (data_test is not None):
assert (len(labels_test) == len(data_test))
assert (len(np.unique(labels_test)) == self.n_clusters)
p_test = np.zeros(shape=(len(labels_test), self.n_clusters))
for i in range(len(labels_test)):
p_test[i][labels_test[i]] = 1.0
validation_loss = []
loss = []
for iteration in range(int(self.max_iteration)):
if ((iteration % self.update_interval) == 0):
(q, _) = self.model.predict(data_train)
if (not supervised_learning):
p = DCE.hardening(q, h_func, hardening_strength)
y_pred = q.argmax(1)
delta_label_i = (np.sum((y_pred != y_pred_last)).astype(np.float32) / y_pred.shape[0])
y_pred_last = y_pred
if ((iteration > 0) and (delta_label_i < self.clustering_tol)):
print(((str(delta_label_i) + ' < ') + str(self.clustering_tol)))
print('Reached tolerance threshold. Stopping training.')
break
loss.append(self.model.train_on_batch(x=data_train, y=[p, data_train]))
if (supervised_learning and (data_test is not None)):
validation_loss.append(self.model.test_on_batch(x=data_test, y=[p_test, data_test]))
if ((verbose > 0) and ((iteration % self.update_interval) == 0)):
print(('Epoch: ' + str(iteration)))
if (verbose == 1):
print((((' Total_loss = ' + str(loss[iteration][0])) + '; Delta_label = ') + str(delta_label_i)))
print((((' Clustering_loss = ' + str(loss[iteration][1])) + '; Decoder_loss = ') + str(loss[iteration][2])))
if (iteration == (self.max_iteration - 1)):
print('Reached maximum iteration. Stopping training.')
if (data_test is None):
return np.array(loss).T
else:
return [np.array(loss).T, np.array(validation_loss).T] | -9,053,447,958,165,298,000 | Train DCE Model:
If labels_train are not present, train DCE model in an unsupervised
learning process; otherwise, train DCE model in a supervised learning
process.
Args:
data_train: input training data
labels_train: true labels of training data
data_test: input test data
labels_test: true labels of testing data
verbose: 0, turn off the screen prints
clustering_loss: string, clustering layer loss function
decoder_loss: string, decoder loss function
clustering_loss_weight: float in [0,1], w_c
hardening_order: odd int, the order of the hardening function
hardening_strength: float >= 1.0, the strength of the hardening
compiled: boolean, indicating if the model is compiled or not
optimizer: string, keras optimizers
lr: learning rate
decay: learning rate decay
Returns:
train_loss: training loss
test_loss: only if data_test and labels_test are not None in
supervised learning process | deepchembed/dce.py | train_model | chembed/DeepChEmbed | python | def train_model(self, data_train, labels_train=None, data_test=None, labels_test=None, verbose=1, compiled=False, clustering_loss='kld', decoder_loss='mse', clustering_loss_weight=0.5, hardening_order=1, hardening_strength=2.0, compiled=False, optimizer='adam', lr=0.001, decay=0.0):
'Train DCE Model:\n\n If labels_train are not present, train DCE model in a unsupervised\n learning process; otherwise, train DCE model in a supervised learning\n process.\n\n Args:\n data_train: input training data\n labels_train: true labels of traning data\n data_test: input test data\n labels_test: true lables of testing data\n verbose: 0, turn off the screen prints\n clustering_loss: string, clustering layer loss function\n decoder_loss:, string, decoder loss function\n clustering_loss_weight: float in [0,1], w_c,\n harderning_order: odd int, the order of hardening function\n harderning_strength: float >=1.0, the streng of the harderning\n compiled: boolean, indicating if the model is compiled or not\n optmizer: string, keras optimizers\n lr: learning rate\n dacay: learning rate dacay\n\n Returns:\n train_loss: training loss\n test_loss: only if data_test and labels_test are not None in\n supervised learning process\n '
if (not compiled):
assert ((clustering_loss_weight <= 1) and (clustering_loss_weight >= 0))
if (optimizer == 'adam'):
dce_optimizer = optimizers.Adam(lr=lr, decay=decay)
elif (optimizer == 'sgd'):
dce_optimizer = optimizers.SGD(lr=lr, decay=decay)
else:
raise Exception('Input optimizer was not found')
self.model.compile(loss={'clustering': clustering_loss, 'decoder_output': decoder_loss}, loss_weights=[clustering_loss_weight, (1 - clustering_loss_weight)], optimizer=dce_optimizer)
if (labels_train is not None):
supervised_learning = True
if (verbose >= 1):
print('Starting supervised learning')
else:
supervised_learning = False
if (verbose >= 1):
print('Starting unsupervised learning')
kmeans_init = KMeans(n_clusters=self.n_clusters)
kmeans_init.build_model()
encoder = Model(inputs=self.model.input, outputs=self.model.get_layer(name='embedding_layer').output)
kmeans_init.model.fit(encoder.predict(data_train))
y_pred_last = kmeans_init.model.labels_
self.model.get_layer(name='clustering').set_weights([kmeans_init.model.cluster_centers_])
if (not supervised_learning):
assert (hardening_order in DCE.HARDENING_FUNCS.keys())
assert (hardening_strength >= 1.0)
h_func = DCE.HARDENING_FUNCS[hardening_order]
else:
assert (len(labels_train) == len(data_train))
assert (len(np.unique(labels_train)) == self.n_clusters)
p = np.zeros(shape=(len(labels_train), self.n_clusters))
for i in range(len(labels_train)):
p[i][labels_train[i]] = 1.0
if (data_test is not None):
assert (len(labels_test) == len(data_test))
assert (len(np.unique(labels_test)) == self.n_clusters)
p_test = np.zeros(shape=(len(labels_test), self.n_clusters))
for i in range(len(labels_test)):
p_test[i][labels_test[i]] = 1.0
validation_loss = []
loss = []
for iteration in range(int(self.max_iteration)):
if ((iteration % self.update_interval) == 0):
(q, _) = self.model.predict(data_train)
if (not supervised_learning):
p = DCE.hardening(q, h_func, hardening_strength)
y_pred = q.argmax(1)
delta_label_i = (np.sum((y_pred != y_pred_last)).astype(np.float32) / y_pred.shape[0])
y_pred_last = y_pred
if ((iteration > 0) and (delta_label_i < self.clustering_tol)):
print(((str(delta_label_i) + ' < ') + str(self.clustering_tol)))
print('Reached tolerance threshold. Stopping training.')
break
loss.append(self.model.train_on_batch(x=data_train, y=[p, data_train]))
if (supervised_learning and (data_test is not None)):
validation_loss.append(self.model.test_on_batch(x=data_test, y=[p_test, data_test]))
if ((verbose > 0) and ((iteration % self.update_interval) == 0)):
print(('Epoch: ' + str(iteration)))
if (verbose == 1):
print((((' Total_loss = ' + str(loss[iteration][0])) + '; Delta_label = ') + str(delta_label_i)))
print((((' Clustering_loss = ' + str(loss[iteration][1])) + '; Decoder_loss = ') + str(loss[iteration][2])))
if (iteration == (self.max_iteration - 1)):
print('Reached maximum iteration. Stopping training.')
if (data_test is None):
return np.array(loss).T
else:
return [np.array(loss).T, np.array(validation_loss).T] |
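The per-sample loop that builds the one-hot target matrix `p` in `train_model` can be expressed as a single vectorized indexing operation; a minimal standalone equivalent:

```python
import numpy as np

labels_train = np.array([0, 2, 1, 2])
n_clusters = 3

# Loop version, as in train_model:
p_loop = np.zeros(shape=(len(labels_train), n_clusters))
for i in range(len(labels_train)):
    p_loop[i][labels_train[i]] = 1.0

# Equivalent vectorized one-hot encoding:
p_vec = np.eye(n_clusters)[labels_train]
assert np.array_equal(p_loop, p_vec)
```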
@staticmethod
def hardening(q, h_func, strength):
'Harden distribution Q and return P\n\n Args:\n q: input distributions.\n h_func: input hardening function.\n strength: hardening strength.\n\n Returns:\n p: hardened and normalized distributions.\n\n '
q = h_func(q)
weight = ((q ** strength) / q.sum(0))
return (weight.T / weight.sum(1)).T | 4,162,263,595,985,963,500 | Harden distribution Q and return P
Args:
q: input distributions.
h_func: input hardening function.
strength: hardening strength.
Returns:
p: hardened and normalized distributions. | deepchembed/dce.py | hardening | chembed/DeepChEmbed | python | @staticmethod
def hardening(q, h_func, strength):
'Harden distribution Q and return P\n\n Args:\n q: input distributions.\n h_func: input hardening function.\n strength: hardening strength.\n\n Returns:\n p: hardened and normalized distributions.\n\n '
q = h_func(q)
weight = ((q ** strength) / q.sum(0))
return (weight.T / weight.sum(1)).T |
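A small numeric walk-through of `hardening`, using an identity function as the order-1 hardening function (an assumption about what `DCE.HARDENING_FUNCS[1]` provides): each row is sharpened by `strength`, reweighted by per-cluster column mass, then renormalized to sum to 1.

```python
import numpy as np

q = np.array([[0.6, 0.4],
              [0.3, 0.7]])
strength = 2.0
h_func = lambda x: x                   # assumed order-1 hardening function

q = h_func(q)
weight = (q ** strength) / q.sum(0)    # sharpen, scale by column (cluster) mass
p = (weight.T / weight.sum(1)).T       # renormalize each row

print(p.round(3))   # dominant entries grow sharper: [[0.733 0.267], [0.183 0.817]]
print(p.sum(1))     # [1. 1.]
```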
def authenticate_active(self, request, principal, auth, life=None, sign=True, skip_handling_check=False, *args, **kwargs):
"Generate a WLS 'success' response based on interaction with the user\n\n This function creates a WLS response specifying that the principal was\n authenticated based on 'fresh' interaction with the user (e.g. input of\n a username and password).\n\n Args:\n request (AuthRequest): the original WAA request\n principal (AuthPrincipal): the principal authenticated by the WLS\n auth (str): the authentication method used by the principal.\n life (int): if specified, the validity (in seconds) of the\n principal's session with the WLS.\n sign (bool): whether to sign the response or not. Recommended to\n leave this at the default value of `True` (see warning below).\n\n *args: passed to `AuthResponse.respond_to_request`\n **kwargs: passed to `AuthResponse.respond_to_request`\n\n Returns:\n An `AuthResponse` instance matching the given arguments.\n\n Warning:\n Responses indicating successful authentication *MUST* be signed by\n the WLS. It is recommended that you leave `sign` set to `True`, or\n make sure to sign the response manually afterwards.\n "
self._pre_response(request, skip_handling_check)
if (request.iact == False):
raise ValueError("WAA demanded passive authentication (iact == 'no')")
if ((life is None) and (principal.session_expiry is not None)):
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
response = AuthResponse.respond_to_request(*args, request=request, code=status.SUCCESS, principal=principal.userid, auth=auth, ptags=principal.ptags, life=life, **kwargs)
return self._finish_response(response=response, sign=sign) | 8,779,146,282,649,714,000 | Generate a WLS 'success' response based on interaction with the user
This function creates a WLS response specifying that the principal was
authenticated based on 'fresh' interaction with the user (e.g. input of
a username and password).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
auth (str): the authentication method used by the principal.
life (int): if specified, the validity (in seconds) of the
principal's session with the WLS.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards. | ucam_wls/context.py | authenticate_active | edwinbalani/ucam-wls | python | def authenticate_active(self, request, principal, auth, life=None, sign=True, skip_handling_check=False, *args, **kwargs):
"Generate a WLS 'success' response based on interaction with the user\n\n This function creates a WLS response specifying that the principal was\n authenticated based on 'fresh' interaction with the user (e.g. input of\n a username and password).\n\n Args:\n request (AuthRequest): the original WAA request\n principal (AuthPrincipal): the principal authenticated by the WLS\n auth (str): the authentication method used by the principal.\n life (int): if specified, the validity (in seconds) of the\n principal's session with the WLS.\n sign (bool): whether to sign the response or not. Recommended to\n leave this at the default value of `True` (see warning below).\n\n *args: passed to `AuthResponse.respond_to_request`\n **kwargs: passed to `AuthResponse.respond_to_request`\n\n Returns:\n An `AuthResponse` instance matching the given arguments.\n\n Warning:\n Responses indicating successful authentication *MUST* be signed by\n the WLS. It is recommended that you leave `sign` set to `True`, or\n make sure to sign the response manually afterwards.\n "
self._pre_response(request, skip_handling_check)
if (request.iact == False):
raise ValueError("WAA demanded passive authentication (iact == 'no')")
if ((life is None) and (principal.session_expiry is not None)):
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
response = AuthResponse.respond_to_request(*args, request=request, code=status.SUCCESS, principal=principal.userid, auth=auth, ptags=principal.ptags, life=life, **kwargs)
return self._finish_response(response=response, sign=sign) |
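A standalone illustration of the `life` fallback above, which converts a principal's `session_expiry` into a whole-second validity period (no ucam_wls imports required):

```python
import datetime

# Mirrors: life = int((principal.session_expiry - utcnow()).total_seconds())
session_expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)
life = int((session_expiry - datetime.datetime.utcnow()).total_seconds())
print(life)  # roughly 1800 seconds
```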
def authenticate_passive(self, request, principal, sso=[], sign=True, skip_handling_check=False, *args, **kwargs):
"Generate a WLS 'success' response based on a pre-existing identity\n\n This function creates a WLS response specifying that the principal was\n authenticated based on previous successful authentication (e.g. an\n existing WLS session cookie).\n\n Args:\n request (AuthRequest): the original WAA request\n principal (AuthPrincipal): the principal authenticated by the WLS\n sso (list): a list of strings indicating the authentication methods\n previously used for authentication by the principal. If an\n empty list is passed, `principal.auth_methods` will be used.\n sign (bool): whether to sign the response or not. Recommended to\n leave this at the default value of `True` (see warning below).\n\n *args: passed to `AuthResponse.respond_to_request`\n **kwargs: passed to `AuthResponse.respond_to_request`\n\n Returns:\n An `AuthResponse` instance matching the given arguments.\n\n Warning:\n Responses indicating successful authentication *MUST* be signed by\n the WLS. It is recommended that you leave `sign` set to `True`, or\n make sure to sign the response manually afterwards.\n "
self._pre_response(request, skip_handling_check)
if (request.iact == True):
raise ValueError("WAA demanded active authentication (iact == 'yes')")
if (len(sso) == 0):
sso = principal.auth_methods
if (len(sso) == 0):
raise ValueError('no authentication methods specified for `sso`')
if (principal.session_expiry is not None):
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
else:
life = None
response = AuthResponse.respond_to_request(*args, request=request, code=status.SUCCESS, principal=principal.userid, sso=sso, ptags=principal.ptags, life=life, **kwargs)
return self._finish_response(response=response, sign=sign) | 1,335,896,058,374,553,300 | Generate a WLS 'success' response based on a pre-existing identity
This function creates a WLS response specifying that the principal was
authenticated based on previous successful authentication (e.g. an
existing WLS session cookie).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
sso (list): a list of strings indicating the authentication methods
previously used for authentication by the principal. If an
empty list is passed, `principal.auth_methods` will be used.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards. | ucam_wls/context.py | authenticate_passive | edwinbalani/ucam-wls | python | def authenticate_passive(self, request, principal, sso=[], sign=True, skip_handling_check=False, *args, **kwargs):
"Generate a WLS 'success' response based on a pre-existing identity\n\n This function creates a WLS response specifying that the principal was\n authenticated based on previous successful authentication (e.g. an\n existing WLS session cookie).\n\n Args:\n request (AuthRequest): the original WAA request\n principal (AuthPrincipal): the principal authenticated by the WLS\n sso (list): a list of strings indicating the authentication methods\n previously used for authentication by the principal. If an\n empty list is passed, `principal.auth_methods` will be used.\n sign (bool): whether to sign the response or not. Recommended to\n leave this at the default value of `True` (see warning below).\n\n *args: passed to `AuthResponse.respond_to_request`\n **kwargs: passed to `AuthResponse.respond_to_request`\n\n Returns:\n An `AuthResponse` instance matching the given arguments.\n\n Warning:\n Responses indicating successful authentication *MUST* be signed by\n the WLS. It is recommended that you leave `sign` set to `True`, or\n make sure to sign the response manually afterwards.\n "
self._pre_response(request, skip_handling_check)
if (request.iact == True):
raise ValueError("WAA demanded active authentication (iact == 'yes')")
if (len(sso) == 0):
sso = principal.auth_methods
if (len(sso) == 0):
raise ValueError('no authentication methods specified for `sso`')
if (principal.session_expiry is not None):
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
else:
life = None
response = AuthResponse.respond_to_request(*args, request=request, code=status.SUCCESS, principal=principal.userid, sso=sso, ptags=principal.ptags, life=life, **kwargs)
return self._finish_response(response=response, sign=sign) |
def generate_failure(self, code, request, msg='', sign=True, skip_handling_check=False, *args, **kwargs):
"Generate a response indicating failure.\n\n This is to be used in all cases where the outcome of user interaction\n is not success. This function will refuse to handle a request where\n the 'fail' parameter is 'yes' (in which case the WLS must not redirect\n back to the WAA).\n\n Args:\n code (int): the response status code. Values specified in the\n protocol are available as constants under `ucam_wls.status`.\n request (AuthRequest): the original WAA request\n msg (str): an optional message that could be shown to the end user\n by the WAA\n sign (bool): whether to sign the response or not.\n\n *args: passed to `AuthResponse.respond_to_request`\n **kwargs: passed to `AuthResponse.respond_to_request`\n\n Returns:\n An `AuthResponse` instance matching the given arguments.\n\n Note:\n Signatures on WLS responses indicating a non-success can optionally\n be signed. In the interests of security, the default in this\n function is to go ahead and sign anyway, but this can be turned off\n if really desired.\n "
self._pre_response(request, skip_handling_check, check_auth_types=False)
if request.fail:
raise ValueError('WAA specified that WLS must not redirect back to it on failure')
if (code == status.SUCCESS):
raise ValueError('Failure responses must not have success status')
response = AuthResponse.respond_to_request(*args, request=request, code=code, **kwargs)
return self._finish_response(response=response, sign=sign) | -3,337,601,949,590,731,300 | Generate a response indicating failure.
This is to be used in all cases where the outcome of user interaction
is not success. This function will refuse to handle a request where
the 'fail' parameter is 'yes' (in which case the WLS must not redirect
back to the WAA).
Args:
code (int): the response status code. Values specified in the
protocol are available as constants under `ucam_wls.status`.
request (AuthRequest): the original WAA request
msg (str): an optional message that could be shown to the end user
by the WAA
sign (bool): whether to sign the response or not.
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Note:
WLS responses indicating non-success may optionally be signed. In the
interests of security, the default in this
function is to go ahead and sign anyway, but this can be turned off
if really desired. | ucam_wls/context.py | generate_failure | edwinbalani/ucam-wls | python | def generate_failure(self, code, request, msg='', sign=True, skip_handling_check=False, *args, **kwargs):
"Generate a response indicating failure.\n\n This is to be used in all cases where the outcome of user interaction\n is not success. This function will refuse to handle a request where\n the 'fail' parameter is 'yes' (in which case the WLS must not redirect\n back to the WAA).\n\n Args:\n code (int): the response status code. Values specified in the\n protocol are available as constants under `ucam_wls.status`.\n request (AuthRequest): the original WAA request\n msg (str): an optional message that could be shown to the end user\n by the WAA\n sign (bool): whether to sign the response or not.\n\n *args: passed to `AuthResponse.respond_to_request`\n **kwargs: passed to `AuthResponse.respond_to_request`\n\n Returns:\n An `AuthResponse` instance matching the given arguments.\n\n Note:\n Signatures on WLS responses indicating a non-success can optionally\n be signed. In the interests of security, the default in this\n function is to go ahead and sign anyway, but this can be turned off\n if really desired.\n "
self._pre_response(request, skip_handling_check, check_auth_types=False)
if request.fail:
raise ValueError('WAA specified that WLS must not redirect back to it on failure')
if (code == status.SUCCESS):
raise ValueError('Failure responses must not have success status')
response = AuthResponse.respond_to_request(*args, request=request, code=code, **kwargs)
return self._finish_response(response=response, sign=sign) |
def __init__(__self__, *, enable_magnetic_store_writes: Optional[bool]=None, magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']=None):
"\n :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.\n :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.\n "
if (enable_magnetic_store_writes is not None):
pulumi.set(__self__, 'enable_magnetic_store_writes', enable_magnetic_store_writes)
if (magnetic_store_rejected_data_location is not None):
pulumi.set(__self__, 'magnetic_store_rejected_data_location', magnetic_store_rejected_data_location) | 2,888,393,677,886,899,000 | :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.
:param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | __init__ | chivandikwa/pulumi-aws | python | def __init__(__self__, *, enable_magnetic_store_writes: Optional[bool]=None, magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']=None):
"\n :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.\n :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.\n "
if (enable_magnetic_store_writes is not None):
pulumi.set(__self__, 'enable_magnetic_store_writes', enable_magnetic_store_writes)
if (magnetic_store_rejected_data_location is not None):
pulumi.set(__self__, 'magnetic_store_rejected_data_location', magnetic_store_rejected_data_location) |
@property
@pulumi.getter(name='enableMagneticStoreWrites')
def enable_magnetic_store_writes(self) -> Optional[bool]:
'\n A flag to enable magnetic store writes.\n '
return pulumi.get(self, 'enable_magnetic_store_writes') | -2,718,757,825,877,902,300 | A flag to enable magnetic store writes. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | enable_magnetic_store_writes | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='enableMagneticStoreWrites')
def enable_magnetic_store_writes(self) -> Optional[bool]:
'\n \n '
return pulumi.get(self, 'enable_magnetic_store_writes') |
@property
@pulumi.getter(name='magneticStoreRejectedDataLocation')
def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']:
'\n The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.\n '
return pulumi.get(self, 'magnetic_store_rejected_data_location') | 7,316,370,310,385,799,000 | The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | magnetic_store_rejected_data_location | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='magneticStoreRejectedDataLocation')
def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']:
'\n \n '
return pulumi.get(self, 'magnetic_store_rejected_data_location') |
def __init__(__self__, *, s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']=None):
"\n :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.\n "
if (s3_configuration is not None):
pulumi.set(__self__, 's3_configuration', s3_configuration) | -6,933,671,522,388,319,000 | :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | __init__ | chivandikwa/pulumi-aws | python | def __init__(__self__, *, s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']=None):
"\n \n "
if (s3_configuration is not None):
pulumi.set(__self__, 's3_configuration', s3_configuration) |
@property
@pulumi.getter(name='s3Configuration')
def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']:
'\n Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.\n '
return pulumi.get(self, 's3_configuration') | 8,736,312,081,624,449,000 | Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | s3_configuration | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='s3Configuration')
def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']:
'\n \n '
return pulumi.get(self, 's3_configuration') |
def __init__(__self__, *, bucket_name: Optional[str]=None, encryption_option: Optional[str]=None, kms_key_id: Optional[str]=None, object_key_prefix: Optional[str]=None):
'\n :param str bucket_name: Bucket name of the customer S3 bucket.\n :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.\n :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key.\n :param str object_key_prefix: Object key prefix for the customer S3 location.\n '
if (bucket_name is not None):
pulumi.set(__self__, 'bucket_name', bucket_name)
if (encryption_option is not None):
pulumi.set(__self__, 'encryption_option', encryption_option)
if (kms_key_id is not None):
pulumi.set(__self__, 'kms_key_id', kms_key_id)
if (object_key_prefix is not None):
pulumi.set(__self__, 'object_key_prefix', object_key_prefix) | -8,271,482,238,891,445,000 | :param str bucket_name: Bucket name of the customer S3 bucket.
:param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
:param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key.
:param str object_key_prefix: Object key prefix for the customer S3 location. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | __init__ | chivandikwa/pulumi-aws | python | def __init__(__self__, *, bucket_name: Optional[str]=None, encryption_option: Optional[str]=None, kms_key_id: Optional[str]=None, object_key_prefix: Optional[str]=None):
'\n :param str bucket_name: Bucket name of the customer S3 bucket.\n :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.\n :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key.\n :param str object_key_prefix: Object key prefix for the customer S3 location.\n '
if (bucket_name is not None):
pulumi.set(__self__, 'bucket_name', bucket_name)
if (encryption_option is not None):
pulumi.set(__self__, 'encryption_option', encryption_option)
if (kms_key_id is not None):
pulumi.set(__self__, 'kms_key_id', kms_key_id)
if (object_key_prefix is not None):
pulumi.set(__self__, 'object_key_prefix', object_key_prefix) |
@property
@pulumi.getter(name='bucketName')
def bucket_name(self) -> Optional[str]:
'\n Bucket name of the customer S3 bucket.\n '
return pulumi.get(self, 'bucket_name') | 4,003,761,450,091,991 | Bucket name of the customer S3 bucket. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | bucket_name | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='bucketName')
def bucket_name(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'bucket_name') |
@property
@pulumi.getter(name='encryptionOption')
def encryption_option(self) -> Optional[str]:
'\n Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.\n '
return pulumi.get(self, 'encryption_option') | 9,216,246,817,732,302,000 | Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | encryption_option | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='encryptionOption')
def encryption_option(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'encryption_option') |
@property
@pulumi.getter(name='kmsKeyId')
def kms_key_id(self) -> Optional[str]:
'\n KMS key arn for the customer s3 location when encrypting with a KMS managed key.\n '
return pulumi.get(self, 'kms_key_id') | -4,133,450,127,578,844,700 | KMS key arn for the customer s3 location when encrypting with a KMS managed key. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | kms_key_id | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='kmsKeyId')
def kms_key_id(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'kms_key_id') |
@property
@pulumi.getter(name='objectKeyPrefix')
def object_key_prefix(self) -> Optional[str]:
'\n Object key prefix for the customer S3 location.\n '
return pulumi.get(self, 'object_key_prefix') | -596,909,029,895,640,700 | Object key prefix for the customer S3 location. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | object_key_prefix | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='objectKeyPrefix')
def object_key_prefix(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'object_key_prefix') |
def __init__(__self__, *, magnetic_store_retention_period_in_days: int, memory_store_retention_period_in_hours: int):
'\n :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.\n :param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.\n '
pulumi.set(__self__, 'magnetic_store_retention_period_in_days', magnetic_store_retention_period_in_days)
pulumi.set(__self__, 'memory_store_retention_period_in_hours', memory_store_retention_period_in_hours) | 1,808,947,756,490,085,000 | :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
:param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | __init__ | chivandikwa/pulumi-aws | python | def __init__(__self__, *, magnetic_store_retention_period_in_days: int, memory_store_retention_period_in_hours: int):
'\n :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.\n :param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.\n '
pulumi.set(__self__, 'magnetic_store_retention_period_in_days', magnetic_store_retention_period_in_days)
pulumi.set(__self__, 'memory_store_retention_period_in_hours', memory_store_retention_period_in_hours) |
@property
@pulumi.getter(name='magneticStoreRetentionPeriodInDays')
def magnetic_store_retention_period_in_days(self) -> int:
'\n The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.\n '
return pulumi.get(self, 'magnetic_store_retention_period_in_days') | -3,694,460,775,966,215,000 | The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | magnetic_store_retention_period_in_days | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='magneticStoreRetentionPeriodInDays')
def magnetic_store_retention_period_in_days(self) -> int:
'\n \n '
return pulumi.get(self, 'magnetic_store_retention_period_in_days') |
@property
@pulumi.getter(name='memoryStoreRetentionPeriodInHours')
def memory_store_retention_period_in_hours(self) -> int:
'\n The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.\n '
return pulumi.get(self, 'memory_store_retention_period_in_hours') | -7,752,533,847,161,990,000 | The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. | sdk/python/pulumi_aws/timestreamwrite/outputs.py | memory_store_retention_period_in_hours | chivandikwa/pulumi-aws | python | @property
@pulumi.getter(name='memoryStoreRetentionPeriodInHours')
def memory_store_retention_period_in_hours(self) -> int:
'\n \n '
return pulumi.get(self, 'memory_store_retention_period_in_hours') |
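A hedged sketch of how these output types correspond to inputs when declaring a Timestream table with pulumi-aws; the `Args` class and argument names mirror the output classes above but should be verified against the installed SDK version.

```python
import pulumi_aws as aws

# Sketch only: names assumed to mirror the output classes in this module.
table = aws.timestreamwrite.Table(
    "example",
    database_name="example-db",
    table_name="example-table",
    retention_properties=aws.timestreamwrite.TableRetentionPropertiesArgs(
        magnetic_store_retention_period_in_days=30,  # 1..73000
        memory_store_retention_period_in_hours=8,    # 1..8766
    ),
    magnetic_store_write_properties=aws.timestreamwrite.TableMagneticStoreWritePropertiesArgs(
        enable_magnetic_store_writes=True,
    ),
)
```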
def __default_grid__(ax):
'This is a temporary function'
ax.grid(b=True, which='major', color='#000000', alpha=0.2, linestyle='-', linewidth=0.5)
ax.grid(b=True, which='minor', color='#000000', alpha=0.1, linestyle='-', linewidth=0.25)
ax.minorticks_on() | -2,463,206,208,069,694,000 | This is a temporary function | nicenquickplotlib/config_types.py | __default_grid__ | SengerM/nicenquickplotlib | python | def __default_grid__(ax):
ax.grid(b=True, which='major', color='#000000', alpha=0.2, linestyle='-', linewidth=0.5)
ax.grid(b=True, which='minor', color='#000000', alpha=0.1, linestyle='-', linewidth=0.25)
ax.minorticks_on() |
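A minimal sketch of applying `__default_grid__` to a matplotlib `Axes`, assuming the module above is importable. Note that `grid(b=...)` targets older matplotlib releases; newer ones renamed `b` to `visible`.

```python
import matplotlib.pyplot as plt
from nicenquickplotlib.config_types import __default_grid__  # path from this row

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
__default_grid__(ax)  # faint major grid plus an even fainter minor grid
plt.show()
```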
def load_data(filename: str) -> pd.DataFrame:
'\n Load city daily temperature dataset and preprocess data.\n Parameters\n ----------\n filename: str\n Path to city daily temperature dataset\n\n Returns\n -------\n Design matrix and response vector (Temp)\n '
data = pd.read_csv(filename, parse_dates=['Date']).drop_duplicates()
data = data.drop(data[(data['Temp'] < (- 70))].index)
data['DayOfYear'] = data['Date'].dt.dayofyear
return data | 9,173,056,866,655,160,000 | Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to city daily temperature dataset
Returns
-------
Design matrix and response vector (Temp) | exercises/city_temperature_prediction.py | load_data | noamwino/IML.HUJI | python | def load_data(filename: str) -> pd.DataFrame:
'\n Load city daily temperature dataset and preprocess data.\n Parameters\n ----------\n filename: str\n Path to city daily temperature dataset\n\n Returns\n -------\n Design matrix and response vector (Temp)\n '
data = pd.read_csv(filename, parse_dates=['Date']).drop_duplicates()
data = data.drop(data[(data['Temp'] < (- 70))].index)
data['DayOfYear'] = data['Date'].dt.dayofyear
return data |
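A standalone illustration of the `DayOfYear` feature derived in `load_data`:

```python
import pandas as pd

df = pd.DataFrame({
    "Date": pd.to_datetime(["2020-01-01", "2020-02-29", "2020-12-31"]),
    "Temp": [10.0, 11.5, 12.0],
})
df["DayOfYear"] = df["Date"].dt.dayofyear
print(df["DayOfYear"].tolist())  # [1, 60, 366] -- 2020 is a leap year
```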
def question_2(data):
' Exploring data specifically in Israel '
data = data.copy()
data = data[(data['Country'] == 'Israel')]
data['Year'] = data['Year'].astype(str)
fig = px.scatter(data, x='DayOfYear', y='Temp', color='Year', width=1500, height=700, labels={'DayOfYear': 'Day of Year', 'Temp': 'Temperature'}, title='Q2(1) The relation between the day in the year and the temperature in Israel')
fig.update_xaxes(range=[0, 365], tick0=0, dtick=20)
fig.show()
std_by_month = data.groupby('Month').std().reset_index()
fig = px.bar(std_by_month, x='Month', y='Temp', width=1500, height=700, labels={'Temp': 'Std of the daily temperatures'}, title='Q2(2) The Standard Deviation of the Daily Temperatures Per Month in Israel')
fig.data[(- 1)].text = np.round(std_by_month['Temp'], 3)
fig.update_xaxes(tick0=1, dtick=1)
fig.update_traces(textposition='outside')
fig.show() | 543,939,310,610,351,000 | Exploring data specifically in Israel | exercises/city_temperature_prediction.py | question_2 | noamwino/IML.HUJI | python | def question_2(data):
' '
data = data.copy()
data = data[(data['Country'] == 'Israel')]
data['Year'] = data['Year'].astype(str)
fig = px.scatter(data, x='DayOfYear', y='Temp', color='Year', width=1500, height=700, labels={'DayOfYear': 'Day of Year', 'Temp': 'Temperature'}, title='Q2(1) The relation between the day in the year and the temperature in Israel')
fig.update_xaxes(range=[0, 365], tick0=0, dtick=20)
fig.show()
std_by_month = data.groupby('Month').std().reset_index()
fig = px.bar(std_by_month, x='Month', y='Temp', width=1500, height=700, labels={'Temp': 'Std of the daily temperatures'}, title='Q2(2) The Standard Deviation of the Daily Temperatures Per Month in Israel')
fig.data[(- 1)].text = np.round(std_by_month['Temp'], 3)
fig.update_xaxes(tick0=1, dtick=1)
fig.update_traces(textposition='outside')
fig.show() |
def question_3(data):
' Exploring differences between countries'
agg_data_mean = data.groupby(['Country', 'Month']).mean().reset_index()
agg_data_std = data.groupby(['Country', 'Month']).std().reset_index()
fig = px.line(agg_data_mean, x='Month', y='Temp', color='Country', error_y=agg_data_std['Temp'], width=1500, height=700, labels={'Temp': 'Averaged Temperature'}, title='Q3 The Average Monthly Temperatures in Different Countries')
fig.update_xaxes(tick0=1, dtick=1)
fig.show() | -5,551,659,980,031,403,000 | Exploring differences between countries | exercises/city_temperature_prediction.py | question_3 | noamwino/IML.HUJI | python | def question_3(data):
' '
agg_data_mean = data.groupby(['Country', 'Month']).mean().reset_index()
agg_data_std = data.groupby(['Country', 'Month']).std().reset_index()
fig = px.line(agg_data_mean, x='Month', y='Temp', color='Country', error_y=agg_data_std['Temp'], width=1500, height=700, labels={'Temp': 'Averaged Temperature'}, title='Q3 The Average Monthly Temperatures in Different Countries')
fig.update_xaxes(tick0=1, dtick=1)
fig.show() |
def question_4(data):
' Fitting model for different values of `k` '
data = data[(data['Country'] == 'Israel')]
(train_X, train_y, test_X, test_y) = split_train_test(data['DayOfYear'], data['Temp'])
losses = np.array([])
for k in range(1, 11):
poly_fit = PolynomialFitting(k)
poly_fit.fit(train_X.to_numpy(), train_y.to_numpy())
loss = poly_fit.loss(test_X.to_numpy(), test_y.to_numpy())
losses = np.append(losses, round(loss, 2))
print(k, loss)
fig = px.bar(x=range(1, 11), y=losses, width=1500, height=700, labels={'x': 'Polynomials Degrees (k)', 'y': 'Test Error (MSE)'}, title='Q4 Test Errors for Different Polynomials Degrees (k)')
fig.data[(- 1)].text = losses
fig.update_xaxes(tick0=1, dtick=1)
fig.update_traces(textposition='outside')
fig.show() | 5,774,251,136,083,118,000 | Fitting model for different values of `k` | exercises/city_temperature_prediction.py | question_4 | noamwino/IML.HUJI | python | def question_4(data):
' '
data = data[(data['Country'] == 'Israel')]
(train_X, train_y, test_X, test_y) = split_train_test(data['DayOfYear'], data['Temp'])
losses = np.array([])
for k in range(1, 11):
poly_fit = PolynomialFitting(k)
poly_fit.fit(train_X.to_numpy(), train_y.to_numpy())
loss = poly_fit.loss(test_X.to_numpy(), test_y.to_numpy())
losses = np.append(losses, round(loss, 2))
print(k, loss)
fig = px.bar(x=range(1, 11), y=losses, width=1500, height=700, labels={'x': 'Polynomials Degrees (k)', 'y': 'Test Error (MSE)'}, title='Q4 Test Errors for Different Polynomials Degrees (k)')
fig.data[(- 1)].text = losses
fig.update_xaxes(tick0=1, dtick=1)
fig.update_traces(textposition='outside')
fig.show() |
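The course's `PolynomialFitting` class is not shown in this row; below is a rough numpy analogue of the sweep over k (fit a degree-k polynomial to synthetic day-of-year temperatures and report MSE), with all data and names hypothetical:

```python
import numpy as np

rng = np.random.default_rng(0)
day_of_year = rng.uniform(1, 365, size=300)
temp = 12 + 10 * np.sin(2 * np.pi * (day_of_year - 100) / 365) + rng.normal(0, 2, 300)

for k in (1, 3, 5, 7):
    coefs = np.polyfit(day_of_year, temp, deg=k)
    mse = np.mean((np.polyval(coefs, day_of_year) - temp) ** 2)
    print(k, round(mse, 2))  # training error shrinks as k grows
```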
def question_5(data):
' Evaluating fitted model on different countries '
data_israel = data[(data['Country'] == 'Israel')]
poly_fit = PolynomialFitting(k=5)
poly_fit.fit(data_israel['DayOfYear'], data_israel['Temp'])
other_countries = ['Jordan', 'South Africa', 'The Netherlands']
losses = np.array([])
for country in other_countries:
country_data = data[(data['Country'] == country)]
loss = poly_fit.loss(country_data['DayOfYear'], country_data['Temp'])
losses = np.append(losses, loss)
fig = px.bar(x=np.array(other_countries), y=losses, width=700, height=700, labels={'x': 'Country', 'y': 'Losses (MSE)'}, title='Q5 Losses (MSE) per Country With k=5')
fig.data[(- 1)].text = np.round(losses, 3)
fig.update_traces(textposition='outside')
fig.show() | 3,931,820,151,589,127,700 | Evaluating fitted model on different countries | exercises/city_temperature_prediction.py | question_5 | noamwino/IML.HUJI | python | def question_5(data):
' '
data_israel = data[(data['Country'] == 'Israel')]
poly_fit = PolynomialFitting(k=5)
poly_fit.fit(data_israel['DayOfYear'], data_israel['Temp'])
other_countries = ['Jordan', 'South Africa', 'The Netherlands']
losses = np.array([])
for country in other_countries:
country_data = data[(data['Country'] == country)]
loss = poly_fit.loss(country_data['DayOfYear'], country_data['Temp'])
losses = np.append(losses, loss)
fig = px.bar(x=np.array(other_countries), y=losses, width=700, height=700, labels={'x': 'Country', 'y': 'Losses (MSE)'}, title='Q5 Losses (MSE) per Country With k=5')
fig.data[(- 1)].text = np.round(losses, 3)
fig.update_traces(textposition='outside')
fig.show() |
async def test_subquery_access(self):
'This test ensures that accessing a query does not modify it (#780)'
tournament_1 = (await Tournament.create(name='1'))
event_1 = (await Event.create(event_id=1, name='event 1', tournament=tournament_1))
event_2 = (await Event.create(event_id=2, name='event 2', tournament=tournament_1))
team_1 = (await Team.create(id=1, name='team 1'))
team_2 = (await Team.create(id=2, name='team 2'))
(await event_1.participants.add(team_1))
(await event_2.participants.add(team_1, team_2))
self.assertEqual((await event_1.participants.all()), [team_1])
self.assertEqual((await event_2.participants.all()), [team_1, team_2])
sub_query_team_1 = Subquery(Event.filter(participants__id=1).values('event_id'))
sub_query_team_2 = Subquery(Event.filter(participants__id=2).values('event_id'))
query = Event.filter(pk__in=sub_query_team_1)
query = query.filter(pk__in=sub_query_team_2)
self.assertEqual(query.sql(), query.sql())
self.assertEqual((await query.count()), (await query.count()))
self.assertEqual((await query.count()), 1)
self.assertEqual((await query.all()), [event_2]) | 613,092,107,671,665,800 | This test ensures that accessing a query does not modify it (#780) | tests/test_queryset.py | test_subquery_access | spacemanspiff2007/tortoise-orm | python | async def test_subquery_access(self):
tournament_1 = (await Tournament.create(name='1'))
event_1 = (await Event.create(event_id=1, name='event 1', tournament=tournament_1))
event_2 = (await Event.create(event_id=2, name='event 2', tournament=tournament_1))
team_1 = (await Team.create(id=1, name='team 1'))
team_2 = (await Team.create(id=2, name='team 2'))
(await event_1.participants.add(team_1))
(await event_2.participants.add(team_1, team_2))
self.assertEqual((await event_1.participants.all()), [team_1])
self.assertEqual((await event_2.participants.all()), [team_1, team_2])
sub_query_team_1 = Subquery(Event.filter(participants__id=1).values('event_id'))
sub_query_team_2 = Subquery(Event.filter(participants__id=2).values('event_id'))
query = Event.filter(pk__in=sub_query_team_1)
query = query.filter(pk__in=sub_query_team_2)
self.assertEqual(query.sql(), query.sql())
self.assertEqual((await query.count()), (await query.count()))
self.assertEqual((await query.count()), 1)
self.assertEqual((await query.all()), [event_2]) |
def t(eng, chinese):
"return English or Chinese text according to the user's browser language"
return (chinese if ('zh' in get_info().user_language) else eng) | 5,158,654,429,831,208,000 | return English or Chinese text according to the user's browser language | demos/output_usage.py | t | songshanyuwu/PyWebIO | python | def t(eng, chinese):
return (chinese if ('zh' in get_info().user_language) else eng) |
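The dispatch in `t` is just a substring test on the browser-reported language; a self-contained mock (the real value comes from `pywebio.session.get_info().user_language`):

```python
user_language = "zh-CN"  # stand-in for get_info().user_language
eng, chinese = "Hello", "你好"
print(chinese if "zh" in user_language else eng)  # -> 你好
```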
async def main():
'PyWebIO Output demo\n\n Demonstrate various output usage supported by PyWebIO.\n 演示PyWebIO输出模块的使用\n '
put_markdown(t('# PyWebIO Output demo\n \n You can get the source code of this demo in [here](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)\n \n This demo only introduces part of the functions of the PyWebIO output module. For the complete features, please refer to the [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html).\n \n The output functions are all defined in the `pywebio.output` module and can be imported using `from pywebio.output import *`.\n \n ', '# PyWebIO 输出演示\n \n 在[这里](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)可以获取本Demo的源码。\n \n 本Demo仅提供了PyWebIO输出模块的部分功能的演示,完整特性请参阅[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)。\n \n PyWebIO的输出函数都定义在 `pywebio.output` 模块中,可以使用 `from pywebio.output import *` 引入。\n\n ### 基本输出\n PyWebIO提供了一些便捷函数来输出表格、链接等格式:\n '), strip_indent=4)
code_block(t('\n # Text Output\n put_text("Hello world!")\n\n # Table Output\n put_table([\n [\'Commodity\', \'Price\'],\n [\'Apple\', \'5.5\'],\n [\'Banana\', \'7\'],\n ])\n \n # Markdown Output\n put_markdown(\'~~Strikethrough~~\')\n \n # File Output\n put_file(\'hello_word.txt\', b\'hello word!\')\n ', '\n # 文本输出\n put_text("Hello world!")\n\n # 表格输出\n put_table([\n [\'商品\', \'价格\'],\n [\'苹果\', \'5.5\'],\n [\'香蕉\', \'7\'],\n ])\n\n # Markdown输出\n put_markdown(\'~~删除线~~\')\n\n # 文件输出\n put_file(\'hello_word.txt\', b\'hello word!\')\n '))
put_markdown(t('For all output functions provided by PyWebIO, please refer to the document.\n \n ### Combined Output\n The output functions whose name starts with put_ can be combined with some output functions as part of the final output:\n\n You can pass `put_xxx()` calls to `put_table()` as cell content:\n ', 'PyWebIO提供的全部输出函数请参考PyWebIO文档\n \n ### 组合输出\n \n 函数名以 `put_` 开始的输出函数,可以与一些输出函数组合使用,作为最终输出的一部分。\n\n 比如`put_table()`支持以`put_xxx()`调用作为单元格内容:\n '), strip_indent=4)
code_block("\n put_table([\n ['Type', 'Content'],\n ['html', put_html('X<sup>2</sup>')],\n ['text', '<hr/>'], # equal to ['text', put_text('<hr/>')]\n ['buttons', put_buttons(['A', 'B'], onclick=toast)], \n ['markdown', put_markdown('`Awesome PyWebIO!`')],\n ['file', put_file('hello.text', b'hello world')],\n ['table', put_table([['A', 'B'], ['C', 'D']])]\n ])\n ")
put_markdown(t('Similarly, you can pass `put_xxx()` calls to `popup()` as the popup content:', '类似地,`popup()`也可以将`put_xxx()`调用作为弹窗内容:'), strip_indent=4)
code_block("\n popup('Popup title', [\n put_html('<h3>Popup Content</h3>'),\n 'plain html: <br/>', # equal to put_text('plain html: <br/>')\n put_table([['A', 'B'], ['C', 'D']]),\n put_buttons(['close_popup()'], onclick=lambda _: close_popup())\n ])\n ")
put_markdown(t('For more output functions that accept `put_xxx()` calls as parameters, please refer to corresponding function documentation.', '更多接受`put_xxx()`作为参数的输出函数请参考函数文档。'))
put_markdown((t('### Callback\n PyWebIO allows you to output some buttons, and the provided callback function will be executed when the button is clicked.\n \n This is an example:%s\n The call to `put_table()` will not block. When the user clicks a button, the corresponding callback function will be invoked:\n ', '### 事件回调\n PyWebIO允许你输出一些控件,当控件被点击时执行提供的回调函数,就像编写GUI程序一样。\n \n 下面是一个例子:%s\n `put_table()`的调用不会阻塞。当用户点击了某行中的按钮时,PyWebIO会自动调用相应的回调函数:\n ') % '\n ```python\n from functools import partial\n\n def edit_row(choice, row):\n put_markdown("> You click `%s` button at row `%s`" % (choice, row))\n\n put_table([\n [\'Idx\', \'Actions\'],\n [1, put_buttons([\'edit\', \'delete\'], onclick=partial(edit_row, row=1))],\n [2, put_buttons([\'edit\', \'delete\'], onclick=partial(edit_row, row=2))],\n [3, put_buttons([\'edit\', \'delete\'], onclick=partial(edit_row, row=3))],\n ])\n ```\n '), strip_indent=4)
from functools import partial
@use_scope('table-callback')
def edit_row(choice, row):
put_markdown(('> You click `%s` button at row `%s`' % (choice, row)))
put_table([['Idx', 'Actions'], [1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))], [2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))], [3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))]])
set_scope('table-callback')
put_markdown((t('Of course, PyWebIO also supports outputting individual buttons:', '当然,PyWebIO还支持单独的按钮控件:') + '\n ```python\n def btn_click(btn_val):\n put_markdown("> You click `%s` button" % btn_val)\n\n put_buttons([\'A\', \'B\', \'C\'], onclick=btn_click)\n ```\n '), strip_indent=4)
@use_scope('button-callback')
def btn_click(btn_val):
put_markdown(('> You click `%s` button' % btn_val))
put_buttons(['A', 'B', 'C'], onclick=btn_click)
set_scope('button-callback')
put_markdown((t('### Output Scope\n \n PyWebIO uses the scope model to give more control to the location of content output. The output area of PyWebIO can be divided into different output domains. The output domain is called Scope in PyWebIO.\n\n The output domain is a container of output content, and each output domain is arranged vertically, and the output domains can also be nested.\n\n Each output function (function name like `put_xxx()`) will output its content to a scope, the default is "current scope". "current scope" is determined by the runtime context. The output function can also manually specify the scope to output. The scope name is unique within the session.\n \n You can use `use_scope()` to open and enter a new output scope, or enter an existing output scope: %s\n The above code will generate the following Scope layout:\n ', '### 输出域Scope\n\n PyWebIO使用Scope模型来对内容输出的位置进行灵活地控制,PyWebIO的内容输出区可以划分出不同的输出域,PyWebIO将输出域称作`Scope`。\n \n 输出域为输出内容的容器,各个输出域之间上下排列,输出域也可以进行嵌套。\n \n 每个输出函数(函数名形如 `put_xxx()` )都会将内容输出到一个Scope,默认为”当前Scope”,”当前Scope”由运行时上下文确定,输出函数也可以手动指定输出到的Scope。Scope名在会话内唯一。\n \n 可以使用 `use_scope()` 开启并进入一个新的输出域,或进入一个已经存在的输出域: %s\n 以上代码将会产生如下Scope布局:\n ') % "\n ```python\n with use_scope('A'):\n put_text('Text in scope A')\n \n with use_scope('B'):\n put_text('Text in scope B')\n \n with use_scope('C'):\n put_text('Text in scope C')\n ```\n "), strip_indent=4)
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
put_html('<style> \n #pywebio-scope-A {border: 1px solid red;} \n #pywebio-scope-B {border: 1px solid blue;margin:2px} \n #pywebio-scope-C {border: 1px solid green;margin-top:2px} \n </style><br/>')
put_markdown(t('The output function (function name like `put_xxx()`) will output the content to the "current scope" by default, and the "current scope" of the runtime context can be set by `use_scope()`.\n \n In addition, you can use the `scope` parameter of the output function to specify the destination scope to output:\n ', '\n 输出函数(函数名形如 `put_xxx()` )在默认情况下,会将内容输出到”当前Scope”,可以通过 `use_scope()` 设置运行时上下文的”当前Scope”。\n \n 此外,也可以通过输出函数的 scope 参数指定输出的目的Scope:\n '), strip_indent=4)
put_grid([[put_code("put_text('A', scope='A')", 'python'), None, put_buttons([t('Run', '运行')], [(lambda : put_text('A', scope='A'))])], [put_code("put_text('B', scope='B')", 'python'), None, put_buttons([t('Run', '运行')], [(lambda : put_text('B', scope='B'))])], [put_code("put_text('C', scope='C')", 'python'), None, put_buttons([t('Run', '运行')], [(lambda : put_text('C', scope='C'))])]], cell_widths='1fr 10px auto')
put_markdown((t('The output content can be inserted into any position of the target scope by using the `position` parameter of the output function.', '输出函数可以使用`position`参数指定内容在Scope中输出的位置') + "\n ```python\n put_text(now(), scope='A', position=...)\n ```\n "), strip_indent=4)
import datetime
put_buttons([(('position=%s' % i), i) for i in [1, 2, 3, (- 1), (- 2), (- 3)]], (lambda i: put_text(datetime.datetime.now(), position=i, scope='A')), small=True)
put_markdown(t('In addition to `use_scope()`, PyWebIO also provides the following scope control functions:', '除了 `use_scope()` , PyWebIO同样提供了以下scope控制函数: '))
put_grid([[put_code("clear('B') # Clear content of Scope B", 'python'), None, put_buttons(['运行'], [(lambda : clear('B'))])], [put_code("remove('C') # Remove Scope C", 'python'), None, put_buttons(['运行'], [(lambda : remove('C'))])], [put_code("scroll_to('A') # Scroll the page to position of Scope A", 'python'), None, put_buttons(['运行'], [(lambda : scroll_to('A'))])]], cell_widths='1fr 10px auto')
put_markdown(t('### Layout\n \n In general, using the various output functions introduced above is enough to output what you want, but these outputs are arranged vertically. If you want to make a more complex layout (such as displaying a code block on the left side of the page and an image on the right), you need to use layout functions.\n \n The `pywebio.output` module provides 3 layout functions, and you can create complex layouts by combining them:\n \n - `put_row()` : Use row layout to output content. The content is arranged horizontally\n - `put_column()` : Use column layout to output content. The content is arranged vertically\n - `put_grid()` : Output content using grid layout\n \n Here is an example by combining `put_row()` and `put_column()`:\n ', '### 布局\n 一般情况下,使用上文介绍的各种输出函数足以完成各种内容的展示,但直接调用输出函数产生的输出之间都是竖直排列的,如果想实现更复杂的布局(比如在页面左侧显示一个代码块,在右侧显示一个图像),就需要借助布局函数。\n\n `pywebio.output` 模块提供了3个布局函数,通过对它们进行组合可以完成各种复杂的布局:\n \n - `put_row()` : 使用行布局输出内容. 内容在水平方向上排列\n - `put_column()` : 使用列布局输出内容. 内容在竖直方向上排列\n - `put_grid()` : 使用网格布局输出内容\n\n 比如,通过组合 `put_row()` 和 `put_column()` 实现的布局:\n '), strip_indent=4)
code_block(("\n put_row([\n put_column([\n put_code('A'),\n put_row([\n put_code('B1'), None, # %s\n put_code('B2'), None,\n put_code('B3'),\n ]),\n put_code('C'),\n ]), None,\n put_code('D'), None,\n put_code('E')\n ])\n " % t('None represents the space between the output', 'None 表示输出之间的空白')))
put_markdown(t('### Style\n If you are familiar with CSS styles, you can use the `style()` function to set a custom style for the output.\n\n You can set the CSS style for a single `put_xxx()` output:\n ', '### 样式\n \n 如果你熟悉 CSS样式 ,你还可以使用 `style()` 函数给输出设定自定义样式。\n\n 可以给单个的 `put_xxx()` 输出设定CSS样式,也可以配合组合输出使用:\n '), strip_indent=4)
code_block("\n style(put_text('Red'), 'color: red')\n \n put_table([\n ['A', 'B'],\n ['C', style(put_text('Red'), 'color: red')],\n ])\n ", strip_indent=4)
put_markdown(t('`style()` also accepts a list of output calls:', '`style()` 也接受列表作为输入:'))
code_block("\n style([\n put_text('Red'),\n put_markdown('~~del~~')\n ], 'color: red')\n \n put_collapse('title', style([\n put_text('text'),\n put_markdown('~~del~~'),\n ], 'margin-left: 20px'))\n\n ", strip_indent=4)
put_markdown(t('----\n For more information about output of PyWebIO, please visit PyWebIO [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html) and [output module documentation](https://pywebio.readthedocs.io/zh_CN/latest/output.html).\n ', '----\n PyWebIO的输出演示到这里就结束了,更多内容请访问PyWebIO[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)和[output模块文档](https://pywebio.readthedocs.io/zh_CN/latest/output.html)。\n '), lstrip=True)
(await hold()) | 3,378,511,886,882,203,600 | PyWebIO Output demo
Demonstrate various output usage supported by PyWebIO.
演示PyWebIO输出模块的使用 | demos/output_usage.py | main | songshanyuwu/PyWebIO | python | async def main():
'PyWebIO Output demo\n\n Demonstrate various output usage supported by PyWebIO.\n 演示PyWebIO输出模块的使用\n '
put_markdown(t('# PyWebIO Output demo\n \n You can get the source code of this demo in [here](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)\n \n This demo only introduces part of the functions of the PyWebIO output module. For the complete features, please refer to the [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html).\n \n The output functions are all defined in the `pywebio.output` module and can be imported using `from pywebio.output import *`.\n \n ', '# PyWebIO 输出演示\n \n 在[这里](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)可以获取本Demo的源码。\n \n 本Demo仅提供了PyWebIO输出模块的部分功能的演示,完整特性请参阅[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)。\n \n PyWebIO的输出函数都定义在 `pywebio.output` 模块中,可以使用 `from pywebio.output import *` 引入。\n\n ### 基本输出\n PyWebIO提供了一些便捷函数来输出表格、链接等格式:\n '), strip_indent=4)
code_block(t('\n # Text Output\n put_text("Hello world!")\n\n # Table Output\n put_table([\n [\'Commodity\', \'Price\'],\n [\'Apple\', \'5.5\'],\n [\'Banana\', \'7\'],\n ])\n \n # Markdown Output\n put_markdown(\'~~Strikethrough~~\')\n \n # File Output\n put_file(\'hello_word.txt\', b\'hello word!\')\n ', '\n # 文本输出\n put_text("Hello world!")\n\n # 表格输出\n put_table([\n [\'商品\', \'价格\'],\n [\'苹果\', \'5.5\'],\n [\'香蕉\', \'7\'],\n ])\n\n # Markdown输出\n put_markdown(\'~~删除线~~\')\n\n # 文件输出\n put_file(\'hello_word.txt\', b\'hello word!\')\n '))
put_markdown(t('For all output functions provided by PyWebIO, please refer to the document.\n \n ### Combined Output\n The output functions whose name starts with put_ can be combined with some output functions as part of the final output:\n\n You can pass `put_xxx()` calls to `put_table()` as cell content:\n ', 'PyWebIO提供的全部输出函数请参考PyWebIO文档\n \n ### 组合输出\n \n 函数名以 `put_` 开始的输出函数,可以与一些输出函数组合使用,作为最终输出的一部分。\n\n 比如`put_table()`支持以`put_xxx()`调用作为单元格内容:\n '), strip_indent=4)
code_block("\n put_table([\n ['Type', 'Content'],\n ['html', put_html('X<sup>2</sup>')],\n ['text', '<hr/>'], # equal to ['text', put_text('<hr/>')]\n ['buttons', put_buttons(['A', 'B'], onclick=toast)], \n ['markdown', put_markdown('`Awesome PyWebIO!`')],\n ['file', put_file('hello.text', b'hello world')],\n ['table', put_table([['A', 'B'], ['C', 'D']])]\n ])\n ")
put_markdown(t('Similarly, you can pass `put_xxx()` calls to `popup()` as the popup content:', '类似地,`popup()`也可以将`put_xxx()`调用作为弹窗内容:'), strip_indent=4)
code_block("\n popup('Popup title', [\n put_html('<h3>Popup Content</h3>'),\n 'plain html: <br/>', # equal to put_text('plain html: <br/>')\n put_table([['A', 'B'], ['C', 'D']]),\n put_buttons(['close_popup()'], onclick=lambda _: close_popup())\n ])\n ")
put_markdown(t('For more output functions that accept `put_xxx()` calls as parameters, please refer to corresponding function documentation.', '更多接受`put_xxx()`作为参数的输出函数请参考函数文档。'))
put_markdown((t('### Callback\n PyWebIO allows you to output some buttons, and the provided callback function will be executed when the button is clicked.\n \n This is an example:%s\n The call to `put_table()` will not block. When the user clicks a button, the corresponding callback function will be invoked:\n ', '### 事件回调\n PyWebIO允许你输出一些控件,当控件被点击时执行提供的回调函数,就像编写GUI程序一样。\n \n 下面是一个例子:%s\n `put_table()`的调用不会阻塞。当用户点击了某行中的按钮时,PyWebIO会自动调用相应的回调函数:\n ') % '\n ```python\n from functools import partial\n\n def edit_row(choice, row):\n put_markdown("> You clicked `%s` button at row `%s`" % (choice, row))\n\n put_table([\n [\'Idx\', \'Actions\'],\n [1, put_buttons([\'edit\', \'delete\'], onclick=partial(edit_row, row=1))],\n [2, put_buttons([\'edit\', \'delete\'], onclick=partial(edit_row, row=2))],\n [3, put_buttons([\'edit\', \'delete\'], onclick=partial(edit_row, row=3))],\n ])\n ```\n '), strip_indent=4)
from functools import partial
@use_scope('table-callback')
def edit_row(choice, row):
put_markdown(('> You clicked `%s` button at row `%s`' % (choice, row)))
put_table([['Idx', 'Actions'], [1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))], [2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))], [3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))]])
set_scope('table-callback')
put_markdown((t('Of course, PyWebIO also supports outputting an individual button:', '当然,PyWebIO还支持单独的按钮控件:') + '\n ```python\n def btn_click(btn_val):\n put_markdown("> You clicked `%s` button" % btn_val)\n\n put_buttons([\'A\', \'B\', \'C\'], onclick=btn_click)\n ```\n '), strip_indent=4)
@use_scope('button-callback')
def btn_click(btn_val):
put_markdown(('> You clicked `%s` button' % btn_val))
put_buttons(['A', 'B', 'C'], onclick=btn_click)
set_scope('button-callback')
put_markdown((t('### Output Scope\n \n PyWebIO uses the scope model to give more control to the location of content output. The output area of PyWebIO can be divided into different output domains. The output domain is called Scope in PyWebIO.\n\n The output domain is a container of output content, and each output domain is arranged vertically, and the output domains can also be nested.\n\n Each output function (function name like `put_xxx()`) will output its content to a scope, the default is "current scope". "current scope" is determined by the runtime context. The output function can also manually specify the scope to output. The scope name is unique within the session.\n \n You can use `use_scope()` to open and enter a new output scope, or enter an existing output scope: %s\n The above code will generate the following Scope layout:\n ', '### 输出域Scope\n\n PyWebIO使用Scope模型来对内容输出的位置进行灵活地控制,PyWebIO的内容输出区可以划分出不同的输出域,PyWebIO将输出域称作`Scope`。\n \n 输出域为输出内容的容器,各个输出域之间上下排列,输出域也可以进行嵌套。\n \n 每个输出函数(函数名形如 `put_xxx()` )都会将内容输出到一个Scope,默认为”当前Scope”,”当前Scope”由运行时上下文确定,输出函数也可以手动指定输出到的Scope。Scope名在会话内唯一。\n \n 可以使用 `use_scope()` 开启并进入一个新的输出域,或进入一个已经存在的输出域: %s\n 以上代码将会产生如下Scope布局:\n ') % "\n ```python\n with use_scope('A'):\n put_text('Text in scope A')\n \n with use_scope('B'):\n put_text('Text in scope B')\n \n with use_scope('C'):\n put_text('Text in scope C')\n ```\n "), strip_indent=4)
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
put_html('<style> \n #pywebio-scope-A {border: 1px solid red;} \n #pywebio-scope-B {border: 1px solid blue;margin:2px} \n #pywebio-scope-C {border: 1px solid green;margin-top:2px} \n </style><br/>')
put_markdown(t('The output function (function name like `put_xxx()`) will output the content to the "current scope" by default, and the "current scope" of the runtime context can be set by `use_scope()`.\n \n In addition, you can use the `scope` parameter of the output function to specify the destination scope to output:\n ', '\n 输出函数(函数名形如 `put_xxx()` )在默认情况下,会将内容输出到”当前Scope”,可以通过 `use_scope()` 设置运行时上下文的”当前Scope”。\n \n 此外,也可以通过输出函数的 scope 参数指定输出的目的Scope:\n '), strip_indent=4)
put_grid([[put_code("put_text('A', scope='A')", 'python'), None, put_buttons([t('Run', '运行')], [(lambda : put_text('A', scope='A'))])], [put_code("put_text('B', scope='B')", 'python'), None, put_buttons([t('Run', '运行')], [(lambda : put_text('B', scope='B'))])], [put_code("put_text('C', scope='C')", 'python'), None, put_buttons([t('Run', '运行')], [(lambda : put_text('C', scope='C'))])]], cell_widths='1fr 10px auto')
put_markdown((t('The output content can be inserted into any position of the target scope by using the `position` parameter of the output function.', '输出函数可以使用`position`参数指定内容在Scope中输出的位置') + "\n ```python\n put_text(now(), scope='A', position=...)\n ```\n "), strip_indent=4)
import datetime
put_buttons([(('position=%s' % i), i) for i in [1, 2, 3, (- 1), (- 2), (- 3)]], (lambda i: put_text(datetime.datetime.now(), position=i, scope='A')), small=True)
put_markdown(t('In addition to `use_scope()`, PyWebIO also provides the following scope control functions:', '除了 `use_scope()` , PyWebIO同样提供了以下scope控制函数: '))
put_grid([[put_code("clear('B') # Clear content of Scope B", 'python'), None, put_buttons(['运行'], [(lambda : clear('B'))])], [put_code("remove('C') # Remove Scope C", 'python'), None, put_buttons(['运行'], [(lambda : remove('C'))])], [put_code("scroll_to('A') # Scroll the page to position of Scope A", 'python'), None, put_buttons(['运行'], [(lambda : scroll_to('A'))])]], cell_widths='1fr 10px auto')
put_markdown(t('### Layout\n \n In general, using the various output functions introduced above is enough to output what you want, but these outputs are arranged vertically. If you want to make a more complex layout (such as displaying a code block on the left side of the page and an image on the right), you need to use layout functions.\n \n The `pywebio.output` module provides 3 layout functions, and you can create complex layouts by combining them:\n \n - `put_row()` : Use row layout to output content. The content is arranged horizontally\n - `put_column()` : Use column layout to output content. The content is arranged vertically\n - `put_grid()` : Output content using grid layout\n \n Here is an example by combining `put_row()` and `put_column()`:\n ', '### 布局\n 一般情况下,使用上文介绍的各种输出函数足以完成各种内容的展示,但直接调用输出函数产生的输出之间都是竖直排列的,如果想实现更复杂的布局(比如在页面左侧显示一个代码块,在右侧显示一个图像),就需要借助布局函数。\n\n `pywebio.output` 模块提供了3个布局函数,通过对它们进行组合可以完成各种复杂的布局:\n \n - `put_row()` : 使用行布局输出内容. 内容在水平方向上排列\n - `put_column()` : 使用列布局输出内容. 内容在竖直方向上排列\n - `put_grid()` : 使用网格布局输出内容\n\n 比如,通过组合 `put_row()` 和 `put_column()` 实现的布局:\n '), strip_indent=4)
code_block(("\n put_row([\n put_column([\n put_code('A'),\n put_row([\n put_code('B1'), None, # %s\n put_code('B2'), None,\n put_code('B3'),\n ]),\n put_code('C'),\n ]), None,\n put_code('D'), None,\n put_code('E')\n ])\n " % t('None represents the space between the output', 'None 表示输出之间的空白')))
put_markdown(t('### Style\n If you are familiar with CSS styles, you can use the `style()` function to set a custom style for the output.\n\n You can set the CSS style for a single `put_xxx()` output:\n ', '### 样式\n \n 如果你熟悉 CSS样式 ,你还可以使用 `style()` 函数给输出设定自定义样式。\n\n 可以给单个的 `put_xxx()` 输出设定CSS样式,也可以配合组合输出使用:\n '), strip_indent=4)
code_block("\n style(put_text('Red'), 'color: red')\n \n put_table([\n ['A', 'B'],\n ['C', style(put_text('Red'), 'color: red')],\n ])\n ", strip_indent=4)
put_markdown(t('`style()` also accepts a list of output calls:', '`style()` 也接受列表作为输入:'))
code_block("\n style([\n put_text('Red'),\n put_markdown('~~del~~')\n ], 'color: red')\n \n put_collapse('title', style([\n put_text('text'),\n put_markdown('~~del~~'),\n ], 'margin-left: 20px'))\n\n ", strip_indent=4)
put_markdown(t('----\n For more information about output of PyWebIO, please visit PyWebIO [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html) and [output module documentation](https://pywebio.readthedocs.io/zh_CN/latest/output.html).\n ', '----\n PyWebIO的输出演示到这里就结束了,更多内容请访问PyWebIO[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)和[output模块文档](https://pywebio.readthedocs.io/zh_CN/latest/output.html)。\n '), lstrip=True)
(await hold()) |
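Note for readers: the demo body above relies on two helpers, `t()` and `code_block()`, that are defined elsewhere in the demo file and are not part of this record. A minimal sketch of plausible implementations follows; the `code_block()` behavior is assumed, while `t()` follows PyWebIO's `session_info.user_language` convention:

```python
from pywebio.output import put_code
from pywebio.session import info as session_info

def t(eng, chinese):
    """Return the Chinese text when the browser locale is Chinese, else the English text."""
    return chinese if 'zh' in session_info.user_language else eng

def code_block(code, strip_indent=4):
    """Assumed helper: de-indent a demo snippet and render it as highlighted Python."""
    if strip_indent:
        lines = (line[strip_indent:] if line.startswith(' ' * strip_indent) else line
                 for line in code.splitlines())
        code = '\n'.join(lines)
    put_code(code.strip(), 'python')
```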
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor.\n **kwargs: Additional keyword arguments.\n '
raise NotImplementedError | 7,005,630,701,422,536,000 | Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor.\n **kwargs: Additional keyword arguments.\n '
raise NotImplementedError |
def get_config(self):
'Returns the configuration of the initializer as a JSON-serializable dict.\n\n Returns:\n A JSON-serializable Python dict.\n '
return {} | 6,964,281,744,853,564,000 | Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict. | keras/initializers/initializers_v2.py | get_config | StanislavParovoy/Keras | python | def get_config(self):
'Returns the configuration of the initializer as a JSON-serializable dict.\n\n Returns:\n A JSON-serializable Python dict.\n '
return {} |
@classmethod
def from_config(cls, config):
'Instantiates an initializer from a configuration dictionary.\n\n Example:\n\n ```python\n initializer = RandomUniform(-1, 1)\n config = initializer.get_config()\n initializer = RandomUniform.from_config(config)\n ```\n\n Args:\n config: A Python dictionary, the output of `get_config`.\n\n Returns:\n A `tf.keras.initializers.Initializer` instance.\n '
config.pop('dtype', None)
return cls(**config) | -3,684,884,346,167,467,500 | Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config`.
Returns:
A `tf.keras.initializers.Initializer` instance. | keras/initializers/initializers_v2.py | from_config | StanislavParovoy/Keras | python | @classmethod
def from_config(cls, config):
'Instantiates an initializer from a configuration dictionary.\n\n Example:\n\n ```python\n initializer = RandomUniform(-1, 1)\n config = initializer.get_config()\n initializer = RandomUniform.from_config(config)\n ```\n\n Args:\n config: A Python dictionary, the output of `get_config`.\n\n Returns:\n A `tf.keras.initializers.Initializer` instance.\n '
config.pop('dtype', None)
return cls(**config) |
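Together, `__call__`, `get_config` and `from_config` define the contract a custom initializer must honor. A minimal illustrative subclass is sketched below; the class name and scaling logic are invented for the example:

```python
import tensorflow as tf

class ScaledOnes(tf.keras.initializers.Initializer):
    """Illustrative initializer: ones multiplied by a configurable scale."""

    def __init__(self, scale=1.0):
        self.scale = scale

    def __call__(self, shape, dtype=None, **kwargs):
        dtype = dtype or tf.keras.backend.floatx()
        return self.scale * tf.ones(shape, dtype=dtype)

    def get_config(self):
        # Must be JSON-serializable, as get_config's docstring requires.
        return {'scale': self.scale}

# Round trip through the config, mirroring from_config's docstring example:
init = ScaledOnes(scale=0.5)
restored = ScaledOnes.from_config(init.get_config())
assert restored.get_config() == init.get_config()
```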
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
return super(Zeros, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | 933,338,983,785,517,400 | Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
return super(Zeros, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
return super(Ones, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | 3,836,736,980,779,496,400 | Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
return super(Ones, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to `self.value`.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. If not specified,\n `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
del kwargs
return tf.constant(self.value, dtype=_get_dtype(dtype), shape=shape) | -4,842,611,882,655,564,000 | Returns a tensor object initialized to `self.value`.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not specified,
`tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to `self.value`.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. If not specified,\n `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
del kwargs
return tf.constant(self.value, dtype=_get_dtype(dtype), shape=shape) |
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point and integer\n types are supported. If not specified,\n `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
return super(RandomUniform, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | 3,468,556,579,783,864,300 | Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported. If not specified,
`tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point and integer\n types are supported. If not specified,\n `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n '
return super(RandomUniform, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to random normal values.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used, which\n defaults to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(RandomNormal, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | 757,155,504,251,613,600 | Returns a tensor object initialized to random normal values.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to random normal values.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used, which\n defaults to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(RandomNormal, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to random normal values (truncated).\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used, which\n defaults to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(TruncatedNormal, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | 3,453,308,935,921,840,600 | Returns a tensor object initialized to random normal values (truncated).
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to random normal values (truncated).\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used, which\n defaults to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(TruncatedNormal, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used, which\n defaults to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(VarianceScaling, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | 8,955,783,661,739,036,000 | Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used, which\n defaults to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(VarianceScaling, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
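VarianceScaling is the general form behind several named Keras initializers; a short sketch of two well-known parameterizations (Glorot-uniform and He-normal equivalents):

```python
import tensorflow as tf

# GlorotUniform corresponds to VarianceScaling(scale=1.0, mode='fan_avg',
# distribution='uniform'); HeNormal to scale=2.0, mode='fan_in',
# distribution='truncated_normal'.
glorot_like = tf.keras.initializers.VarianceScaling(
    scale=1.0, mode='fan_avg', distribution='uniform')
he_like = tf.keras.initializers.VarianceScaling(
    scale=2.0, mode='fan_in', distribution='truncated_normal')
```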
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to an orthogonal matrix.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(Orthogonal, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | 4,775,635,297,769,653,000 | Returns a tensor object initialized to an orthogonal matrix.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to an orthogonal matrix.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(Orthogonal, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to a 2D identity matrix.\n\n Args:\n shape: Shape of the tensor. It should have exactly rank 2.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(Identity, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) | -860,620,525,179,975,700 | Returns a tensor object initialized to a 2D identity matrix.
Args:
shape: Shape of the tensor. It should have exactly rank 2.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments. | keras/initializers/initializers_v2.py | __call__ | StanislavParovoy/Keras | python | def __call__(self, shape, dtype=None, **kwargs):
'Returns a tensor object initialized to a 2D identity matrix.\n\n Args:\n shape: Shape of the tensor. It should have exactly rank 2.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which defaults to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n '
return super(Identity, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs) |
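A short usage sketch tying the initializer records above together, passing instances to a Keras layer and calling one directly (layer sizes are arbitrary):

```python
import tensorflow as tf

layer = tf.keras.layers.Dense(
    units=16,
    kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05),
    bias_initializer=tf.keras.initializers.Zeros(),
)
# Initializers can also be called directly to materialize a tensor:
w = tf.keras.initializers.Orthogonal()(shape=(8, 8))
```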
def _get_vlan(self):
'\n Getter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)\n '
return self.__vlan | 7,771,124,431,135,386,000 | Getter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list) | pybind/nos/v6_0_2f/interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/__init__.py | _get_vlan | extremenetworks/pybind | python | def _get_vlan(self):
'\n \n '
return self.__vlan |
def _set_vlan(self, v, load=False):
'\n Setter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_vlan is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_vlan() directly.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=YANGListType('access_vlan_id access_mac_group', vlan.vlan, yang_name='vlan', rest_name='rspan-vlan', parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name='vlan', rest_name='rspan-vlan', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'vlan must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'access-vlan-id access-mac-group\', extensions={u\'tailf-common\': {u\'callpoint\': u\'rspan-mac-group-vlan-classification-config-phy\', u\'cli-suppress-list-no\': None, u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'alt-name\': u\'rspan-vlan\'}}), is_container=\'list\', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'rspan-mac-group-vlan-classification-config-phy\', u\'cli-suppress-list-no\': None, u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'alt-name\': u\'rspan-vlan\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'list\', is_config=True)'})
self.__vlan = t
if hasattr(self, '_set'):
self._set() | -3,941,033,711,324,643,300 | Setter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan() directly. | pybind/nos/v6_0_2f/interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/__init__.py | _set_vlan | extremenetworks/pybind | python | def _set_vlan(self, v, load=False):
'\n Setter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_vlan is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_vlan() directly.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=YANGListType('access_vlan_id access_mac_group', vlan.vlan, yang_name='vlan', rest_name='rspan-vlan', parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name='vlan', rest_name='rspan-vlan', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'vlan must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'access-vlan-id access-mac-group\', extensions={u\'tailf-common\': {u\'callpoint\': u\'rspan-mac-group-vlan-classification-config-phy\', u\'cli-suppress-list-no\': None, u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'alt-name\': u\'rspan-vlan\'}}), is_container=\'list\', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'rspan-mac-group-vlan-classification-config-phy\', u\'cli-suppress-list-no\': None, u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'alt-name\': u\'rspan-vlan\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'list\', is_config=True)'})
self.__vlan = t
if hasattr(self, '_set'):
self._set() |
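These getter/setter pairs are pyangbind-generated; consumers normally assign through the public `vlan` property rather than calling the private methods. A heavily hedged usage sketch follows; the instantiation path and the space-separated composite key format ('access-vlan-id access-mac-group') are inferred from the YANG metadata above, not verified against the repository:

```python
# Hypothetical usage of the generated binding class.
intf = hundredgigabitethernet()  # assumed binding entry point
access = intf.switchport.access_mac_group_rspan_vlan_classification.access
# YANGListType lists are populated via .add(); composite keys are
# space-separated in pyangbind ('<access-vlan-id> <access-mac-group>').
access.vlan.add('100 mac-group-1')
```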
def item_count(self):
'get the number of items in the list'
return GroceryItem.objects.filter(list=self).count() | -8,491,763,321,385,804,000 | get the number of items in the list | v1/list/models.py | item_count | BitFis/openeats-api | python | def item_count(self):
return GroceryItem.objects.filter(list=self).count() |
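A tiny illustrative call site for the helper above; the `name` and `title` fields are hypothetical, and only the `list` foreign key is implied by the record:

```python
# Hypothetical Django usage; field names other than `list` are invented.
shopping = GroceryList.objects.create(name='weekly shop')
GroceryItem.objects.create(list=shopping, title='milk')
GroceryItem.objects.create(list=shopping, title='bread')
assert shopping.item_count() == 2
```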
def _get_error_message_from_exception(self, e):
' This method is used to get an appropriate error message from the exception.\n :param e: Exception object\n :return: error message\n '
try:
if e.args:
if (len(e.args) > 1):
error_code = e.args[0]
error_msg = e.args[1]
elif (len(e.args) == 1):
error_code = 'Error code unavailable'
error_msg = e.args[0]
else:
error_code = 'Error code unavailable'
error_msg = 'Error message unavailable. Please check the asset configuration and|or action parameters.'
except Exception:
error_code = 'Error code unavailable'
error_msg = 'Error message unavailable. Please check the asset configuration and|or action parameters.'
return (error_code, error_msg) | -1,006,598,289,810,020,500 | This method is used to get an appropriate error message from the exception.
:param e: Exception object
:return: error message | Apps/phgsgmail/gsgmail_process_email.py | _get_error_message_from_exception | chunmanjimmyf/phantom-apps | python | def _get_error_message_from_exception(self, e):
' This method is used to get an appropriate error message from the exception.\n :param e: Exception object\n :return: error message\n '
try:
if e.args:
if (len(e.args) > 1):
error_code = e.args[0]
error_msg = e.args[1]
elif (len(e.args) == 1):
error_code = 'Error code unavailable'
error_msg = e.args[0]
else:
error_code = 'Error code unavailable'
error_msg = 'Error message unavailable. Please check the asset configuration and|or action parameters.'
except Exception:
error_code = 'Error code unavailable'
error_msg = 'Error message unavailable. Please check the asset configuration and|or action parameters.'
return (error_code, error_msg) |
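An illustrative call site for the helper, wrapping a risky operation and logging the normalized pair; the surrounding function and `connector` object are invented, while `debug_print` is the standard BaseConnector logger:

```python
import json

def parse_response(raw_body, connector):
    # `connector` is assumed to be the BaseConnector instance that owns
    # _get_error_message_from_exception().
    try:
        return json.loads(raw_body)
    except Exception as e:
        error_code, error_msg = connector._get_error_message_from_exception(e)
        connector.debug_print(f"Error code: {error_code}. Error message: {error_msg}")
        return None
```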
def load_data(folder, input_path='user_item', cut=40, high_cut=1000000, seed=None):
'\n loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and\n returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs\n :param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files\n :param cut: value to cut off users with less than "cut" read articles\n :return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.\n (look in create_split to see how the split is defined)\n '
(user_item_train, user_item_test, user_item_validation) = (pd.read_pickle(f'{folder}/{input_path}_train.pkl'), pd.read_pickle(f'{folder}/{input_path}_test.pkl'), pd.read_pickle(f'{folder}/{input_path}_validation.pkl'))
user_item_train = user_item_train[(user_item_train.str.len() > (cut * 0.7))]
user_item_train = user_item_train[(user_item_train.str.len() < (high_cut * 0.7))]
user_item_test = user_item_test.loc[user_item_train.index]
user_item_validation = user_item_validation.loc[user_item_train.index]
return (user_item_train, user_item_test, user_item_validation) | -7,876,844,019,875,978,000 | loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look in create_split to see how the split is defined) | preprocessing.py | load_data | MTC-ETH/RecommenderSystems | python | def load_data(folder, input_path='user_item', cut=40, high_cut=1000000, seed=None):
'\n loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and\n returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs\n :param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files\n :param cut: value to cut off users with less than "cut" read articles\n :return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.\n (look in create_split to see how the split is defined)\n '
(user_item_train, user_item_test, user_item_validation) = (pd.read_pickle(f'{folder}/{input_path}_train.pkl'), pd.read_pickle(f'{folder}/{input_path}_test.pkl'), pd.read_pickle(f'{folder}/{input_path}_validation.pkl'))
user_item_train = user_item_train[(user_item_train.str.len() > (cut * 0.7))]
user_item_train = user_item_train[(user_item_train.str.len() < (high_cut * 0.7))]
user_item_test = user_item_test.loc[user_item_train.index]
user_item_validation = user_item_validation.loc[user_item_train.index]
return (user_item_train, user_item_test, user_item_validation) |
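A hedged usage sketch of the loader; the pickle filenames follow the docstring ({folder}/user_item_train.pkl etc.), while the folder path itself is invented:

```python
# Assumes create_split() below has already written the three pickles.
user_item_train, user_item_test, user_item_validation = load_data(
    'data/partner_a', input_path='user_item', cut=40)
# Each object is a pd.Series: index = UserID, value = list of ArticleIDs.
print(user_item_train.head())
```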
def load_data_vertical(folder, input_path='user_item_vertical', cut=40):
'\n loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and\n returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs\n :param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files\n :param cut: value to cut off users with less than "cut" read articles\n :return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.\n (look in create_split to see how the split is defined)\n '
(user_item_train, user_item_test, user_item_validation) = (pd.read_parquet(f'{folder}/{input_path}_train.pq'), pd.read_parquet(f'{folder}/{input_path}_test.pq'), pd.read_parquet(f'{folder}/{input_path}_validation.pq'))
user_item_train = user_item_train[(user_item_train['count'] > cut)]
user_item_test = user_item_test[(user_item_test['count'] > cut)]
user_item_validation = user_item_validation[(user_item_validation['count'] > cut)]
user_item_train['resource_id'] = user_item_train['article_id']
user_item_test['resource_id'] = user_item_test['article_id']
user_item_validation['resource_id'] = user_item_validation['article_id']
return (user_item_train, user_item_test, user_item_validation) | 1,800,058,400,881,477,000 | loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look in create_split to see how the split is defined) | preprocessing.py | load_data_vertical | MTC-ETH/RecommenderSystems | python | def load_data_vertical(folder, input_path='user_item_vertical', cut=40):
'\n loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and\n returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs\n :param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files\n :param cut: value to cut off users with less than "cut" read articles\n :return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.\n (look in create_split to see how the split is defined)\n '
(user_item_train, user_item_test, user_item_validation) = (pd.read_parquet(f'{folder}/{input_path}_train.pq'), pd.read_parquet(f'{folder}/{input_path}_test.pq'), pd.read_parquet(f'{folder}/{input_path}_validation.pq'))
user_item_train = user_item_train[(user_item_train['count'] > cut)]
user_item_test = user_item_test[(user_item_test['count'] > cut)]
user_item_validation = user_item_validation[(user_item_validation['count'] > cut)]
user_item_train['resource_id'] = user_item_train['article_id']
user_item_test['resource_id'] = user_item_test['article_id']
user_item_validation['resource_id'] = user_item_validation['article_id']
return (user_item_train, user_item_test, user_item_validation) |
def load_data_cv(folder, input_path='user_item', cut=40, high_cut=100000, seed=1):
'\n Same as load_data but only returns a random 80% of the training set\n '
(user_item_train, user_item_test, user_item_validation) = load_data(folder, input_path=input_path, cut=cut, high_cut=high_cut)
user_item_train = user_item_train.sample(frac=0.8, random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return (user_item_train, user_item_test, user_item_validation) | 1,705,447,626,688,921,600 | Same as load_data but only returns a random 80% of the training set
'\n \n '
(user_item_train, user_item_test, user_item_validation) = load_data(folder, input_path=input_path, cut=cut, high_cut=high_cut)
user_item_train = user_item_train.sample(frac=0.8, random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return (user_item_train, user_item_test, user_item_validation) |
def load_data_vertical_cv(folder, input_path='user_item_vertical', cut=40, high_cut=100000, seed=1):
'\n Same as load_data but only returns a random 80% of the training set\n '
(user_item_train, user_item_test, user_item_validation) = load_data_vertical(folder, input_path=input_path, cut=cut)
user_item_train = user_item_train.sample(frac=0.8, random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return (user_item_train, user_item_test, user_item_validation) | -1,286,343,011,307,267,600 | Same as load_data but only returns a random 80% of the training set
'\n \n '
(user_item_train, user_item_test, user_item_validation) = load_data_vertical(folder, input_path=input_path, cut=cut)
user_item_train = user_item_train.sample(frac=0.8, random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return (user_item_train, user_item_test, user_item_validation) |
def get_metadata(folder, usecols=[]):
'\n Loads and returns the article metadata.\n The algorithms expect the format to be a Dataframe with two columns:\n - "resource_id": unique id for the article\n - "text": full text of the article (without html tags)\n '
if (not usecols):
usecols = ['text', 'resource_id']
metadata = pd.read_csv(f'{folder}/meta.csv', usecols=usecols)
return metadata.dropna(subset=['text']) | 8,553,378,981,365,157,000 | Loads and returns the article metadata.
The algorithms expect the format to be a Dataframe with two columns:
- "resource_id": unique id for the article
- "text": full text of the article (without html tags) | preprocessing.py | get_metadata | MTC-ETH/RecommenderSystems | python | def get_metadata(folder, usecols=[]):
'\n Loads and returns the article metadata.\n The algorithms expect the format to be a Dataframe with two columns:\n - "resource_id": unique id for the article\n - "text": full text of the article (without html tags)\n '
if (not usecols):
usecols = ['text', 'resource_id']
metadata = pd.read_csv(f'{folder}/meta.csv', usecols=usecols)
return metadata.dropna(subset=['text']) |
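A sketch of the meta.csv shape get_metadata() expects; the two required columns come from the docstring, while the row contents and path are invented:

```python
import pandas as pd

pd.DataFrame({
    'resource_id': ['a1', 'a2'],
    'text': ['Full plain text of article one.', 'Full plain text of article two.'],
}).to_csv('data/partner_a/meta.csv', index=False)

metadata = get_metadata('data/partner_a')  # rows with missing text are dropped
```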
def transform_item_matrix_to_horizontal_format(folder, output_path='user_item_matrix.pkl', input_path='user_item_matrix_vertical.pq', sortby='ts'):
'\n Transforms vertical User-Item matrix where each row is one click into a horizontal User-item matrix where we have\n one row for each user and each row contains a (sorted) list of articles she/he clicked on.\n :param folder: Input folder\n :param output_path: Filename/path for outputfile\n :param input_path: Filename/path for inputfile. This pickled file contains a DataFrame with three columns:\n "user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp\n to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.\n :param sortby: Column name of the timestamp column to sort by\n :return: a Series where the index is the UserID and the value is the timestamp-sorted\n list of clicked ArticleIDs\n '
now = datetime.datetime.now()
matrices = pd.read_parquet(f'{folder}/{input_path}')
grouped = matrices.sort_values(sortby).groupby(['user_ix']).apply((lambda x: list(x['article_id'])))
grouped.to_pickle(f'{folder}/{output_path}')
print(f'Data transformed {(datetime.datetime.now() - now)}') | 7,652,603,608,182,917,000 | Transforms vertical User-Item matrix where each row is one click into a horizontal User-item matrix where we have
one row for each user and each row contains a (sorted) list of articles she/he clicked on.
:param folder: Input folder
:param output_path: Filename/path for outputfile
:param input_path: Filename/path for inputfile. This pickled file contains a DataFrame with three columns:
"user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp
to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.
:param sortby: Column name of the timestamp column to sort by
:return: a Series where the index is the UserID and the value is the timestamp-sorted
list of clicked ArticleIDs | preprocessing.py | transform_item_matrix_to_horizontal_format | MTC-ETH/RecommenderSystems | python | def transform_item_matrix_to_horizontal_format(folder, output_path='user_item_matrix.pkl', input_path='user_item_matrix_vertical.pq', sortby='ts'):
'\n Transforms vertical User-Item matrix where each row is one click into a horizontal User-item matrix where we have\n one row for each user and each row contains a (sorted) list of articles she/he clicked on.\n :param folder: Input folder\n :param output_path: Filename/path for outputfile\n :param input_path: Filename/path for inputfile. This pickled file contains a DataFrame with three columns:\n "user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp\n to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.\n :param sortby: Column name of the timestamp column to sort by\n :return: a Series where the index is the UserID and the value is the timestamp-sorted\n list of clicked ArticleIDs\n '
now = datetime.datetime.now()
matrices = pd.read_parquet(f'{folder}/{input_path}')
grouped = matrices.sort_values(sortby).groupby(['user_ix']).apply((lambda x: list(x['article_id'])))
grouped.to_pickle(f'{folder}/{output_path}')
print(f'Data transformed {(datetime.datetime.now() - now)}') |
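To make the vertical-to-horizontal transformation concrete, here is a toy input under the docstring's column conventions together with the same core groupby:

```python
import pandas as pd

clicks = pd.DataFrame({
    'user_ix':    ['u1', 'u1', 'u2'],
    'article_id': ['a2', 'a1', 'a3'],
    'ts':         [2, 1, 5],
})
# Same core operation as transform_item_matrix_to_horizontal_format():
grouped = clicks.sort_values('ts').groupby('user_ix').apply(lambda x: list(x['article_id']))
# grouped: u1 -> ['a1', 'a2'] (sorted by ts), u2 -> ['a3']
```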
def create_split(folder, input_path='user_item_matrix.pkl', ouput_path='user_item', cut_dump=10):
'\n Loads the horizontal user item data from the folder and creates a user-wise 70% train, 20% validation, 10% test split.\n This means for each user the first 70% of read articles are in the train set, the next 20% in the validation set and the last 10%\n in the test set. We remove users with less than 10 clicked articles.\n This is the data that is loaded to train/test the models in the end.\n '
now = datetime.datetime.now()
user_item = pd.read_pickle(f'{folder}/{input_path}')
user_item = user_item[(user_item.str.len() > cut_dump)]
user_item_train = user_item.apply((lambda x: x[:int((len(x) * 0.7))]))
user_item_test = user_item.apply((lambda x: x[int((len(x) * 0.7)):int((len(x) * 0.9))]))
user_item_validation = user_item.apply((lambda x: x[int((len(x) * 0.9)):]))
user_item_train.name = 'article_id'
user_item_test.name = 'article_id'
user_item_validation.name = 'article_id'
user_item_train.to_pickle(f'{folder}/{ouput_path}_train.pkl')
user_item_test.to_pickle(f'{folder}/{ouput_path}_test.pkl')
user_item_validation.to_pickle(f'{folder}/{ouput_path}_validation.pkl')
print(f'Split created {(datetime.datetime.now() - now)}') | 2,414,461,517,074,541,600 | Loads the horizontal user item data from the folder and creates a user-wise 70% train, 20% validation, 10% test split.
This means for each user the first 70% of read articles are in the train set, the next 20% in the validation set and the last 10%
in the test set. We remove users with less than 10 clicked articles.
This is the data that is loaded to train/test the models in the end. | preprocessing.py | create_split | MTC-ETH/RecommenderSystems | python | def create_split(folder, input_path='user_item_matrix.pkl', ouput_path='user_item', cut_dump=10):
'\n Loads the horizontal user item data from the folder and creates a user-wise 70% train, 20% validation, 10% test split.\n This means for each user the first 70% of read articles are in the train set, the next 20% in the validation set and the last 10%\n in the test set. We remove users with less than 10 clicked articles.\n This is the data that is loaded to train/test the models in the end.\n '
now = datetime.datetime.now()
user_item = pd.read_pickle(f'{folder}/{input_path}')
user_item = user_item[(user_item.str.len() > cut_dump)]
user_item_train = user_item.apply((lambda x: x[:int((len(x) * 0.7))]))
user_item_test = user_item.apply((lambda x: x[int((len(x) * 0.7)):int((len(x) * 0.9))]))
user_item_validation = user_item.apply((lambda x: x[int((len(x) * 0.9)):]))
user_item_train.name = 'article_id'
user_item_test.name = 'article_id'
user_item_validation.name = 'article_id'
user_item_train.to_pickle(f'{folder}/{ouput_path}_train.pkl')
user_item_test.to_pickle(f'{folder}/{ouput_path}_test.pkl')
user_item_validation.to_pickle(f'{folder}/{ouput_path}_validation.pkl')
print(f'Split created {(datetime.datetime.now() - now)}') |
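The slicing boundaries are easy to verify on a single toy user; this sketch mirrors the 70/20/10 cuts above, with invented data and the length filter lowered so the example passes it:

```python
import pandas as pd

user_item = pd.Series({'u1': list(range(10))})   # 10 chronological clicks
user_item = user_item[user_item.str.len() > 5]   # cut_dump analogue

train = user_item.apply(lambda x: x[:int(len(x) * 0.7)])
test = user_item.apply(lambda x: x[int(len(x) * 0.7):int(len(x) * 0.9)])
validation = user_item.apply(lambda x: x[int(len(x) * 0.9):])
print(train['u1'], test['u1'], validation['u1'])
# [0, 1, 2, 3, 4, 5, 6] [7, 8] [9]
```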
def create_split_vertical(folder, input_path='user_item_matrix_vertical.pq', ouput_path='user_item_vertical', cut_dump=10, time_column='ts'):
'\n    Loads the vertical user item data from folder and creates a user-wise 70% train, 20% validation, 10% test split.\n    This means for each user the first 70% of read articles are in the train set, the next 20% in the validation set, and the last 10%\n    in the test set. We remove users with fewer than 10 clicked articles.\n    This is the data that is loaded to train/test the models in the end.\n    '
now = datetime.datetime.now()
user_item = pd.read_parquet(f'{folder}/{input_path}').sort_values(time_column)
user_item['count'] = user_item.groupby(['user_ix']).article_id.transform('count')
user_item = user_item[(user_item['count'] > cut_dump)]
grouped = user_item.groupby(['user_ix'])
user_item['percentile'] = ((grouped.article_id.cumcount() + 1) / grouped.article_id.transform('count'))
user_item_train = user_item[(user_item['percentile'] <= 0.7)]
user_item_test = user_item[((user_item['percentile'] > 0.7) & (user_item['percentile'] < 0.9))]
user_item_validation = user_item[(user_item['percentile'] > 0.9)]
user_item_train.to_parquet(f'{folder}/{ouput_path}_train.pq')
user_item_test.to_parquet(f'{folder}/{ouput_path}_test.pq')
user_item_validation.to_parquet(f'{folder}/{ouput_path}_validation.pq')
print(f'Split created {(datetime.datetime.now() - now)}') | 7,071,494,411,561,606,000 | Loads the vertical user item data from folder and creates a user-wise 70% train, 20% validation, 10% test split.
This means for each user the first 70% of read articles are in the train set, the next 20% in the validation set, and the last 10%
in the test set. We remove users with fewer than 10 clicked articles.
This is the data that is loaded to train/test the models in the end. | preprocessing.py | create_split_vertical | MTC-ETH/RecommenderSystems | python | def create_split_vertical(folder, input_path='user_item_matrix_vertical.pq', ouput_path='user_item_vertical', cut_dump=10, time_column='ts'):
'\n    Loads the vertical user item data from folder and creates a user-wise 70% train, 20% validation, 10% test split.\n    This means for each user the first 70% of read articles are in the train set, the next 20% in the validation set, and the last 10%\n    in the test set. We remove users with fewer than 10 clicked articles.\n    This is the data that is loaded to train/test the models in the end.\n    '
now = datetime.datetime.now()
user_item = pd.read_parquet(f'{folder}/{input_path}').sort_values(time_column)
user_item['count'] = user_item.groupby(['user_ix']).article_id.transform('count')
user_item = user_item[(user_item['count'] > cut_dump)]
grouped = user_item.groupby(['user_ix'])
user_item['percentile'] = ((grouped.article_id.cumcount() + 1) / grouped.article_id.transform('count'))
user_item_train = user_item[(user_item['percentile'] <= 0.7)]
user_item_test = user_item[((user_item['percentile'] > 0.7) & (user_item['percentile'] < 0.9))]
user_item_validation = user_item[(user_item['percentile'] > 0.9)]
user_item_train.to_parquet(f'{folder}/{ouput_path}_train.pq')
user_item_test.to_parquet(f'{folder}/{ouput_path}_test.pq')
user_item_validation.to_parquet(f'{folder}/{ouput_path}_validation.pq')
print(f'Split created {(datetime.datetime.now() - now)}') |
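The percentile column is the interesting part: cumcount numbers each user's clicks from 0 in timestamp order, so (cumcount + 1) / count is a running position within the user's history. A toy illustration:

```python
import pandas as pd

user_item = pd.DataFrame({
    'user_ix': ['u1'] * 5,
    'article_id': ['a0', 'a1', 'a2', 'a3', 'a4'],
    'ts': [1, 2, 3, 4, 5],
}).sort_values('ts')

grouped = user_item.groupby(['user_ix'])
user_item['percentile'] = ((grouped.article_id.cumcount() + 1)
                           / grouped.article_id.transform('count'))
print(user_item['percentile'].tolist())   # [0.2, 0.4, 0.6, 0.8, 1.0]
train = user_item[user_item['percentile'] <= 0.7]   # first three clicks
```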
def transform_horizontal_to_vertical(df):
'\n Transforms the horizontal format into vertical format\n :param df:\n :return:\n '
return df.explode().reset_index() | -6,143,747,669,144,615,000 | Transforms the horizontal format into vertical format
:param df:
:return: | preprocessing.py | transform_horizontal_to_vertical | MTC-ETH/RecommenderSystems | python | def transform_horizontal_to_vertical(df):
'\n Transforms the horizontal format into vertical format\n :param df:\n :return:\n '
return df.explode().reset_index() |
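The inverse transform is a one-liner because Series.explode repeats the index once per list element:

```python
import pandas as pd

horizontal = pd.Series({'u1': ['a1', 'a2'], 'u2': ['a3']})
print(horizontal.explode().reset_index())
#   index   0
# 0    u1  a1
# 1    u1  a2
# 2    u2  a3
```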
@auth.optional
def get(self):
'\n Show register form\n\n Returns:\n Register template with form\n '
return render_template('auth/register.html', form=RegisterForm()) | 1,752,371,931,808,680,400 | Show register form
Returns:
Register template with form | app/controllers/auth/register.py | get | TheSynt4x/flask-blog | python | @auth.optional
def get(self):
'\n Show register form\n\n Returns:\n Register template with form\n '
return render_template('auth/register.html', form=RegisterForm()) |
@auth.optional
def post(self):
'\n Handle the POST request and sign up the user if form validation passes\n\n Returns:\n A redirect or a template with the validation errors\n '
form = RegisterForm()
if form.validate_on_submit():
form.validate_username(form.username)
avatar = 'no-image.png'
if (('avatar' in request.files) and request.files['avatar']):
avatar = avatar_service.save(form.avatar.data)
User.create(form.username.data, form.password.data, avatar)
flash('Your account has been created. You may now login.', 'info')
return redirect(url_for('login'))
return render_template('auth/register.html', form=form) | -7,568,100,965,139,478,000 | Handle the POST request and sign up the user if form validation passes
Returns:
A redirect or a template with the validation errors | app/controllers/auth/register.py | post | TheSynt4x/flask-blog | python | @auth.optional
def post(self):
'\n Handle the POST request and sign up the user if form validation passes\n\n Returns:\n A redirect or a template with the validation errors\n '
form = RegisterForm()
if form.validate_on_submit():
form.validate_username(form.username)
avatar = 'no-image.png'
if (('avatar' in request.files) and request.files['avatar']):
avatar = avatar_service.save(form.avatar.data)
User.create(form.username.data, form.password.data, avatar)
flash('Your account has been created. You may now login.', 'info')
return redirect(url_for('login'))
return render_template('auth/register.html', form=form) |
def get(self, request):
' Returns a list of wiki pages. '
pages = Page.objects.all()
context = {'pages': pages}
return render(request, 'list.html', context=context) | -2,116,179,919,993,262,300 | Returns a list of wiki pages. | wiki/views.py | get | ebonnecab/makewiki | python | def get(self, request):
' '
pages = Page.objects.all()
context = {'pages': pages}
return render(request, 'list.html', context=context) |
def __init__(self, num_inputs, num_hidden_layers, num_inner_features):
'Initializer for the neural network model.\n\n        Args:\n            num_inputs: the dimension of input data.\n            num_hidden_layers: the number of hidden layers.\n            num_inner_features: the number of features in the hidden layers.\n        '
super(NNModel, self).__init__()
self.input_layer = nn.Linear(num_inputs, num_inner_features)
hidden_layers = []
for _ in range(num_hidden_layers):
hidden_layers.append(nn.Linear(num_inner_features, num_inner_features))
hidden_layers.append(nn.ReLU())
self.hidden_layers = nn.Sequential(*hidden_layers)
self.output_layer = nn.Linear(num_inner_features, 1) | 5,918,843,681,598,149,000 | Initializer for the neural network model.
Args:
num_inputs: the dimension of input data.
num_hidden_layers: the number of hidden layers.
num_inner_features: the number of features in the hidden layers | stock_trading_backend/agent/neural_network_model.py | __init__ | iryzhkov/stock-trading-backend | python | def __init__(self, num_inputs, num_hidden_layers, num_inner_features):
'Initializer for the neural network model.\n\n        Args:\n            num_inputs: the dimension of input data.\n            num_hidden_layers: the number of hidden layers.\n            num_inner_features: the number of features in the hidden layers.\n        '
super(NNModel, self).__init__()
self.input_layer = nn.Linear(num_inputs, num_inner_features)
hidden_layers = []
for _ in range(num_hidden_layers):
hidden_layers.append(nn.Linear(num_inner_features, num_inner_features))
hidden_layers.append(nn.ReLU())
self.hidden_layers = nn.Sequential(*hidden_layers)
self.output_layer = nn.Linear(num_inner_features, 1) |
def forward(self, input_tensor):
'Forward pass on the neural network model.\n\n Args:\n input_tensor: the input tensor.\n\n Returns:\n Tensor with model results.\n '
output = F.relu(self.input_layer(input_tensor))
output = self.hidden_layers(output)
output = self.output_layer(output)
return output | -7,529,039,952,037,276,000 | Forward pass on the neural network model.
Args:
input_tensor: the input tensor.
Returns:
Tensor with model results. | stock_trading_backend/agent/neural_network_model.py | forward | iryzhkov/stock-trading-backend | python | def forward(self, input_tensor):
'Forward pass on the neural network model.\n\n Args:\n input_tensor: the input tensor.\n\n Returns:\n Tensor with model results.\n '
output = F.relu(self.input_layer(input_tensor))
output = self.hidden_layers(output)
output = self.output_layer(output)
return output |
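A quick smoke test of the network, assuming the NNModel class above is in scope; the widths and batch size are arbitrary:

```python
import torch

model = NNModel(num_inputs=4, num_hidden_layers=2, num_inner_features=16)
batch = torch.randn(8, 4)    # 8 state-action vectors with 4 features each
out = model(batch)           # input layer -> hidden stack -> output layer
print(out.shape)             # torch.Size([8, 1])
```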
def __init__(self, learning_rate=0.001, num_hidden_layers=1, num_inner_features=100):
'Initializer for model class.\n\n Args:\n learning_rate: the learning rate of the model.\n num_hidden_layers: number of hidden layers in the network.\n num_inner_features: number of features in the hidden layers.\n '
super(NeuralNetworkModel, self).__init__()
self.model = None
self.optimizer = None
self.criterion = nn.MSELoss()
self.learning_rate = learning_rate
self.num_hidden_layers = num_hidden_layers
self.num_inner_features = num_inner_features
self.id_str = '{}_{}_{}_{}'.format(self.name, learning_rate, num_hidden_layers, num_inner_features) | -1,580,914,347,267,366,100 | Initializer for model class.
Args:
learning_rate: the learning rate of the model.
num_hidden_layers: number of hidden layers in the network.
num_inner_features: number of features in the hidden layers. | stock_trading_backend/agent/neural_network_model.py | __init__ | iryzhkov/stock-trading-backend | python | def __init__(self, learning_rate=0.001, num_hidden_layers=1, num_inner_features=100):
'Initializer for model class.\n\n Args:\n learning_rate: the learning rate of the model.\n num_hidden_layers: number of hidden layers in the network.\n num_inner_features: number of features in the hidden layers.\n '
super(NeuralNetworkModel, self).__init__()
self.model = None
self.optimizer = None
self.criterion = nn.MSELoss()
self.learning_rate = learning_rate
self.num_hidden_layers = num_hidden_layers
self.num_inner_features = num_inner_features
self.id_str = '{}_{}_{}_{}'.format(self.name, learning_rate, num_hidden_layers, num_inner_features) |
def _init_model(self, num_inputs):
'Initializes the internal neural network model.\n\n        Args:\n            num_inputs: number of inputs that the model will have.\n        '
self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) | -7,363,408,784,865,279,000 | Initializes the internal neural network model.
Args:
num_inputs: number of inputs that the model will have. | stock_trading_backend/agent/neural_network_model.py | _init_model | iryzhkov/stock-trading-backend | python | def _init_model(self, num_inputs):
'Initializes the internal neural network model.\n\n        Args:\n            num_inputs: number of inputs that the model will have.\n        '
self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) |
def _predict(self, state_action_tensor):
'Use provided information to make a prediction.\n\n Args:\n state_action_tensor: pytorch tensor with state-action values.\n\n Returns:\n Predicted values for observation-action tensors.\n '
if (self.model is None):
self._init_model(state_action_tensor.shape[1])
return self.model(state_action_tensor).detach().reshape((- 1)) | -3,101,276,370,257,911,000 | Use provided information to make a prediction.
Args:
state_action_tensor: pytorch tensor with state-action values.
Returns:
Predicted values for observation-action tensors. | stock_trading_backend/agent/neural_network_model.py | _predict | iryzhkov/stock-trading-backend | python | def _predict(self, state_action_tensor):
'Use provided information to make a prediction.\n\n Args:\n state_action_tensor: pytorch tensor with state-action values.\n\n Returns:\n Predicted values for observation-action tensors.\n '
if (self.model is None):
self._init_model(state_action_tensor.shape[1])
return self.model(state_action_tensor).detach().reshape((- 1)) |
def _train(self, state_action_tensor, expected_values_tensor):
'Train the model for 1 epoch.\n\n        Args:\n            state_action_tensor: pytorch tensor with state-action values.\n            expected_values_tensor: pytorch tensor with expected values for each state-action.\n\n        Returns:\n            The loss before training.\n        '
if (self.model is None):
self._init_model(state_action_tensor.shape[1])
self.optimizer.zero_grad()
output = self.model(state_action_tensor)
loss = self.criterion(output, expected_values_tensor)
loss_value = loss.data.item()
loss.backward()
self.optimizer.step()
return loss_value | -8,040,501,734,331,822,000 | Train the model for 1 epoch.
Args:
state_action_tensor: pytorch tensor with state-action expected_values.
expected_values: pytorch tensor with expected values for each state-action.
Returns:
The loss before training. | stock_trading_backend/agent/neural_network_model.py | _train | iryzhkov/stock-trading-backend | python | def _train(self, state_action_tensor, expected_values_tensor):
'Train the model for 1 epoch.\n\n        Args:\n            state_action_tensor: pytorch tensor with state-action values.\n            expected_values_tensor: pytorch tensor with expected values for each state-action.\n\n        Returns:\n            The loss before training.\n        '
if (self.model is None):
self._init_model(state_action_tensor.shape[1])
self.optimizer.zero_grad()
output = self.model(state_action_tensor)
loss = self.criterion(output, expected_values_tensor)
loss_value = loss.data.item()
loss.backward()
self.optimizer.step()
return loss_value |
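Putting _predict and _train together, a hedged sketch of a fitting loop; the feature width, batch size, and target values are made up, and the underscore-prefixed methods are called directly only for illustration:

```python
import torch

agent = NeuralNetworkModel(learning_rate=1e-3, num_hidden_layers=1,
                           num_inner_features=32)
states_actions = torch.randn(64, 10)   # 64 state-action rows, 10 features
targets = torch.randn(64, 1)           # expected values, shape (batch, 1)

for epoch in range(5):
    # One optimisation step: forward pass, MSE loss, backward, Adam update.
    loss = agent._train(states_actions, targets)
    print(f'epoch {epoch}: loss {loss:.4f}')

q_values = agent._predict(states_actions)   # detached tensor of shape (64,)
```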
@property
def distributions(self):
'Return the distributions for this trial.\n\n Returns:\n The distributions.\n '
return self._distributions | 8,992,952,435,542,309,000 | Return the distributions for this trial.
Returns:
The distributions. | optuna/structs.py | distributions | VladSkripniuk/optuna | python | @property
def distributions(self):
'Return the distributions for this trial.\n\n Returns:\n The distributions.\n '
return self._distributions |
@distributions.setter
def distributions(self, value):
'Set the distributions for this trial.\n\n Args:\n value: The distributions.\n '
self._distributions = value | -5,502,361,171,038,914,000 | Set the distributions for this trial.
Args:
value: The distributions. | optuna/structs.py | distributions | VladSkripniuk/optuna | python | @distributions.setter
def distributions(self, value):
'Set the distributions for this trial.\n\n Args:\n value: The distributions.\n '
self._distributions = value |
@property
def trial_id(self):
'Return the trial ID.\n\n .. deprecated:: 0.19.0\n The direct use of this attribute is deprecated and it is recommended that you use\n :attr:`~optuna.trial.FrozenTrial.number` instead.\n\n Returns:\n The trial ID.\n '
warnings.warn('The use of `FrozenTrial.trial_id` is deprecated. Please use `FrozenTrial.number` instead.', DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning('The use of `FrozenTrial.trial_id` is deprecated. Please use `FrozenTrial.number` instead.')
return self._trial_id | 7,157,514,691,564,256,000 | Return the trial ID.
.. deprecated:: 0.19.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.trial.FrozenTrial.number` instead.
Returns:
The trial ID. | optuna/structs.py | trial_id | VladSkripniuk/optuna | python | @property
def trial_id(self):
'Return the trial ID.\n\n .. deprecated:: 0.19.0\n The direct use of this attribute is deprecated and it is recommended that you use\n :attr:`~optuna.trial.FrozenTrial.number` instead.\n\n Returns:\n The trial ID.\n '
warnings.warn('The use of `FrozenTrial.trial_id` is deprecated. Please use `FrozenTrial.number` instead.', DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning('The use of `FrozenTrial.trial_id` is deprecated. Please use `FrozenTrial.number` instead.')
return self._trial_id |
@property
def study_id(self):
'Return the study ID.\n\n .. deprecated:: 0.20.0\n The direct use of this attribute is deprecated and it is recommended that you use\n :attr:`~optuna.structs.StudySummary.study_name` instead.\n\n Returns:\n The study ID.\n '
message = 'The use of `StudySummary.study_id` is deprecated. Please use `StudySummary.study_name` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
return self._study_id | 4,847,127,753,446,662,000 | Return the study ID.
.. deprecated:: 0.20.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.structs.StudySummary.study_name` instead.
Returns:
The study ID. | optuna/structs.py | study_id | VladSkripniuk/optuna | python | @property
def study_id(self):
'Return the study ID.\n\n .. deprecated:: 0.20.0\n The direct use of this attribute is deprecated and it is recommended that you use\n :attr:`~optuna.structs.StudySummary.study_name` instead.\n\n Returns:\n The study ID.\n '
message = 'The use of `StudySummary.study_id` is deprecated. Please use `StudySummary.study_name` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
return self._study_id |
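Both deprecated properties follow the same idiom: emit a DeprecationWarning for programmatic consumers, log a warning for humans, then delegate to the private attribute. A generic sketch of the pattern (the class and attribute names here are hypothetical):

```python
import warnings


class Summary:
    def __init__(self, study_name):
        self._study_name = study_name

    @property
    def study_id(self):
        # Deprecated alias kept only for backwards compatibility.
        warnings.warn('`study_id` is deprecated; use `study_name` instead.',
                      DeprecationWarning)
        return self._study_name
```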
def __init__(__self__, resource_name, opts=None, api_stages=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None, __props__=None, __name__=None, __opts__=None):
'\n Provides an API Gateway Usage Plan.\n\n ## Example Usage\n\n\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n myapi = aws.apigateway.RestApi("myapi")\n dev = aws.apigateway.Deployment("dev",\n rest_api=myapi.id,\n stage_name="dev")\n prod = aws.apigateway.Deployment("prod",\n rest_api=myapi.id,\n stage_name="prod")\n my_usage_plan = aws.apigateway.UsagePlan("myUsagePlan",\n api_stages=[\n {\n "api_id": myapi.id,\n "stage": dev.stage_name,\n },\n {\n "api_id": myapi.id,\n "stage": prod.stage_name,\n },\n ],\n description="my description",\n product_code="MYCODE",\n quota_settings={\n "limit": 20,\n "offset": 2,\n "period": "WEEK",\n },\n throttle_settings={\n "burstLimit": 5,\n "rate_limit": 10,\n })\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[list] api_stages: The associated API stages of the usage plan.\n :param pulumi.Input[str] description: The description of a usage plan.\n :param pulumi.Input[str] name: The name of the usage plan.\n :param pulumi.Input[str] product_code: The AWS Markeplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.\n :param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.\n :param pulumi.Input[dict] tags: Key-value map of resource tags\n :param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.\n\n The **api_stages** object supports the following:\n\n * `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.\n * `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.\n\n The **quota_settings** object supports the following:\n\n * `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.\n * `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.\n * `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".\n\n The **throttle_settings** object supports the following:\n\n * `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.\n * `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
__props__['arn'] = None
super(UsagePlan, __self__).__init__('aws:apigateway/usagePlan:UsagePlan', resource_name, __props__, opts) | 3,021,579,052,692,282,400 | Provides an API Gateway Usage Plan.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
myapi = aws.apigateway.RestApi("myapi")
dev = aws.apigateway.Deployment("dev",
rest_api=myapi.id,
stage_name="dev")
prod = aws.apigateway.Deployment("prod",
rest_api=myapi.id,
stage_name="prod")
my_usage_plan = aws.apigateway.UsagePlan("myUsagePlan",
api_stages=[
{
"api_id": myapi.id,
"stage": dev.stage_name,
},
{
"api_id": myapi.id,
"stage": prod.stage_name,
},
],
description="my description",
product_code="MYCODE",
quota_settings={
"limit": 20,
"offset": 2,
"period": "WEEK",
},
throttle_settings={
"burstLimit": 5,
"rate_limit": 10,
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit. | sdk/python/pulumi_aws/apigateway/usage_plan.py | __init__ | JakeGinnivan/pulumi-aws | python | def __init__(__self__, resource_name, opts=None, api_stages=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None, __props__=None, __name__=None, __opts__=None):
'\n Provides an API Gateway Usage Plan.\n\n ## Example Usage\n\n\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n myapi = aws.apigateway.RestApi("myapi")\n dev = aws.apigateway.Deployment("dev",\n rest_api=myapi.id,\n stage_name="dev")\n prod = aws.apigateway.Deployment("prod",\n rest_api=myapi.id,\n stage_name="prod")\n my_usage_plan = aws.apigateway.UsagePlan("myUsagePlan",\n api_stages=[\n {\n "api_id": myapi.id,\n "stage": dev.stage_name,\n },\n {\n "api_id": myapi.id,\n "stage": prod.stage_name,\n },\n ],\n description="my description",\n product_code="MYCODE",\n quota_settings={\n "limit": 20,\n "offset": 2,\n "period": "WEEK",\n },\n throttle_settings={\n "burstLimit": 5,\n "rate_limit": 10,\n })\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[list] api_stages: The associated API stages of the usage plan.\n :param pulumi.Input[str] description: The description of a usage plan.\n :param pulumi.Input[str] name: The name of the usage plan.\n :param pulumi.Input[str] product_code: The AWS Markeplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.\n :param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.\n :param pulumi.Input[dict] tags: Key-value map of resource tags\n :param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.\n\n The **api_stages** object supports the following:\n\n * `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.\n * `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.\n\n The **quota_settings** object supports the following:\n\n * `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.\n * `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.\n * `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".\n\n The **throttle_settings** object supports the following:\n\n * `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.\n * `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
__props__['arn'] = None
super(UsagePlan, __self__).__init__('aws:apigateway/usagePlan:UsagePlan', resource_name, __props__, opts) |
@staticmethod
def get(resource_name, id, opts=None, api_stages=None, arn=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None):
'\n Get an existing UsagePlan resource\'s state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[list] api_stages: The associated API stages of the usage plan.\n :param pulumi.Input[str] arn: Amazon Resource Name (ARN)\n :param pulumi.Input[str] description: The description of a usage plan.\n :param pulumi.Input[str] name: The name of the usage plan.\n :param pulumi.Input[str] product_code: The AWS Markeplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.\n :param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.\n :param pulumi.Input[dict] tags: Key-value map of resource tags\n :param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.\n\n The **api_stages** object supports the following:\n\n * `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.\n * `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.\n\n The **quota_settings** object supports the following:\n\n * `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.\n * `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.\n * `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".\n\n The **throttle_settings** object supports the following:\n\n * `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.\n * `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.\n '
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['arn'] = arn
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
return UsagePlan(resource_name, opts=opts, __props__=__props__) | -8,477,662,931,629,256,000 | Get an existing UsagePlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit. | sdk/python/pulumi_aws/apigateway/usage_plan.py | get | JakeGinnivan/pulumi-aws | python | @staticmethod
def get(resource_name, id, opts=None, api_stages=None, arn=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None):
'\n Get an existing UsagePlan resource\'s state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[list] api_stages: The associated API stages of the usage plan.\n :param pulumi.Input[str] arn: Amazon Resource Name (ARN)\n :param pulumi.Input[str] description: The description of a usage plan.\n :param pulumi.Input[str] name: The name of the usage plan.\n :param pulumi.Input[str] product_code: The AWS Markeplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.\n :param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.\n :param pulumi.Input[dict] tags: Key-value map of resource tags\n :param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.\n\n The **api_stages** object supports the following:\n\n * `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.\n * `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.\n\n The **quota_settings** object supports the following:\n\n * `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.\n * `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.\n * `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".\n\n The **throttle_settings** object supports the following:\n\n * `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.\n * `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.\n '
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['arn'] = arn
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
return UsagePlan(resource_name, opts=opts, __props__=__props__) |
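Based only on the signature above, adopting an existing plan might look like the snippet below; the resource name and provider ID are placeholders, and the call only works inside a running Pulumi program:

```python
import pulumi_aws as aws

# Bring an already-existing usage plan under Pulumi management.
plan = aws.apigateway.UsagePlan.get('imported-plan', id='usageplan-1234')
```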
@property
def color(self):
"\n The 'color' property is a color and may be specified as:\n - A hex string (e.g. '#ff0000')\n - An rgb/rgba string (e.g. 'rgb(255,0,0)')\n - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')\n - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')\n - A named CSS color:\n aliceblue, antiquewhite, aqua, aquamarine, azure,\n beige, bisque, black, blanchedalmond, blue,\n blueviolet, brown, burlywood, cadetblue,\n chartreuse, chocolate, coral, cornflowerblue,\n cornsilk, crimson, cyan, darkblue, darkcyan,\n darkgoldenrod, darkgray, darkgrey, darkgreen,\n darkkhaki, darkmagenta, darkolivegreen, darkorange,\n darkorchid, darkred, darksalmon, darkseagreen,\n darkslateblue, darkslategray, darkslategrey,\n darkturquoise, darkviolet, deeppink, deepskyblue,\n dimgray, dimgrey, dodgerblue, firebrick,\n floralwhite, forestgreen, fuchsia, gainsboro,\n ghostwhite, gold, goldenrod, gray, grey, green,\n greenyellow, honeydew, hotpink, indianred, indigo,\n ivory, khaki, lavender, lavenderblush, lawngreen,\n lemonchiffon, lightblue, lightcoral, lightcyan,\n lightgoldenrodyellow, lightgray, lightgrey,\n lightgreen, lightpink, lightsalmon, lightseagreen,\n lightskyblue, lightslategray, lightslategrey,\n lightsteelblue, lightyellow, lime, limegreen,\n linen, magenta, maroon, mediumaquamarine,\n mediumblue, mediumorchid, mediumpurple,\n mediumseagreen, mediumslateblue, mediumspringgreen,\n mediumturquoise, mediumvioletred, midnightblue,\n mintcream, mistyrose, moccasin, navajowhite, navy,\n oldlace, olive, olivedrab, orange, orangered,\n orchid, palegoldenrod, palegreen, paleturquoise,\n palevioletred, papayawhip, peachpuff, peru, pink,\n plum, powderblue, purple, red, rosybrown,\n royalblue, rebeccapurple, saddlebrown, salmon,\n sandybrown, seagreen, seashell, sienna, silver,\n skyblue, slateblue, slategray, slategrey, snow,\n springgreen, steelblue, tan, teal, thistle, tomato,\n turquoise, violet, wheat, white, whitesmoke,\n yellow, yellowgreen\n - A list or array of any of the above\n\n Returns\n -------\n str|numpy.ndarray\n "
return self['color'] | -9,075,663,790,309,021,000 | The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | color | lucasiscovici/plotly_py | python | @property
def color(self):
"\n The 'color' property is a color and may be specified as:\n - A hex string (e.g. '#ff0000')\n - An rgb/rgba string (e.g. 'rgb(255,0,0)')\n - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')\n - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')\n - A named CSS color:\n aliceblue, antiquewhite, aqua, aquamarine, azure,\n beige, bisque, black, blanchedalmond, blue,\n blueviolet, brown, burlywood, cadetblue,\n chartreuse, chocolate, coral, cornflowerblue,\n cornsilk, crimson, cyan, darkblue, darkcyan,\n darkgoldenrod, darkgray, darkgrey, darkgreen,\n darkkhaki, darkmagenta, darkolivegreen, darkorange,\n darkorchid, darkred, darksalmon, darkseagreen,\n darkslateblue, darkslategray, darkslategrey,\n darkturquoise, darkviolet, deeppink, deepskyblue,\n dimgray, dimgrey, dodgerblue, firebrick,\n floralwhite, forestgreen, fuchsia, gainsboro,\n ghostwhite, gold, goldenrod, gray, grey, green,\n greenyellow, honeydew, hotpink, indianred, indigo,\n ivory, khaki, lavender, lavenderblush, lawngreen,\n lemonchiffon, lightblue, lightcoral, lightcyan,\n lightgoldenrodyellow, lightgray, lightgrey,\n lightgreen, lightpink, lightsalmon, lightseagreen,\n lightskyblue, lightslategray, lightslategrey,\n lightsteelblue, lightyellow, lime, limegreen,\n linen, magenta, maroon, mediumaquamarine,\n mediumblue, mediumorchid, mediumpurple,\n mediumseagreen, mediumslateblue, mediumspringgreen,\n mediumturquoise, mediumvioletred, midnightblue,\n mintcream, mistyrose, moccasin, navajowhite, navy,\n oldlace, olive, olivedrab, orange, orangered,\n orchid, palegoldenrod, palegreen, paleturquoise,\n palevioletred, papayawhip, peachpuff, peru, pink,\n plum, powderblue, purple, red, rosybrown,\n royalblue, rebeccapurple, saddlebrown, salmon,\n sandybrown, seagreen, seashell, sienna, silver,\n skyblue, slateblue, slategray, slategrey, snow,\n springgreen, steelblue, tan, teal, thistle, tomato,\n turquoise, violet, wheat, white, whitesmoke,\n yellow, yellowgreen\n - A list or array of any of the above\n\n Returns\n -------\n str|numpy.ndarray\n "
return self['color'] |
@property
def colorsrc(self):
"\n Sets the source reference on plot.ly for color .\n \n The 'colorsrc' property must be specified as a string or\n as a plotly_study.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['colorsrc'] | 2,247,104,057,059,088,600 | Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | colorsrc | lucasiscovici/plotly_py | python | @property
def colorsrc(self):
"\n Sets the source reference on plot.ly for color .\n \n The 'colorsrc' property must be specified as a string or\n as a plotly_study.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['colorsrc'] |
@property
def family(self):
'\n HTML font family - the typeface that will be applied by the web\n browser. The web browser will only be able to apply a font if\n it is available on the system which it operates. Provide\n multiple font families, separated by commas, to indicate the\n preference in which to apply fonts if they aren\'t available on\n the system. The plotly service (at https://plot.ly or on-\n premise) generates images on a server, where only a select\n number of fonts are installed and supported. These include\n "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",\n "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open\n Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New\n Roman".\n \n The \'family\' property is a string and must be specified as:\n - A non-empty string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n '
return self['family'] | -3,524,569,398,637,699,600 | HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | family | lucasiscovici/plotly_py | python | @property
def family(self):
'\n HTML font family - the typeface that will be applied by the web\n browser. The web browser will only be able to apply a font if\n it is available on the system which it operates. Provide\n multiple font families, separated by commas, to indicate the\n preference in which to apply fonts if they aren\'t available on\n the system. The plotly service (at https://plot.ly or on-\n premise) generates images on a server, where only a select\n number of fonts are installed and supported. These include\n "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",\n "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open\n Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New\n Roman".\n \n The \'family\' property is a string and must be specified as:\n - A non-empty string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n '
return self['family'] |
@property
def familysrc(self):
"\n Sets the source reference on plot.ly for family .\n \n The 'familysrc' property must be specified as a string or\n as a plotly_study.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['familysrc'] | 2,851,453,137,557,342,000 | Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | familysrc | lucasiscovici/plotly_py | python | @property
def familysrc(self):
"\n Sets the source reference on plot.ly for family .\n \n The 'familysrc' property must be specified as a string or\n as a plotly_study.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['familysrc'] |
@property
def size(self):
"\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [1, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n "
return self['size'] | 6,887,128,696,685,480,000 | The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | size | lucasiscovici/plotly_py | python | @property
def size(self):
"\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [1, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n "
return self['size'] |
@property
def sizesrc(self):
"\n Sets the source reference on plot.ly for size .\n \n The 'sizesrc' property must be specified as a string or\n as a plotly_study.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['sizesrc'] | -2,197,100,178,794,376,400 | Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | sizesrc | lucasiscovici/plotly_py | python | @property
def sizesrc(self):
"\n Sets the source reference on plot.ly for size .\n \n The 'sizesrc' property must be specified as a string or\n as a plotly_study.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['sizesrc'] |
def __init__(self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs):
'\n Construct a new Font object\n \n Sets the font used in hover labels.\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of\n plotly_study.graph_objs.streamtube.hoverlabel.Font\n color\n\n colorsrc\n Sets the source reference on plot.ly for color .\n family\n HTML font family - the typeface that will be applied by\n the web browser. The web browser will only be able to\n apply a font if it is available on the system which it\n operates. Provide multiple font families, separated by\n commas, to indicate the preference in which to apply\n fonts if they aren\'t available on the system. The\n plotly service (at https://plot.ly or on-premise)\n generates images on a server, where only a select\n number of fonts are installed and supported. These\n include "Arial", "Balto", "Courier New", "Droid Sans",,\n "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old\n Standard TT", "Open Sans", "Overpass", "PT Sans\n Narrow", "Raleway", "Times New Roman".\n familysrc\n Sets the source reference on plot.ly for family .\n size\n\n sizesrc\n Sets the source reference on plot.ly for size .\n\n Returns\n -------\n Font\n '
super(Font, self).__init__('font')
if (arg is None):
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError('The first argument to the plotly_study.graph_objs.streamtube.hoverlabel.Font \nconstructor must be a dict or \nan instance of plotly_study.graph_objs.streamtube.hoverlabel.Font')
self._skip_invalid = kwargs.pop('skip_invalid', False)
from plotly_study.validators.streamtube.hoverlabel import font as v_font
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
_v = arg.pop('color', None)
self['color'] = (color if (color is not None) else _v)
_v = arg.pop('colorsrc', None)
self['colorsrc'] = (colorsrc if (colorsrc is not None) else _v)
_v = arg.pop('family', None)
self['family'] = (family if (family is not None) else _v)
_v = arg.pop('familysrc', None)
self['familysrc'] = (familysrc if (familysrc is not None) else _v)
_v = arg.pop('size', None)
self['size'] = (size if (size is not None) else _v)
_v = arg.pop('sizesrc', None)
self['sizesrc'] = (sizesrc if (sizesrc is not None) else _v)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False | 4,897,156,161,566,623,000 | Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.streamtube.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | __init__ | lucasiscovici/plotly_py | python | def __init__(self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs):
'\n Construct a new Font object\n \n Sets the font used in hover labels.\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of\n plotly_study.graph_objs.streamtube.hoverlabel.Font\n color\n\n colorsrc\n Sets the source reference on plot.ly for color .\n family\n HTML font family - the typeface that will be applied by\n the web browser. The web browser will only be able to\n apply a font if it is available on the system which it\n operates. Provide multiple font families, separated by\n commas, to indicate the preference in which to apply\n fonts if they aren\'t available on the system. The\n plotly service (at https://plot.ly or on-premise)\n generates images on a server, where only a select\n number of fonts are installed and supported. These\n include "Arial", "Balto", "Courier New", "Droid Sans",,\n "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old\n Standard TT", "Open Sans", "Overpass", "PT Sans\n Narrow", "Raleway", "Times New Roman".\n familysrc\n Sets the source reference on plot.ly for family .\n size\n\n sizesrc\n Sets the source reference on plot.ly for size .\n\n Returns\n -------\n Font\n '
super(Font, self).__init__('font')
if (arg is None):
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError('The first argument to the plotly_study.graph_objs.streamtube.hoverlabel.Font \nconstructor must be a dict or \nan instance of plotly_study.graph_objs.streamtube.hoverlabel.Font')
self._skip_invalid = kwargs.pop('skip_invalid', False)
from plotly_study.validators.streamtube.hoverlabel import font as v_font
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
_v = arg.pop('color', None)
self['color'] = (color if (color is not None) else _v)
_v = arg.pop('colorsrc', None)
self['colorsrc'] = (colorsrc if (colorsrc is not None) else _v)
_v = arg.pop('family', None)
self['family'] = (family if (family is not None) else _v)
_v = arg.pop('familysrc', None)
self['familysrc'] = (familysrc if (familysrc is not None) else _v)
_v = arg.pop('size', None)
self['size'] = (size if (size is not None) else _v)
_v = arg.pop('sizesrc', None)
self['sizesrc'] = (sizesrc if (sizesrc is not None) else _v)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False |
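Constructing the hover-label font directly, using only keyword arguments named in the constructor above; this assumes the forked plotly_study package mirrors plotly's usual module layout:

```python
import plotly_study.graph_objs as go

font = go.streamtube.hoverlabel.Font(family='Arial', size=12, color='#444')
print(font.family, font.size, font.color)   # Arial 12 #444
```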
def getNetworkCellularGatewaySettingsDhcp(self, networkId: str):
'\n **List common DHCP settings of MGs**\n https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp\n \n - networkId (string)\n '
metadata = {'tags': ['MG DHCP settings'], 'operation': 'getNetworkCellularGatewaySettingsDhcp'}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
return self._session.get(metadata, resource) | -1,668,987,376,588,538,000 | **List common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp
- networkId (string) | meraki/api/mg_dhcp_settings.py | getNetworkCellularGatewaySettingsDhcp | NoFliesOnYou/dashboard-api-python | python | def getNetworkCellularGatewaySettingsDhcp(self, networkId: str):
'\n **List common DHCP settings of MGs**\n https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp\n \n - networkId (string)\n '
metadata = {'tags': ['MG DHCP settings'], 'operation': 'getNetworkCellularGatewaySettingsDhcp'}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
return self._session.get(metadata, resource) |
def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs):
"\n **Update common DHCP settings of MGs**\n https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp\n \n - networkId (string)\n - dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.\n - dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'.\n - dnsCustomNameservers (array): list of fixed IP representing the the DNS Name servers when the mode is 'custom'\n "
kwargs.update(locals())
metadata = {'tags': ['MG DHCP settings'], 'operation': 'updateNetworkCellularGatewaySettingsDhcp'}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers']
payload = {k: v for (k, v) in kwargs.items() if (k in body_params)}
return self._session.put(metadata, resource, payload) | 6,057,655,295,595,497,000 | **Update common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp
- networkId (string)
- dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.
- dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'.
- dnsCustomNameservers (array): list of fixed IPs representing the DNS Name servers when the mode is 'custom' | meraki/api/mg_dhcp_settings.py | updateNetworkCellularGatewaySettingsDhcp | NoFliesOnYou/dashboard-api-python | python | def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs):
"\n **Update common DHCP settings of MGs**\n https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp\n \n - networkId (string)\n - dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.\n - dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'.\n - dnsCustomNameservers (array): list of fixed IP representing the the DNS Name servers when the mode is 'custom'\n "
kwargs.update(locals())
metadata = {'tags': ['MG DHCP settings'], 'operation': 'updateNetworkCellularGatewaySettingsDhcp'}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers']
payload = {k: v for (k, v) in kwargs.items() if (k in body_params)}
return self._session.put(metadata, resource, payload) |
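A brief usage sketch for the two MG DHCP records above. It assumes the repository's client attaches this section as dashboard.mg_dhcp_settings (as the file path suggests) and that an API key and network ID are supplied by the caller; both values below are placeholders.

import meraki

# Placeholder credentials; assumes a legacy v0-style client from this repository.
dashboard = meraki.DashboardAPI(api_key='YOUR_API_KEY')
network_id = 'N_1234'  # placeholder network ID

# Read the common DHCP settings for all MGs in the network.
settings = dashboard.mg_dhcp_settings.getNetworkCellularGatewaySettingsDhcp(network_id)

# Switch to custom DNS and a one-day lease; only documented body params are sent.
dashboard.mg_dhcp_settings.updateNetworkCellularGatewaySettingsDhcp(
    network_id,
    dhcpLeaseTime='1 day',
    dnsNameservers='custom',
    dnsCustomNameservers=['8.8.8.8', '8.8.4.4'],
)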
@classmethod
def setUpClass(cls):
'Configure raw file and its object in parent class (TestDump).'
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_port_stats')
super().set_raw_dump_object(PortStats)
super().set_minimum_size(112) | -3,124,006,365,885,124,000 | Configure raw file and its object in parent class (TestDump). | build/lib/tests/v0x04/test_controller2switch/test_port_stats.py | setUpClass | smythtech/python-openflow-legacy | python | @classmethod
def setUpClass(cls):
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_port_stats')
super().set_raw_dump_object(PortStats)
super().set_minimum_size(112) |
@bottle.post('/api/v3/report/import')
def post_report_import(database: Database):
'Import a preconfigured report into the database.'
report = dict(bottle.request.json)
result = import_json_report(database, report)
result['new_report_uuid'] = report['report_uuid']
return result | 4,125,415,011,259,234,300 | Import a preconfigured report into the database. | components/server/src/routes/report.py | post_report_import | Gamer1120/quality-time | python | @bottle.post('/api/v3/report/import')
def post_report_import(database: Database):
report = dict(bottle.request.json)
result = import_json_report(database, report)
result['new_report_uuid'] = report['report_uuid']
return result |
@bottle.post('/api/v3/report/new')
def post_report_new(database: Database):
'Add a new report.'
report_uuid = uuid()
user = sessions.user(database)
report = dict(report_uuid=report_uuid, title='New report', subjects={}, delta=dict(uuids=[report_uuid], email=user['email'], description=f"{user['user']} created a new report."))
result = insert_new_report(database, report)
result['new_report_uuid'] = report_uuid
return result | -8,755,332,867,516,317,000 | Add a new report. | components/server/src/routes/report.py | post_report_new | Gamer1120/quality-time | python | @bottle.post('/api/v3/report/new')
def post_report_new(database: Database):
report_uuid = uuid()
user = sessions.user(database)
report = dict(report_uuid=report_uuid, title='New report', subjects={}, delta=dict(uuids=[report_uuid], email=user['email'], description=f"{user['user']} created a new report."))
result = insert_new_report(database, report)
result['new_report_uuid'] = report_uuid
return result |
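The report routes above are plain bottle endpoints, so they can be driven over HTTP. A minimal sketch with requests, assuming a server reachable on localhost:5001 and an already-authenticated session (host, port, and cookie handling are assumptions, not part of the source):

import requests

BASE = 'http://localhost:5001/api/v3'  # placeholder host and port
session = requests.Session()  # assumed to carry a valid session cookie

# Create an empty report, then copy it; each response echoes the new UUID.
new_uuid = session.post(f'{BASE}/report/new').json()['new_report_uuid']
copy_uuid = session.post(f'{BASE}/report/{new_uuid}/copy').json()['new_report_uuid']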
@bottle.post('/api/v3/report/<report_uuid>/copy')
def post_report_copy(report_uuid: ReportId, database: Database):
'Copy a report.'
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
report_copy = copy_report(data.report, data.datamodel)
user = sessions.user(database)
report_copy['delta'] = dict(uuids=[report_uuid, report_copy['report_uuid']], email=user['email'], description=f"{user['user']} copied the report '{data.report_name}'.")
result = insert_new_report(database, report_copy)
result['new_report_uuid'] = report_copy['report_uuid']
return result | 501,560,276,536,554,100 | Copy a report. | components/server/src/routes/report.py | post_report_copy | Gamer1120/quality-time | python | @bottle.post('/api/v3/report/<report_uuid>/copy')
def post_report_copy(report_uuid: ReportId, database: Database):
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
report_copy = copy_report(data.report, data.datamodel)
user = sessions.user(database)
report_copy['delta'] = dict(uuids=[report_uuid, report_copy['report_uuid']], email=user['email'], description=f"{user['user']} copied the report '{data.report_name}'.")
result = insert_new_report(database, report_copy)
result['new_report_uuid'] = report_copy['report_uuid']
return result |
@bottle.get('/api/v3/report/<report_uuid>/pdf')
def export_report_as_pdf(report_uuid: ReportId):
'Download the report as pdf.'
renderer_host = os.environ.get('RENDERER_HOST', 'renderer')
renderer_port = os.environ.get('RENDERER_PORT', '9000')
render_url = f'http://{renderer_host}:{renderer_port}/api/render'
proxy_host = os.environ.get('PROXY_HOST', 'www')
proxy_port = os.environ.get('PROXY_PORT', '80')
query_string = (f'?{bottle.request.query_string}' if bottle.request.query_string else '')
report_url = parse.quote(f'http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}')
margins = '&'.join([f'pdf.margin.{side}=25' for side in ('top', 'bottom', 'left', 'right')])
options = f'emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}'
response = requests.get(f'{render_url}?url={report_url}&{options}')
response.raise_for_status()
bottle.response.content_type = 'application/pdf'
return response.content | -3,540,804,449,831,905,000 | Download the report as pdf. | components/server/src/routes/report.py | export_report_as_pdf | Gamer1120/quality-time | python | @bottle.get('/api/v3/report/<report_uuid>/pdf')
def export_report_as_pdf(report_uuid: ReportId):
renderer_host = os.environ.get('RENDERER_HOST', 'renderer')
renderer_port = os.environ.get('RENDERER_PORT', '9000')
render_url = f'http://{renderer_host}:{renderer_port}/api/render'
proxy_host = os.environ.get('PROXY_HOST', 'www')
proxy_port = os.environ.get('PROXY_PORT', '80')
query_string = (f'?{bottle.request.query_string}' if bottle.request.query_string else '')
report_url = parse.quote(f'http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}')
margins = '&'.join([f'pdf.margin.{side}=25' for side in ('top', 'bottom', 'left', 'right')])
options = f'emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}'
response = requests.get(f'{render_url}?url={report_url}&{options}')
response.raise_for_status()
bottle.response.content_type = 'application/pdf'
return response.content |
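Fetching the rendered PDF from the route above is a single GET; the sketch below writes it to disk, with the base URL and report UUID as placeholders:

import requests

response = requests.get('http://localhost:5001/api/v3/report/report-uuid-here/pdf')
response.raise_for_status()
with open('report.pdf', 'wb') as pdf_file:
    pdf_file.write(response.content)  # served as application/pdf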
@bottle.delete('/api/v3/report/<report_uuid>')
def delete_report(report_uuid: ReportId, database: Database):
'Delete a report.'
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
data.report['deleted'] = 'true'
user = sessions.user(database)
data.report['delta'] = dict(uuids=[report_uuid], email=user['email'], description=f"{user['user']} deleted the report '{data.report_name}'.")
return insert_new_report(database, data.report) | -6,304,560,776,285,529,000 | Delete a report. | components/server/src/routes/report.py | delete_report | Gamer1120/quality-time | python | @bottle.delete('/api/v3/report/<report_uuid>')
def delete_report(report_uuid: ReportId, database: Database):
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
data.report['deleted'] = 'true'
user = sessions.user(database)
data.report['delta'] = dict(uuids=[report_uuid], email=user['email'], description=f"{user['user']} deleted the report '{data.report_name}'.")
return insert_new_report(database, data.report) |
@bottle.post('/api/v3/report/<report_uuid>/attribute/<report_attribute>')
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
'Set a report attribute.'
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
value = dict(bottle.request.json)[report_attribute]
old_value = (data.report.get(report_attribute) or '')
data.report[report_attribute] = value
value_change_description = ('' if (report_attribute == 'layout') else f" from '{old_value}' to '{value}'")
user = sessions.user(database)
data.report['delta'] = dict(uuids=[report_uuid], email=user['email'], description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'{value_change_description}.")
return insert_new_report(database, data.report) | -7,890,409,386,999,294,000 | Set a report attribute. | components/server/src/routes/report.py | post_report_attribute | Gamer1120/quality-time | python | @bottle.post('/api/v3/report/<report_uuid>/attribute/<report_attribute>')
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
value = dict(bottle.request.json)[report_attribute]
old_value = (data.report.get(report_attribute) or '')
data.report[report_attribute] = value
value_change_description = ('' if (report_attribute == 'layout') else f" from '{old_value}' to '{value}'")
user = sessions.user(database)
data.report['delta'] = dict(uuids=[report_uuid], email=user['email'], description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'{value_change_description}.")
return insert_new_report(database, data.report) |
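The attribute route reads the new value from the JSON body under the attribute's own name, so renaming a report looks like this (host and UUID are placeholders):

import requests

requests.post(
    'http://localhost:5001/api/v3/report/report-uuid-here/attribute/title',
    json={'title': 'Renamed report'},  # body key must match the attribute in the URL
)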
@bottle.get('/api/v3/tagreport/<tag>')
def get_tag_report(tag: str, database: Database):
'Get a report with all metrics that have the specified tag.'
date_time = report_date_time()
reports = latest_reports(database, date_time)
data_model = latest_datamodel(database, date_time)
subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag)
tag_report = dict(title=f'Report for tag "{tag}"', subtitle='Note: tag reports are read-only', report_uuid=f'tag-{tag}', timestamp=date_time, subjects=subjects)
hide_credentials(data_model, tag_report)
summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
return tag_report | 2,397,682,409,466,062,000 | Get a report with all metrics that have the specified tag. | components/server/src/routes/report.py | get_tag_report | Gamer1120/quality-time | python | @bottle.get('/api/v3/tagreport/<tag>')
def get_tag_report(tag: str, database: Database):
date_time = report_date_time()
reports = latest_reports(database, date_time)
data_model = latest_datamodel(database, date_time)
subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag)
tag_report = dict(title=f'Report for tag "{tag}"', subtitle='Note: tag reports are read-only', report_uuid=f'tag-{tag}', timestamp=date_time, subjects=subjects)
hide_credentials(data_model, tag_report)
summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
return tag_report |
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
'Return all subjects and metrics that have the tag.'
subjects = {}
for report in reports:
for (subject_uuid, subject) in list(report.get('subjects', {}).items()):
for (metric_uuid, metric) in list(subject.get('metrics', {}).items()):
if (tag not in metric.get('tags', [])):
del subject['metrics'][metric_uuid]
if subject.get('metrics', {}):
subject_name = (subject.get('name') or data_model['subjects'][subject['type']]['name'])
subject['name'] = ((report['title'] + ' / ') + subject_name)
subjects[subject_uuid] = subject
return subjects | 3,139,816,455,561,467,400 | Return all subjects and metrics that have the tag. | components/server/src/routes/report.py | _get_subjects_and_metrics_by_tag | Gamer1120/quality-time | python | def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
subjects = {}
for report in reports:
for (subject_uuid, subject) in list(report.get('subjects', {}).items()):
for (metric_uuid, metric) in list(subject.get('metrics', {}).items()):
if (tag not in metric.get('tags', [])):
del subject['metrics'][metric_uuid]
if subject.get('metrics', {}):
subject_name = (subject.get('name') or data_model['subjects'][subject['type']]['name'])
subject['name'] = ((report['title'] + ' / ') + subject_name)
subjects[subject_uuid] = subject
return subjects |
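The tag filter above can be exercised with toy structures; the data model and report below are minimal stand-ins for illustration, not real quality-time fixtures:

# Minimal stand-ins: one subject with two metrics, only one tagged 'security'.
data_model = {'subjects': {'software': {'name': 'Software'}}}
reports = [{
    'title': 'R1',
    'subjects': {
        's1': {
            'type': 'software',
            'metrics': {
                'm1': {'tags': ['security']},
                'm2': {'tags': ['maintainability']},
            },
        },
    },
}]

subjects = _get_subjects_and_metrics_by_tag(data_model, reports, 'security')
assert list(subjects['s1']['metrics']) == ['m1']   # untagged metric removed
assert subjects['s1']['name'] == 'R1 / Software'   # prefixed with report title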
def blend(image1, image2, factor):
'Blend image1 and image2 using \'factor\'.\n\n A value of factor 0.0 means only image1 is used.\n A value of 1.0 means only image2 is used. A value between 0.0 and\n 1.0 means we linearly interpolate the pixel values between the two\n images. A value greater than 1.0 "extrapolates" the difference\n between the two pixel values, and we clip the results to values\n between 0 and 255.\n\n Args:\n image1: An image Tensor.\n image2: An image Tensor.\n factor: A floating point value above 0.0.\n\n Returns:\n A blended image Tensor.\n '
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return tf.saturate_cast((image1 + (factor * (image2 - image1))), tf.uint8) | -5,146,605,963,756,331,000 | Blend image1 and image2 using 'factor'.
A value of factor 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor.
image2: An image Tensor.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor. | third_party/augment_ops.py | blend | google-research/crest | python | def blend(image1, image2, factor):
'Blend image1 and image2 using \'factor\'.\n\n A value of factor 0.0 means only image1 is used.\n A value of 1.0 means only image2 is used. A value between 0.0 and\n 1.0 means we linearly interpolate the pixel values between the two\n images. A value greater than 1.0 "extrapolates" the difference\n between the two pixel values, and we clip the results to values\n between 0 and 255.\n\n Args:\n image1: An image Tensor.\n image2: An image Tensor.\n factor: A floating point value above 0.0.\n\n Returns:\n A blended image Tensor.\n '
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return tf.saturate_cast((image1 + (factor * (image2 - image1))), tf.uint8) |
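A quick numeric check of blend on constant uint8 images: factor 0.5 lands halfway between the inputs, while factors above 1.0 extrapolate and then saturate to [0, 255]:

import tensorflow as tf

image1 = tf.zeros([2, 2, 3], dtype=tf.uint8)
image2 = tf.fill([2, 2, 3], tf.constant(200, dtype=tf.uint8))

print(blend(image1, image2, 0.5)[0, 0, 0].numpy())  # 100, the midpoint
print(blend(image1, image2, 1.5)[0, 0, 0].numpy())  # 255: 0 + 1.5 * 200 saturates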
def wrap(image):
"Returns 'image' with an extra channel set to all 1s."
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended | -2,054,740,842,410,237,000 | Returns 'image' with an extra channel set to all 1s. | third_party/augment_ops.py | wrap | google-research/crest | python | def wrap(image):
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended |
def unwrap(image):
"Unwraps an image produced by wrap.\n\n Where there is a 0 in the last channel for every spatial position,\n the rest of the three channels in that spatial dimension are grayed\n (set to 128). Operations like translate and shear on a wrapped\n Tensor will leave 0s in empty locations. Some transformations look\n at the intensity of values to do preprocessing, and we want these\n empty pixels to assume the 'average' value, rather than pure black.\n\n\n Args:\n image: A 3D Image Tensor with 4 channels.\n\n Returns:\n image: A 3D image Tensor with 3 channels.\n "
image_shape = tf.shape(image)
flattened_image = tf.reshape(image, [(- 1), image_shape[2]])
alpha_channel = tf.expand_dims(flattened_image[:, (image_shape[2] - 1)], 1)
replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1], image.dtype)
flattened_image = tf.where(tf.equal(alpha_channel, 0), (tf.ones_like(flattened_image, dtype=image.dtype) * replace), flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], (image_shape[2] - 1)])
return image | 596,681,917,176,061,000 | Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
Returns:
image: A 3D image Tensor with 3 channels. | third_party/augment_ops.py | unwrap | google-research/crest | python | def unwrap(image):
"Unwraps an image produced by wrap.\n\n Where there is a 0 in the last channel for every spatial position,\n the rest of the three channels in that spatial dimension are grayed\n (set to 128). Operations like translate and shear on a wrapped\n Tensor will leave 0s in empty locations. Some transformations look\n at the intensity of values to do preprocessing, and we want these\n empty pixels to assume the 'average' value, rather than pure black.\n\n\n Args:\n image: A 3D Image Tensor with 4 channels.\n\n Returns:\n image: A 3D image Tensor with 3 channels.\n "
image_shape = tf.shape(image)
flattened_image = tf.reshape(image, [(- 1), image_shape[2]])
alpha_channel = tf.expand_dims(flattened_image[:, (image_shape[2] - 1)], 1)
replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1], image.dtype)
flattened_image = tf.where(tf.equal(alpha_channel, 0), (tf.ones_like(flattened_image, dtype=image.dtype) * replace), flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], (image_shape[2] - 1)])
return image |
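wrap and unwrap bracket the geometric ops: the extra alpha channel marks pixels a warp vacated, and unwrap grays them out. A round-trip sketch, assuming REPLACE_VALUE is 128 as the docstring describes:

import tensorflow as tf

image = tf.fill([2, 2, 3], tf.constant(50, dtype=tf.uint8))
wrapped = wrap(image)  # shape [2, 2, 4]; alpha channel is all 1s

# Simulate a warp that vacated the second row by zeroing it, alpha included.
hole = tf.concat([wrapped[:1], tf.zeros_like(wrapped[:1])], axis=0)
print(unwrap(hole)[1, 0].numpy())  # [128 128 128]: grayed, not black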
def invert(image):
'Inverts the image pixels.'
return (255 - tf.convert_to_tensor(image)) | -8,700,322,171,604,700,000 | Inverts the image pixels. | third_party/augment_ops.py | invert | google-research/crest | python | def invert(image):
return (255 - tf.convert_to_tensor(image)) |
def invert_blend(image, factor):
'Implements blend of invert with original image.'
return blend(invert(image), image, factor) | -4,616,882,108,785,448,000 | Implements blend of invert with original image. | third_party/augment_ops.py | invert_blend | google-research/crest | python | def invert_blend(image, factor):
return blend(invert(image), image, factor) |
def color(image, factor):
'Equivalent of PIL Color.'
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor) | 2,872,861,326,192,433,000 | Equivalent of PIL Color. | third_party/augment_ops.py | color | google-research/crest | python | def color(image, factor):
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor) |
def contrast(image, factor):
'Equivalent of PIL Contrast.'
grayscale_im = tf.image.rgb_to_grayscale(image)
mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))
mean = tf.saturate_cast((mean + 0.5), tf.uint8)
degenerate = (tf.ones_like(grayscale_im, dtype=tf.uint8) * mean)
degenerate = tf.image.grayscale_to_rgb(degenerate)
return blend(degenerate, image, factor) | -3,693,930,040,758,899,000 | Equivalent of PIL Contrast. | third_party/augment_ops.py | contrast | google-research/crest | python | def contrast(image, factor):
grayscale_im = tf.image.rgb_to_grayscale(image)
mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))
mean = tf.saturate_cast((mean + 0.5), tf.uint8)
degenerate = (tf.ones_like(grayscale_im, dtype=tf.uint8) * mean)
degenerate = tf.image.grayscale_to_rgb(degenerate)
return blend(degenerate, image, factor) |
def brightness(image, factor):
'Equivalent of PIL Brightness.'
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor) | -5,514,793,971,791,669,000 | Equivalent of PIL Brightness. | third_party/augment_ops.py | brightness | google-research/crest | python | def brightness(image, factor):
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor) |
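color, contrast, and brightness all reduce to blend against a degenerate image, so factor 1.0 is an exact no-op and 0.0 yields the degenerate version (grayscale, flat mean, or black, respectively). A small check:

import tensorflow as tf

image = tf.cast(tf.random.uniform([8, 8, 3], maxval=256, dtype=tf.int32), tf.uint8)

assert bool(tf.reduce_all(tf.equal(brightness(image, 1.0), image)))  # identity at 1.0
assert bool(tf.reduce_all(tf.equal(brightness(image, 0.0), 0)))      # black at 0.0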
def posterize(image, bits):
'Equivalent of PIL Posterize.'
shift = tf.cast((8 - bits), image.dtype)
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) | 7,847,657,482,698,043,000 | Equivalent of PIL Posterize. | third_party/augment_ops.py | posterize | google-research/crest | python | def posterize(image, bits):
shift = tf.cast((8 - bits), image.dtype)
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) |
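posterize keeps only the top `bits` bits of each channel via the shift pair above; a quick worked example at 4 bits:

import tensorflow as tf

pixel = tf.constant([[[173, 200, 31]]], dtype=tf.uint8)
print(posterize(pixel, 4).numpy())  # [[[160 192  16]]]: the low 4 bits are zeroed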
def rotate(image, degrees):
'Equivalent of PIL Rotation.'
degrees_to_radians = (math.pi / 180.0)
radians = (degrees * degrees_to_radians)
image = tfa_image.transform_ops.rotate(wrap(image), radians)
return unwrap(image) | -6,439,018,474,791,032,000 | Equivalent of PIL Rotation. | third_party/augment_ops.py | rotate | google-research/crest | python | def rotate(image, degrees):
degrees_to_radians = (math.pi / 180.0)
radians = (degrees * degrees_to_radians)
image = tfa_image.transform_ops.rotate(wrap(image), radians)
return unwrap(image) |
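rotate converts degrees to radians and delegates to tensorflow_addons, with the wrap/unwrap pair graying the exposed corners; the sketch assumes tensorflow_addons is installed and imported as tfa_image as in the code above:

import tensorflow as tf

image = tf.fill([32, 32, 3], tf.constant(50, dtype=tf.uint8))
rotated = rotate(image, 30.0)  # corners exposed by the rotation come back as 128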
def translate_x(image, pixels):
'Equivalent of PIL Translate in X dimension.'
image = tfa_image.translate_ops.translate(wrap(image), [(- pixels), 0])
return unwrap(image) | -5,187,543,649,634,846,000 | Equivalent of PIL Translate in X dimension. | third_party/augment_ops.py | translate_x | google-research/crest | python | def translate_x(image, pixels):
image = tfa_image.translate_ops.translate(wrap(image), [(- pixels), 0])
return unwrap(image) |
def translate_y(image, pixels):
'Equivalent of PIL Translate in Y dimension.'
image = tfa_image.translate_ops.translate(wrap(image), [0, (- pixels)])
return unwrap(image) | -3,589,578,885,919,435,000 | Equivalent of PIL Translate in Y dimension. | third_party/augment_ops.py | translate_y | google-research/crest | python | def translate_y(image, pixels):
image = tfa_image.translate_ops.translate(wrap(image), [0, (- pixels)])
return unwrap(image) |
def shear_x(image, level):
'Equivalent of PIL Shearing in X dimension.'
image = tfa_image.transform_ops.transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
return unwrap(image) | -1,900,459,595,508,388,400 | Equivalent of PIL Shearing in X dimension. | third_party/augment_ops.py | shear_x | google-research/crest | python | def shear_x(image, level):
image = tfa_image.transform_ops.transform(wrap(image), [1.0, level, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
return unwrap(image) |
def shear_y(image, level):
'Equivalent of PIL Shearing in Y dimension.'
image = tfa_image.transform_ops.transform(wrap(image), [1.0, 0.0, 0.0, level, 1.0, 0.0, 0.0, 0.0])
return unwrap(image) | -8,037,771,224,047,471,000 | Equivalent of PIL Shearing in Y dimension. | third_party/augment_ops.py | shear_y | google-research/crest | python | def shear_y(image, level):
image = tfa_image.transform_ops.transform(wrap(image), [1.0, 0.0, 0.0, level, 1.0, 0.0, 0.0, 0.0])
return unwrap(image) |
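shear_x and shear_y pass a flattened projective transform (the first six entries are the affine part) to tensorflow_addons; the same pattern generalizes to any affine warp. For instance:

import tensorflow as tf

image = tf.fill([16, 16, 3], tf.constant(50, dtype=tf.uint8))
sheared = shear_x(image, 0.5)  # content shifts left in proportion to the row index
# Vacated pixels are grayed to 128 by the wrap/unwrap pair, as with rotate above.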