Columns: code (string, 4 to 4.48k chars) · docstring (string, 1 to 6.45k chars) · _id (string, 24 chars)
def test_resend_confirmation_success_message(self, app, db):
    user = make_dummy_user()
    user.confirmed = False
    db.session.add(user)
    db.session.commit()
    with app.test_client() as tc:
        rv = tc.post(url_for('auth.resend_confirmation'),
                     data=dict(email=user.email),
                     follow_redirects=True)
    assert 'Confirmation email sent to' in str(rv.data)
resend_confirmation flashes a success message on successful submission.
625941b86aa9bd52df036bf6
def handleV2(self, info):
    return 'V2: %s' % info
handles v2
625941b867a9b606de4a7d11
def make_float(self, s):
    try:
        f = float(s)
        return f
    except (TypeError, ValueError):
        # narrowed from a bare except to the exceptions float() can raise
        print('error, cannot convert string to float')
        raise
Converts string "s" into a float.
625941b8851cf427c661a36f
@exit_with_status('!= 0')
def test_non_json_file(text_file):
    gjtk.cli.main(argv=[text_file])
Test invocation with a file that does not contain JSON.
625941b855399d3f05588508
def say(self, message):
    self.parent.say(message)
Tell the player something, long wait.
625941b89c8ee82313fbb5c9
def is_bool(value):
    return bool(value) == value
Return True if `value` is a boolean.
625941b8f9cc0f698b14045a
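A quick usage sketch for is_bool (hypothetical values): because bool is a subclass of int in Python, 0 and 1 compare equal to False and True, so they pass this check as well.

assert is_bool(True)
assert is_bool(0)      # bool(0) is False and False == 0, so 0 counts as a boolean here
assert not is_bool(2)  # bool(2) is True, but True != 2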
def initialize(self):
    self.assmts = {}
    bit = 1
    for entry in self.entries:
        assmts = AssignmentList()
        assmts.mask = assmts.bit = bit
        self.assmts[entry] = assmts
        bit <<= 1
    for block in self.blocks:
        for stat in block.stats:
            if isinstance(stat, NameAssignment):
                stat.bit = bit
                assmts = self.assmts[stat.entry]
                assmts.stats.append(stat)
                assmts.mask |= bit
                bit <<= 1
    for block in self.blocks:
        for entry, stat in list(block.gen.items()):
            assmts = self.assmts[entry]
            if stat is Uninitialized:
                block.i_gen |= assmts.bit
            else:
                block.i_gen |= stat.bit
            block.i_kill |= assmts.mask
        block.i_output = block.i_gen
        for entry in block.bounded:
            block.i_kill |= self.assmts[entry].bit
    for assmts in self.assmts.values():
        self.entry_point.i_gen |= assmts.bit
    self.entry_point.i_output = self.entry_point.i_gen
Set initial state, map assignments to bits.
625941b84c3428357757c17f
def ipam_roles_delete(self, id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.ipam_roles_delete_with_http_info(id, **kwargs)
    else:
        data = self.ipam_roles_delete_with_http_info(id, **kwargs)
        return data
ipam_roles_delete

This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.ipam_roles_delete(id, async_req=True)
>>> result = thread.get()

:param async_req bool
:param int id: A unique integer value identifying this role. (required)
:return: None
    If the method is called asynchronously, returns the request thread.
625941b80383005118ecf439
def save(self, *args, **kwargs):
    if self.published and self.pub_date is None:
        self.pub_date = datetime.now()
    elif not self.published and self.pub_date is not None:
        self.pub_date = None
    super().save(*args, **kwargs)
Set the publish date when the object's published status is switched to True, and reset the date if the object is unpublished.
625941b83539df3088e2e1a0
def apply(self, arg2, out):
    return _Resampler.ResamplerCC_apply(self, arg2, out)
apply(ResamplerCC self, complex< double > * arg2, complex< double > * out) -> int
625941b89b70327d1c4e0c28
def get_type(self):
    obj = self._get_db_obj_query().first()
    return obj.type if obj else None
Return the provider type.

Args:
    None

Returns:
    (String): "Provider type. Cloud backend name", example: "AWS"
625941b8cdde0d52a9e52e83
def __init__(self, *args):
    _itkOffsetPython.itkOffset2_swiginit(self, _itkOffsetPython.new_itkOffset2(*args))
__init__(self) -> itkOffset2
__init__(self, itkOffset2 arg0) -> itkOffset2
625941b8aad79263cf390890
def get_weight_range(self):
    return self._mins[1], self._maxs[1]
Returns the range of weight data in the set. get_weight_range() -> tuple<float, float>
625941b8091ae35668666dba
def from_radians(self, radians):
    if isinstance(radians, basestring):
        radians = self.from_sexegesimal(radians)
    self.degrees = math.degrees(radians)
Set the Angle using a value provided in radians.
625941b87d43ff24873a2af8
def deserialize_numpy(self, str, numpy):
    try:
        if self.timestamp is None:
            self.timestamp = genpy.Time()
        end = 0
        _x = self
        start = end
        end += 28
        (_x.timestamp.secs, _x.timestamp.nsecs, _x.id, _x.enabled, _x.calibrated,
         _x.ready, _x.moving, _x.gripping, _x.missed, _x.error, _x.reverse,
         _x.position, _x.force,) = _get_struct_3I8B2f().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.state = str[start:end].decode('utf-8')
        else:
            self.state = str[start:end]
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.command = str[start:end].decode('utf-8')
        else:
            self.command = str[start:end]
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.command_sender = str[start:end].decode('utf-8')
        else:
            self.command_sender = str[start:end]
        start = end
        end += 4
        (self.command_sequence,) = _get_struct_I().unpack(str[start:end])
        self.timestamp.canon()
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e)
unpack serialized message in str into this message instance using numpy for array types

:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
625941b8d10714528d5ffb34
def read_catalog(self, file):
    self.set_catalog_version(file)
    if not self.error:
        self.df.dropna(subset=['KPI name'], inplace=True)
        if not len(self.df):
            self.error = True
            error_msg = 'Empty catalog file\r\n\r\n %s \r\n\r\n '
            self.error_type = error_msg % file
        else:
            if 'Zone' not in self.df.columns:
                self.df['Zone'] = self.zone
            self.df = self.df[COLS_OUTPUT]
Retrieve catalog information
625941b89f2886367277a6e6
def dot(a, b):
    with tf.name_scope("dot"):
        a_ndim = a.get_shape().ndims
        b_ndim = b.get_shape().ndims
        assert a_ndim is not None
        if a_ndim == 0:
            return tf.scalar_mul(a, b)
        assert b_ndim is not None
        if b_ndim == 0:
            return tf.scalar_mul(b, a)
        a = check_dim_equal(a, -1, b, 0)
        if a_ndim == b_ndim == 1:
            return tf.reduce_sum(a * b)
        d = get_shape_dim(b, 0)
        assert a_ndim >= 2 and b_ndim >= 2
        res_shape = None
        if a_ndim > 2 or b_ndim > 2:
            res_shape = ([get_shape_dim(a, i) for i in range(0, a_ndim - 1)] +
                         [get_shape_dim(b, i) for i in range(1, b_ndim)])
        if a_ndim > 2:
            a = tf.reshape(a, (-1, d))
        if b_ndim > 2:
            b = tf.reshape(b, (d, -1))
        res = tf.matmul(a, b)
        if a_ndim > 2 or b_ndim > 2:
            res = tf.reshape(res, res_shape)
        return res
:param tf.Tensor a: shape [...da...,d]
:param tf.Tensor b: shape [d,...db...]
:return: tensor of shape [...da...,...db...] (the shared dimension d is contracted)
:rtype: tf.Tensor
625941b8462c4b4f79d1d525
def compute_coupling_matrix(self, fl1, fl2, bins, nell_rebin=2, method=203,
                            ell_cut_x=[1., -1.], ell_cut_y=[1., -1.]):
    if self.wsp is not None:
        lib.workspace_flat_free(self.wsp)
    self.wsp = lib.compute_coupling_matrix_flat(fl1.fl, fl2.fl, bins.bin,
                                                nell_rebin, method,
                                                ell_cut_x[0], ell_cut_x[1],
                                                ell_cut_y[0], ell_cut_y[1])
Computes coupling matrix associated with the cross-power spectrum of two NmtFieldFlats and an NmtBinFlat binning scheme.

:param NmtFieldFlat fl1,fl2: fields to correlate
:param NmtBinFlat bin: binning scheme
:param int nell_rebin: number of sub-intervals into which the base k-intervals will be sub-sampled to compute the coupling matrix
:param int method: algorithm to compute the coupling matrix (only 203 has been fully validated so far).
:param float(2) ell_cut_x: remove all modes with ell_x in the interval [ell_cut_x[0],ell_cut_x[1]] from the calculation.
:param float(2) ell_cut_y: remove all modes with ell_y in the interval [ell_cut_y[0],ell_cut_y[1]] from the calculation.
625941b830dc7b76659017bf
def test_yaml_dump(self):
    yaml = yaml_dump("{\"key\":\"value\"}")
    self.assertEqual(yaml, "key: value\n")
dump json string to yaml
625941b8090684286d50eb35
def get_properties(self):
    if self.features_layer is not None:
        for property in self.features_layer.get_properties():
            yield property
Returns all the properties of the features layer (iterator)

@rtype: L{Cproperty}
@return: list of properties
625941b83cc13d1c6d3c71d9
@workflow
def heal(ctx, graph, node_id):
    failing_node = ctx.model.node.get(node_id)
    host_node = ctx.model.node.get(failing_node.host.id)
    failed_node_subgraph = _get_contained_subgraph(ctx, host_node)
    failed_node_ids = list(n.id for n in failed_node_subgraph)
    targeted_nodes = [node for node in ctx.nodes if node.id not in failed_node_ids]
    uninstall_subgraph = task.WorkflowTask(
        heal_uninstall,
        failing_nodes=failed_node_subgraph,
        targeted_nodes=targeted_nodes
    )
    install_subgraph = task.WorkflowTask(
        heal_install,
        failing_nodes=failed_node_subgraph,
        targeted_nodes=targeted_nodes)
    graph.sequence(uninstall_subgraph, install_subgraph)
Built-in heal workflow.

:param ctx: workflow context
:param graph: graph which will describe the workflow
:param node_id: ID of the node to heal
:return:
625941b8bf627c535bc1302b
def connect_JSON(config):
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)
    if 'rpcport' not in config:
        config['rpcport'] = 19887 if testnet else 9887
    # restore the URL template mangled by email obfuscation: user:pass@localhost
    connect = "http://%s:%s@127.0.0.1:%s" % (config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at " + connect + " testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at " + connect + "\n")
        sys.exit(1)
Connect to a bitcoin JSON-RPC server
625941b8435de62698dfdaa9
def uid(self, coordinates):
    return "{:2.6f}{:2.6f}{}".format(
        coordinates[CONF_LATITUDE], coordinates[CONF_LONGITUDE], self.type
    )
Generate a unique id using coordinates and sensor type.
625941b82ae34c7f2600cf87
def __init__(self, patterns, n=None):
    self.patterns = np.array([pattern.flatten() for pattern in patterns])
    self.unique_patterns, idx, counts = np.unique(self.patterns, axis=0,
                                                  return_index=True,
                                                  return_counts=True)
    self.pattern_dim = dict(zip(idx, counts))
    self.N = np.shape(self.patterns)[1]
    if n is None:
        self.n = 1 / self.N
    else:
        self.n = n
    self.W = self.get_weights()
patterns : list or array of flattened patterns to be stored
n : learning rate
625941b88c0ade5d55d3e814
def on_session_ended(session_ended_request, context):
    pass
Handle session clean up when the session should end.
625941b810dbd63aa1bd2a04
def get_vels_Crust1(location):
    lat, lon = location
    all_lons = np.arange(-179.5, 180, 1)
    all_lats = np.arange(89.5, -90, -1)
    i = int((lon - all_lons[0]) + ((all_lats[0] - lat) // 1) * len(all_lons))
    nm = 'data/earth_models/crust1/crust1.'
    try:
        cb = pd.read_csv(nm + 'bnds', skiprows=i, nrows=1, header=None,
                         sep='\s+').values.flatten()
        vs = pd.read_csv(nm + 'vs', skiprows=i, nrows=1, header=None,
                         sep='\s+').values.flatten()
    except:
        crust1url = 'http://igppweb.ucsd.edu/~gabi/crust1/crust1.0.tar.gz'
        print('You need to download (and extract) the Crust1.0 model' +
              ' from \n\t{} \nand save to \n\t{}'.format(crust1url, nm[:-7]))
        return
    thickness = -np.diff(cb)
    ib = 0
    m_t = [0]
    m_vs = []
    for t in thickness:
        if t > 0:
            m_vs += [vs[ib]]
            m_t += [t]
        ib += 1
    m_vs += [vs[ib]]
    return m_t, m_vs
Crust 1.0 is given in a 1 degree x 1 degree grid (i.e. 360 lon points, 180 lat points).

The downloads are structured as

crust1.bnds    (360 * 180) x 9 depths to top of each layer
    0. water (i.e. topography)
    1. ice (i.e. bathymetry)
    2. upper sediments (i.e. depth to rock)
    3. middle sediments
    4. lower sediments
    5. upper crust (i.e. depth to bedrock)
    6. middle crust
    7. lower crust
    8. mantle (i.e. Moho depth)

Note that for places where a layer doesn't exist, the difference between bnds[i, n] and bnds[i, n+1] = 0; i.e. for continents, the top of the ice is the same as the top of the water; where there are no glaciers, the top of sediments is the same as the top of the ice, etc.

crust1.[rho|vp|vs]    (360 * 180) x 9 values of density, Vp, Vs for each of the layers specified in bnds

Each row in these datasets steps first in longitude (from -179.5 to +179.5) then in latitude (from 89.5 to -89.5), i.e. the index of (lat, lon) will be at (lon + 179.5) + (89.5 - lat) * 360.
625941b8baa26c4b54cb0f78
def bulk_lookup(datasets, cell, method='poisson'):
    comp_func = None
    if method == 'poisson':
        comp_func = log_prob_poisson
    elif method == 'spearman' or method == 'rank_corr':
        comp_func = rank_correlation
    elif method == 'cosine':
        comp_func = cosine
    elif method == 'corr' or method == 'pearson':
        comp_func = pearson_correlation
    scores = []
    for name, d in datasets.items():
        s = comp_func(d, cell)
        scores.append((name, s))
    scores.sort(key=lambda x: x[1], reverse=True)
    return scores
Returns a list of (dataset, value) pairs sorted by descending value, where value indicates similarity between the cell and the dataset.

Potential metrics:
    - corr/pearson
    - rank_corr/spearman
    - cosine (normalized cosine distance)
    - poisson (log-probability)

Test NMI results on 10x_400, all genes:
    Poisson: 1.0
    Spearman: 0.97
    Cosine: 0.85
    Pearson: 0.85

Args:
    bulk_datasets (dict): dict of (name, 1d np array)
    cell (array): 1d array

Returns:
    list of (bulk_name, similarity_value) sorted in descending similarity
625941b8796e427e537b0417
def post():
    return db.test_post.insert(**dict(request.vars))
Test for JSON POST

#curl -i -X POST http://127.0.0.1:8000/sahana/test/post -H "Content-Type: application/json" -d {"name": "John"}
#curl -i -X POST http://127.0.0.1:8000/sahana/test/post -H "Content-Type: application/json" -d @test.json

Web2Py forms are multipart/form-data POST forms

curl -i -X POST http://127.0.0.1:8000/sahana/test/post -F name=john
curl -i -X POST --data-urlencode "name=Colombo Shelter" http://127.0.0.1:8000/sahana/test/post
625941b824f1403a926009bf
def randomcase(payload, **kwargs):
    retVal = payload
    if payload:
        for match in re.finditer(r"[A-Za-z_]+", retVal):
            word = match.group()
            if word.upper() in kb.keywords:
                while True:
                    _ = ""
                    for i in xrange(len(word)):
                        _ += word[i].upper() if randomRange(0, 1) else word[i].lower()
                    if len(_) > 1 and _ not in (_.lower(), _.upper()):
                        break
                retVal = retVal.replace(word, _)
    return retVal
Replaces each keyword character with random case value

Tested against:
    * Microsoft SQL Server 2005
    * MySQL 4, 5.0 and 5.5
    * Oracle 10g
    * PostgreSQL 8.3, 8.4, 9.0

Notes:
    * Useful to bypass very weak and bespoke web application firewalls that have poorly written permissive regular expressions
    * This tamper script should work against all (?) databases

>>> import random
>>> random.seed(0)
>>> tamper('INSERT')
'INseRt'
625941b8be7bc26dc91cd45a
def do_update(self, name, data):
    self.emit("update", data)
Send a websocket update event to the client.
625941b80c0af96317bb803e
def show(self):
    sHtml = '<div class="tmvcstimeline tmvcstimelinetooltip">\n';
    oCurDate = None;
    for oEntry in self.aoEntries:
        oTsZulu = db.dbTimestampToZuluDatetime(oEntry.tsCreated);
        if oCurDate is None or oCurDate != oTsZulu.date():
            if oCurDate is not None:
                sHtml += ' </dl>\n'
            oCurDate = oTsZulu.date();
            sHtml += ' <h2>%s:</h2>\n' ' <dl>\n' % (oTsZulu.strftime('%Y-%m-%d'),);
        sEntry = ' <dt id="r%s">' % (oEntry.iRevision,);
        sEntry += '<a href="%s">' % (webutils.escapeAttr(config.g_ksTracChangsetUrlFmt
                                                         % {'iRevision': oEntry.iRevision,
                                                            'sRepository': oEntry.sRepository,}),);
        sEntry += '<span class="tmvcstimeline-time">%s</span>' % (oTsZulu.strftime('%H:%MZ'),);
        sEntry += ' Changeset <span class="tmvcstimeline-rev">[%s]</span>' % (oEntry.iRevision,);
        sEntry += ' by <span class="tmvcstimeline-author">%s</span>' % (webutils.escapeElem(oEntry.sAuthor),);
        sEntry += '</a>\n';
        sEntry += '</dt>\n';
        sEntry += ' <dd>%s</dd>\n' % (webutils.escapeElem(oEntry.sMessage),);
        sHtml += sEntry;
    if oCurDate is not None:
        sHtml += ' </dl>\n';
    sHtml += '</div>\n';
    return ('VCS History Tooltip', sHtml);
Generates the tooltip. Returns (sTitle, HTML).
625941b8e8904600ed9f1d7e
def nameserver_check_scheduler(heartbeat_obj):
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(heartbeat_obj.nameserver_check, 'cron',
                  second=("*/%s" % int(heartbeat_obj.configuration['heartbeat']['default']['interval'])))
    retries_check = int(heartbeat_obj.configuration['heartbeat']['default']['init_retries'])
    retry_wait = 10
    while retries_check != 0:
        try:
            heartbeat_obj.nameservers.next()
        except StopIteration:
            pretty_log("Heartbeat scheduler not initialized yet... Will retry %s times..." % retries_check)
            pretty_log("Will retry in %s seconds" % retry_wait)
            retries_check -= 1
            sleep(retry_wait)
        else:
            pretty_log("Heartbeat scheduler initialized...")
            return True
    else:
        pretty_log("Heartbeat scheduler error!")
        return False
Schedule the check using the heartbeat object
625941b8293b9510aa2c30ee
def start_interpetration_reforms(self):
    self.__reforms__ = []
    reform_apply_fun_found = False
    current_reform_index = -1
    current_reform = None
    reform_found = False
    with open(self.__reforms_file_path__, 'r') as content_variable:
        for line in content_variable.readlines():
            line = line.strip()
            if '#' in line:
                line = line[:line.find('#')]
            if line:
                if reform_apply_fun_found:
                    if 'class' in line and '(Reform):' in line:
                        reform_apply_fun_found = False
                    else:
                        dict_action = {}
                        if 'modify_parameters' in line:
                            line = line.split('=')[1]
                            line = (line[:line.find(')')].strip()).replace("self.", "")
                            dict_action[line] = "modify_parameters"
                            current_reform.append_reform_action(dict_action)
                        elif 'update_variable' in line:
                            line = (line[(line.find('(') + 1):line.find(')')].strip()).replace("self.", "")
                            line = line.replace("\'", "")
                            dict_action[line] = "update_variable"
                            current_reform.append_reform_action(dict_action)
                        elif 'add_variable' in line:
                            line = (line[(line.find('(') + 1):line.find(')')].strip()).replace("self.", "")
                            line = line.replace("\'", "")
                            dict_action[line] = "add_variable"
                            current_reform.append_reform_action(dict_action)
                        elif 'neutralize_variable' in line:
                            line = (line[(line.find('(\'') + 2):line.find('\')')].strip()).replace("self.", "")
                            line = line.replace("\'", "")
                            dict_action[line] = "neutralize_variable"
                            current_reform.append_reform_action(dict_action)
                pieces = line.split('=')
                if 'class' in pieces[0] and '(Reform):' in pieces[0]:
                    reform_found = True
                    reform_apply_fun_found = False
                    current_reform_index = current_reform_index + 1
                    reform_name = pieces[0]
                    for chs in ['class', '(Reform):']:
                        reform_name = reform_name.replace(chs, '')
                    current_reform = Reform_for_writing(reform_name=reform_name)
                    current_reform.set_reform_actions(reform_actions=[])
                    self.__reforms__.append(current_reform)
                elif 'name ' in pieces[0] and reform_found == True:
                    full_name = pieces[1]
                    for chs in ['u"', '\"']:
                        full_name = full_name.replace(chs, '')
                    current_reform.set_reform_full_name(full_name)
                elif 'def apply(self):' in pieces[0]:
                    reform_found = False
                    reform_apply_fun_found = True
    self.__find_and_bind_variables__()
    self.__find_and_bind_modifier_func__()
Start interpretation of the reforms file.
625941b8442bda511e8be27b
def adapt(self, data, reset_state=True):
    if not reset_state:
        raise ValueError("CategoricalEncoding does not support streaming adapts.")
    if self._called and self._max_tokens is None:
        raise RuntimeError(
            "CategoricalEncoding can't be adapted after being called "
            "if max_tokens is None.")
    super(CategoricalEncoding, self).adapt(data, reset_state)
Fits the state of the preprocessing layer to the dataset.

Overrides the default adapt method to apply relevant preprocessing to the inputs before passing to the combiner.

Arguments:
    data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array.
    reset_state: Optional argument specifying whether to clear the state of the layer at the start of the call to `adapt`. This must be True for this layer, which does not support repeated calls to `adapt`.

Raises:
    RuntimeError: if the layer cannot be adapted at this time.
625941b856ac1b37e6264035
def is_prime(n):
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
Returns True if a positive integer n is prime and False otherwise.

n: A positive integer
625941b821a7993f00bc7b3f
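A quick sanity check of is_prime (hypothetical usage). One caveat worth knowing: for n = 1 the loop body never runs, so the function returns True even though 1 is not prime by the usual convention; callers should guard n >= 2 themselves.

import math  # required by is_prime

assert is_prime(2) and is_prime(7)
assert not is_prime(9)
print(is_prime(1))  # True -- see the caveat above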
def blpop(self, keys, timeout=0, **options):
    keys = native_str(keys)
    if isinstance(keys, str):
        keys = [keys]
    else:
        keys = list(keys)
    keys.append(timeout)
    return self.execute_command('BLPOP', *keys, **options)
LPOP a value off of the first non-empty list named in the ``keys`` list. If none of the lists in ``keys`` has a value to LPOP, then block for ``timeout`` seconds, or until a value gets pushed on to one of the lists. If timeout is 0, then block indefinitely.
625941b823e79379d52ee3bd
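A usage sketch for blpop (assuming a connected redis-py client named r; the key names are hypothetical): the method appends the timeout as the final BLPOP argument, so a call like the one below blocks up to five seconds across both lists.

# result = r.blpop(['jobs:high', 'jobs:low'], timeout=5)
# result is a (key, value) pair from the first non-empty list, or None on timeout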
def htmlentities_decode(self, string):
    return htmlentities_decode(string)
decodes htmlentities
625941b87cff6e4e811177db
def main(argv):
    data = load_data("car_sales.json")
    summary = process_data(data)
    new_summary = '\n'.join(summary)
    print(summary)
    report('/tmp/cars.pdf', "Cars report", new_summary, cars_dict_to_table(data))
    msg = email_generate("[email protected]", "[email protected]",
                         "Sales summary for last month", new_summary, "/tmp/cars.pdf")
    email_send(msg)
Process the JSON data and generate a full report out of it.
625941b8a8ecb033257d2f2b
def _sanitize_key(key) -> str:
    return str(key).lower().replace(" ", "_")
Sanitize the location or group key to look up.

Args:
    key: The key to sanitize
625941b80c0af96317bb803f
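A brief usage sketch for _sanitize_key (hypothetical inputs): the helper coerces to str, lowercases, and replaces spaces with underscores, so lookups become case- and spacing-insensitive.

assert _sanitize_key("Living Room") == "living_room"
assert _sanitize_key(42) == "42"  # non-string keys are coerced via str()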
def update(self):
    if self.__health_points <= 0:
        self.__dead = True
    if self.__finished:
        self.rect.center = (1500, 0)
    if self.__counter % self.__counter_list1[self.__current_monster - 1] == 0 and not self.__dead:
        if self.__current_monster != 4:
            try:
                self.__index += 1
                self.image = self.__moving[self.__index]
            except IndexError:
                self.__index = 0
            if self.rect.left > 0 and self.__dx < 0:
                self.rect.left += 3 * self.__dx
            elif self.rect.right < self.__screen.get_width() and self.__dx > 0:
                self.rect.left += 3 * self.__dx
        else:
            try:
                self.__index += 1
                if self.__going_left:
                    self.image = self.__attacking_left4[self.__index]
                else:
                    self.image = self.__attacking_right4[self.__index]
            except IndexError:
                self.__index = 0
            if ((self.rect.left > 0) and (self.__dx < 0)) or ((self.rect.right < self.__screen.get_width()) and (self.__dx > 0)):
                self.rect.left += 3 * self.__dx
            else:
                self.__dx = self.__dx * -1
                self.__going_left = not self.__going_left
            if ((self.rect.top > 0) and (self.__dy > 0)) or ((self.rect.bottom < self.__screen.get_height() - 70) and (self.__dy < 0)):
                self.rect.top -= 3 * self.__dy
            else:
                self.__dy = -self.__dy
    if self.__counter % self.__counter_list2[self.__current_monster - 1] == 0 and self.__dead:
        try:
            self.__dying_index += 1
            if self.__current_monster != 4:
                self.image = self.__dying[self.__dying_index]
            else:
                if self.__going_left:
                    self.image = self.__dying_left4[self.__dying_index]
                else:
                    self.image = self.__dying_right4[self.__dying_index]
        except IndexError:
            self.__finished = True
            self.__dead = False
    if self.__map_moving:
        self.rect.centerx += self.__move
        self.__map_moving = False
    if not self.__finished:
        self.__position = self.rect.center
    self.__counter += 1
This method repositions the image on the screen and iterates through lists of images according to different Boolean variables, producing the animations. It also checks whether the monster has reached the edge of the screen. If the monster has died, it moves it off screen where it cannot be hit by the player.
625941b83c8af77a43ae35f4
def __init__(self, algorithm, length, value,
             format_type=enums.KeyFormatType.X_509, masks=None,
             name='Public Key', key_wrapping_data=None):
    super(PublicKey, self).__init__(
        key_wrapping_data=key_wrapping_data
    )
    self._object_type = enums.ObjectType.PUBLIC_KEY
    self._valid_formats = [
        enums.KeyFormatType.RAW,
        enums.KeyFormatType.X_509,
        enums.KeyFormatType.PKCS_1]
    self.value = value
    self.cryptographic_algorithm = algorithm
    self.cryptographic_length = length
    self.key_format_type = format_type
    self.names = [name]
    if masks:
        self.cryptographic_usage_masks = masks
    self._cryptographic_domain_parameters = list()
    self.validate()
Create a PublicKey.

Args:
    algorithm(CryptographicAlgorithm): An enumeration identifying the type of algorithm for the key.
    length(int): The length in bits of the key.
    value(bytes): The bytes representing the key.
    format_type(KeyFormatType): An enumeration defining the format of the key value. Optional, defaults to enums.KeyFormatType.X_509.
    masks(list): A list of CryptographicUsageMask enumerations defining how the key will be used. Optional, defaults to None.
    name(string): The string name of the key. Optional, defaults to 'Public Key'.
    key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. Optional, defaults to None.
625941b85166f23b2e1a4faf
def _test_path(self, path, expected_posix_path, expected_windows_path,
               relative_to_posix=None, relative_to_windows=None):
    self.assertEqual(
        normalize_platform_path(path,
                                relative_to=relative_to_posix,
                                target_platform=PathPlatform.POSIX),
        expected_posix_path)
    self.assertEqual(
        normalize_platform_path(path,
                                relative_to=relative_to_windows,
                                target_platform=PathPlatform.WINDOWS),
        expected_windows_path)
Test path normalization against Windows and POSIX paths.

This will test that a path normalizes correctly on both Windows and Linux, with or without relative paths.

Args:
    path (unicode): The path to normalize.
    expected_posix_path (unicode): The expected resulting POSIX path.
    expected_windows_path (unicode): The expected resulting Windows path.
    relative_to_posix (unicode, optional): An optional path to prepend to the normalized POSIX path.
    relative_to_windows (unicode, optional): An optional path to prepend to the normalized Windows path.

Raises:
    AssertionError: A normalized path did not equal the expected result.
625941b8e5267d203edcdaf6
def p_for(p):
    p[0] = AST.ForNode([AST.IdNumNode(p[3]), p[5], p[7], p[9]])
instruction : FOR EXPR_START IDENTIFIANT FOR_SEP expression FOR_SEP expression EXPR_END bloc
625941b8009cb60464c63212
def reset_logging():
    for handler in logging.getLogger().handlers:
        if getattr(handler, '_debug_handler', False):
            handler.stream.close()
    logging.getLogger().handlers = []
Reset logging config
625941b821bff66bcd6847ab
def __str__(self):
    return self.nama_bahan
String for representing the MyModelName object (in Admin site etc.)
625941b8cc40096d615957a9
def test_link_self(self):
    self.component.repo = "weblate://test/test"
    self.component.push = ""
    self.assertRaisesMessage(
        ValidationError,
        "Invalid link to a Weblate project, cannot link it to itself!",
        self.component.full_clean,
    )
Link pointing to self.
625941b87b180e01f3dc465b
def draw_maze_obstacle(x, y):
    draw_obstacle_line_x_pos(x, y)
    draw_obstacle_line_x_neg(x, y)
    draw_obstacle_line_y_neg(x, y)
    draw_obstacle_line_y_pos(x, y)
This function draws a maze obstacle by drawing its four boundary lines around (x, y).
625941b8e5267d203edcdaf7
def GetStore(location=None, only_files=()):
    global _pid_store
    if not _pid_store:
        _pid_store = PidStore()
        if not location:
            location = PidStoreLocation.location
        pid_files = []
        for file_name in os.listdir(location):
            if not file_name.endswith('.proto'):
                continue
            if only_files and file_name not in only_files:
                continue
            pid_files.append(os.path.join(location, file_name))
        _pid_store.Load(pid_files)
    REQUIRED_PIDS = ['DEVICE_INFO', 'QUEUED_MESSAGE', 'SUPPORTED_PARAMETERS']
    for pid in REQUIRED_PIDS:
        if not _pid_store.GetName(pid):
            raise MissingPLASAPIDs(
                'Could not find %s in PID datastore, check the directory contains '
                'the ESTA (PLASA) PIDs.' % pid)
    return _pid_store
Get the instance of the PIDStore.

Args:
    location: The location to load the store from. If not specified it uses the location defined in PidStoreLocation.py
    only_files: Load a subset of the files in the location.

Returns:
    An instance of PidStore.
625941b891af0d3eaac9b86a
def test_rebuild(self):
    instance = jsonutils.to_primitive(self._create_fake_instance())
    instance_uuid = instance['uuid']
    image_ref = instance['image_ref']
    self.compute.run_instance(self.context, instance_uuid)
    self.compute.rebuild_instance(self.context, image_ref, image_ref, instance=instance)
    self.compute.terminate_instance(self.context, instance=instance)
Ensure instance can be rebuilt
625941b80a366e3fb873e66d
def RemoveScheduledComponent(self, comp):
    self._scheduled_instances.remove(comp)
Remove a component that was scheduled.
625941b8283ffb24f3c55762
def reset_window(self):
    self.ui.graphicsView_plotView.reset_plots()
Reset the window to the initial state, such that no plot is made on the canvas.

:return:
625941b894891a1f4081b8fe
def choose_best_feature(dataset):
    if not dataset:
        raise ValueError("dataset is empty.")
    num_feature = len(dataset[0]) - 1
    base_entropy = calc_shannon_ent(dataset)
    best_info_gain, best_feature = 0.0, -1
    for i in range(num_feature):
        ret = split_dataset(dataset, i)
        new_entropy = 0.0
        for feat, items in ret.items():
            prob = len(items) / float(len(dataset))
            new_entropy += prob * calc_shannon_ent(items)
        info_gain = base_entropy - new_entropy
        if info_gain > best_info_gain:
            best_info_gain = info_gain
            best_feature = i
    return best_feature
Select the feature with the largest information gain.

:param dataset: the dataset
:return: the id of the feature with the largest information gain
625941b8187af65679ca4f73
@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
    CPP().send_sms_code(mobile, [sms_code, constants.SMS_CODE_REDIS_EXPIRES], 1)
Define the asynchronous task that sends an SMS.

:param mobile: phone number
:param sms_code: SMS verification code
:return: None
625941b899cbb53fe6792a3d
def get_filepath(self, len_thresh):
    if len(self.url_path.rsplit('/')) >= URL_PATH_DEPTH_THRESH:
        filepath = '/'.join(self.url_path.rsplit('/')[-1 * URL_PATH_DEPTH_THRESH:-1])
        if len(filepath) > len_thresh:
            return filepath
    return ""
Get the file path.

:param len_thresh:
:return:
625941b866656f66f7cbc000
def test_2_Reward_shopping_order_refund(self):
    logging.debug("test_2_return_sameCodeAndBar_order")
    market_service.rewards_order_refund(globals()['shopping_order_id'])
    self._test_data.update_post_verify_data()
    self.expectedData(0, 121, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    self._data_assertion()
Back-office refund of the reward item 'hmr combo product', 1 unit.

:return:
625941b88e7ae83300e4ae22
def test_basic_file_mapping():
    assert_raises(ValueError, get_file_paths,
                  subject=tconf.subject, data_type='sushi',
                  output='raw', run_index=0, hcp_path=tconf.hcp_path)
    assert_raises(ValueError, get_file_paths,
                  subject=tconf.subject, data_type='rest',
                  output='kimchi', run_index=0, hcp_path=tconf.hcp_path)
    for run_index in range(3):
        for output in tconf.hcp_outputs:
            for data_type in tconf.hcp_data_types:
                if run_index >= len(run_map[data_type]):
                    assert_raises(
                        ValueError, get_file_paths,
                        subject=tconf.subject, data_type=data_type,
                        output=output, run_index=run_index,
                        hcp_path=tconf.hcp_path)
                elif (data_type in ('rest', 'noise_subject', 'noise_empty_room')
                        and output in ('trial_info', 'evoked')):
                    assert_raises(
                        ValueError, get_file_paths,
                        subject=tconf.subject, data_type=data_type,
                        output=output, run_index=run_index,
                        hcp_path=tconf.hcp_path)
                elif (data_type in ('noise_subject', 'noise_empty_room')
                        and output in ('epochs', 'evoked', 'ica', 'annot')):
                    assert_raises(
                        ValueError, get_file_paths,
                        subject=tconf.subject, data_type=data_type,
                        output=output, run_index=run_index,
                        hcp_path=tconf.hcp_path)
                else:
                    file_names = get_file_paths(
                        subject=tconf.subject, data_type=data_type,
                        output=output, run_index=run_index,
                        hcp_path=tconf.hcp_path)
                    if output == 'raw':
                        assert_equal(
                            sum('config' in fn for fn in file_names), 1)
                        assert_equal(
                            sum('c,rfDC' in fn for fn in file_names), 1)
Test construction of file paths and names
625941b8d58c6744b4257ab7
def __init__(self, token=None):
    self._token = None
    self.discriminator = None
    if token is not None:
        self.token = token
BTDeviceTokenParams - a model defined in OpenAPI
625941b885dfad0860c3acaf
def WaitForEvent(self, tab, selector, event_name, timeout):
    util.WaitFor(lambda: self.HasEventCompleted(tab, selector, event_name),
                 timeout=timeout)
Halts media action until the selector's event is fired.

Args:
    tab: The tab to check for event on.
    selector: Media element selector.
    event_name: Name of the event to check if fired or not.
    timeout: Timeout to check for event, throws an exception if not fired.
625941b8a17c0f6771cbdeaa
def dist(a, b=Atoms('X', positions=[(0, 0, 0)])):
    return ((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2) ** 0.5
This function calculates the distance of atom a from atom b. The default for atom b is an X species at the origin.
625941b8be383301e01b52e3
def addResiduesToSeqToStructMap(self, chainType, chainId, seqStr, resIds):
    assert len(seqStr) == len(resIds), "error, there must be the same number of residues as amino acids are in sequence"
    for key in sorted(self.seqToStruct):
        if key[:2] == (chainType, chainId):
            del self.seqToStruct[key]
    listOfResIdsSeqIds = []
    for i, resId in enumerate(resIds):
        key_seq2Struct = (chainType, chainId, i)
        flag = " "
        if resId == "-":
            continue
        if not resId[-1].isdigit():
            flag = resId[-1]
            resId = resId[:-1]
        else:
            resId = int(resId)
        tuple_resId = (" ", resId, flag)
        self.seqToStruct[key_seq2Struct] = tuple_resId
        listOfResIdsSeqIds.append((i, seqStr[i], tuple_resId))
        key_struct2Seq = (chainType, chainId, tuple_resId)
        if key_struct2Seq in self.structToSeq:
            self.structToSeq[key_struct2Seq] = i
    outNameSeqStructMap, prefixAndChainType = self.seqToStructFnames[(chainType, chainId)]
    __, outNameFasta = self.seqsDict[chainType][chainId]
    self.seqsDict[chainType][chainId] = (seqStr, outNameFasta)
    self.saveFasta(seqStr, outNameFasta)
    self.saveSeqStructMap(listOfResIdsSeqIds, outNameSeqStructMap)
Given an already mapped seq-to-struct object, modify one chain to potentially add new residues. Needed if 3dcons is used, as it generally reports all residues in the sequence and not just those included in the pdb.
625941b8ac7a0e7691ed3f30
def check_fleet_edges(ai_settings, aliens):
    for alien in aliens.sprites():
        if alien.check_edges():
            change_fleet_direction(ai_settings, aliens)
            break
Respond appropriately if any alien reaches the edge of the screen.
625941b89f2886367277a6e7
def __init__(self, gen_pref, gen_feat, dim, n_cols, solver):
    self.gen_pref = gen_pref
    self.gen_feat = gen_feat
    self.n_cols = n_cols
    self.dim = dim
    self.solver = solver
    self.pref_list = self.gen_pref.get_all_prefs()
    self.n_rows = len(self.pref_list)
    self.col_list = []
    self.col_set = set()
    self.M = np.zeros((self.n_rows, self.n_cols))
    self.Q = None
Initializes all the useful structures.

:param gen_pref: the preference generator. See <:genP.GenMacroP> and <:genP.GenMicroP>
:param gen_feat: the feature generator
:param dim: number of possible labels
:param n_cols: number of columns of the matrix sub-game
:param solver: game solver. See for example <:solvers.FictitiousPlay>
:type gen_pref: object of class which inherits from <:genP.GenP>, e.g., GenMacroP
:type gen_feat: object of class which inherits from <:genF.GenF>, e.g., GenHPolyF
:type dim: int
:type n_cols: int
:type solver: object of class which inherits from <:solvers.Solver>
625941b8c4546d3d9de72887
def drop(n, L):
    if n == 0:
        return L
    if L == []:
        return []
    return drop(n - 1, L[1:])
Returns the list L[n:].
625941b83eb6a72ae02ec331
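A quick usage sketch for drop (hypothetical values): the recursion peels one element per call until n hits 0 or the list is exhausted, matching Python's L[n:] slice for n >= 0.

assert drop(2, [1, 2, 3, 4]) == [3, 4]
assert drop(0, ['a']) == ['a']
assert drop(5, [1, 2]) == []  # dropping past the end yields [], like [1, 2][5:]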
def copy_statistics_csv_file(self):
    webpath = os.path.join(self.config['AUTOCMS_WEBDIR'], self.testname)
    src_stats = os.path.join(self.config['AUTOCMS_BASEDIR'], self.testname, 'statistics.csv')
    dst_stats = os.path.join(webpath, 'statistics.csv')
    shutil.copyfile(src_stats, dst_stats)
Copy the statistics file to the webdir.
625941b83d592f4c4ed1ced6
def get_select_query_results(connection, query, parameters=None):
    cursor = connection.cursor()
    if parameters is not None:
        cursor.execute(query, parameters)
    else:
        cursor.execute(query)
    return cursor
Executes the specified query with the specified tuple of parameters. Returns a cursor for the query results. Raises an exception if the query fails for any reason.
625941b8d8ef3951e3243394
def onEnemyHit(self, laser):
    self.lasers.remove(laser)
    self.score += Level.SCORE_PER_HIT
    if self.score >= Level.SCORE_TO_WIN:
        self.endTheGame()
Handle an enemy hit: remove the laser, award points, and end the game once the winning score is reached.
625941b8ec188e330fd5a5fd
def make_copies(service, draft_id, n):
    draft_response = service.users().drafts().get(userId="me", id=draft_id, format="raw").execute()
    raw_response = {'raw': draft_response["message"]["raw"]}
    message = {'message': raw_response}
    try:
        for x in range(int(n)):
            draft = service.users().drafts().create(userId="me", body=message).execute()
            print("draft number " + str(x + 1) + " created")
        return True
    except Exception as err:
        print(err)
        return False
Make copies of the draft.

:param service: authenticated gmail service
:param draft_id: GMail draft ID
:param n: number of copies
:return: True if successful, False otherwise
625941b8cb5e8a47e48b7906
def list_sheets(self, spreadsheet_id: str) -> List[str]:
    with self.build_sheets_api() as sheets_api:
        spreadsheet_data = (
            sheets_api.spreadsheets()
            .get(
                spreadsheetId=spreadsheet_id,
                fields='sheets.properties.title,sheets.properties.sheetType',
            )
            .execute(**self._google_client_request_kwargs())
        )
    return [
        sheet['properties']['title']
        for sheet in spreadsheet_data['sheets']
        if sheet['properties']['sheetType'] == 'GRID'
    ]
List available sheets
625941b838b623060ff0ac45
def test_callback(self):
    callback = 'jsonp123'
    response = self.call(self.url, {'appId': self.app.get_token(), 'callback': callback})
    self.assert_(self._assertJson(json_string=response.content,
                                  status=200,
                                  message="",
                                  expected_data=['users'],
                                  callback=callback))
Test that the users for a valid app are retrieved in callbacks
625941b855399d3f0558850a
def reduce(self, ratings):
    ratings_copy = ratings[::]
    for index, item in enumerate(ratings_copy):
        restaurant, score = ratings_copy[index]
        if score is None:
            ratings.remove(item)
    return ratings
Remove ratings from heap that are None.
625941b89c8ee82313fbb5cb
def get_processing_value(param: QgsProcessingParameterDefinition, inp: WPSInput,
                         context: ProcessingContext) -> Any:
    typ = param.type()
    if typ in ('fileDestination', 'folderDestination'):
        value = basename(normpath(inp[0].data))
        if value != inp[0].data:
            LOGGER.warning("Value for file or folder destination '%s' has been truncated from '%s' to '%s'",
                           param.name(), inp[0].data, value)
        if typ == 'fileDestination':
            value = Path(value).with_suffix('.' + param.defaultFileExtension()).as_posix()
    elif typ == 'file':
        outputfile = (Path(context.workdir) / param.name()).with_suffix(param.extension())
        LOGGER.debug("Saving input data as %s", outputfile.as_posix())
        inp[0].download_ref(outputfile)
        value = outputfile.name
    else:
        value = None
    return value
Return the processing value from wps inputs. Processes inputs other than layers.
625941b8f9cc0f698b14045c
def _yank_particles(self, num_records):
    particles_returned = 0
    if self._state is not None and StateKey.PARTICLES_RETURNED in self._state and self._state[StateKey.PARTICLES_RETURNED] > 0:
        particles_returned = self._state[StateKey.PARTICLES_RETURNED]
    total_num_records = len(self._record_buffer)
    num_records_remaining = total_num_records - particles_returned
    if num_records_remaining < num_records:
        num_to_fetch = num_records_remaining
    else:
        num_to_fetch = num_records
    log.debug("Yanking %s records of %s requested", num_to_fetch, num_records)
    return_list = []
    end_range = particles_returned + num_to_fetch
    records_to_return = self._record_buffer[particles_returned:end_range]
    if len(records_to_return) > 0:
        self._state[StateKey.PARTICLES_RETURNED] = particles_returned + num_to_fetch
        for item in records_to_return:
            log.debug("Record to return: %s", item)
            return_list.append(item)
        self._publish_sample(return_list)
        log.trace("Sending parser state [%s] to driver", self._state)
        file_ingested = False
        if self.file_complete and total_num_records == self._state[StateKey.PARTICLES_RETURNED]:
            file_ingested = True
        self._state_callback(self._state, file_ingested)
    return return_list
Get particles out of the buffer and publish them. Update the state of what has been published, too.

@param num_records The number of particles to remove from the buffer
@retval A list with num_records elements from the buffer. If num_records cannot be collected (perhaps due to an EOF), the list will have the elements it was able to collect.
625941b830bbd722463cbc19
def test_market_survey_ontime(self):
    self.t1.engage_date = '2011-05-01'
    self.t2.engage_date = '2011-05-01'
    self.nl1.market_survey = True
    self.nl1.market_survey_date = '2011-05-03'
    self.nl2.market_survey = True
    self.nl2.market_survey_date = '2011-06-15'
    self.store.commit()
    r = Report(trans_obj='acquisition')
    r.buildReport()
    self.assertEqual(r.market_survey_ontime, Dec('50.0'))
Test that the market survey on time percentage is correct
625941b8b830903b967e976d
def input_path(self, *args):
    return os.path.join(self.sim_dir, self.get_name(), *args)
Given any arguments, relative to the simulation dir, return the absolute path.
625941b84c3428357757c181
def sample(self, A, c):
    k = np.round(c - c / 5.0)
    greedy_mdl = GREEDY(A, k=k, num_bases=c)
    greedy_mdl.factorize(compute_h=False, compute_err=False, niter=1)
    return greedy_mdl.select
Arguments
---------
A :
c :

Returns
-------
s : selection of samples/indices
625941b80383005118ecf43b
def show_fom(self, show=True):
    if show:
        self._fom_widget.show()
        fom = str(self._main_widget.fom)
        self._fom_line.setText(fom)
    else:
        self._fom_widget.hide()
Slot for the calculable fom.

:param show: if True, show the fom widget; otherwise hide it
:type show: bool
625941b83c8af77a43ae35f5
def cancel(self):
    new_act = self.actvalidationstate_set.count() > 1
    if self.date <= date.today():
        if new_act:
            self.set_state('ANNUL_NOUS', get_request().user)
        self.parent_event = None
        self.save()
    else:
        self.delete()
Parent event is canceled completely, or partially, act upon it.
625941b8a4f1c619b28afe98
def push(self, item):
    self.items.append(item)
Push a new element item onto the top of the stack.
625941b807f4c71912b112dd
def test_list_projects_default_domain(self):
    domain = unit.new_domain_ref()
    self.resource_api.create_domain(domain['id'], domain)
    project1 = unit.new_project_ref(domain_id=domain['id'])
    self.resource_api.create_project(project1['id'], project1)
    refs = self.resource_api.list_projects()
    self.assertThat(
        refs, matchers.HasLength(len(default_fixtures.TENANTS) + 3))
    refs = self.tenant_controller.get_all_projects(
        self.make_request(is_admin=True))
    self.assertEqual(len(default_fixtures.TENANTS), len(refs['tenants']))
    for tenant in default_fixtures.TENANTS:
        tenant_copy = tenant.copy()
        tenant_copy.pop('domain_id')
        tenant_copy.pop('parent_id')
        tenant_copy.pop('is_domain')
        self.assertIn(tenant_copy, refs['tenants'])
Test that list projects only returns those in the default domain.
625941b8d164cc6175782ba4
def quit(self):
    self.session.quit()
Quit session
625941b857b8e32f524832f7
def p_modset_lcb_rcb(p):
    p[0] = []
modset : LCB RCB
625941b8fbf16365ca6f6013
def __init__(
    self,
    export_name=None,
    member=None,
):
    if export_name is not None:
        self.export_name = export_name
    if member is not None:
        self.member = member
Keyword args:
    export_name (str): The name of the export to create when applying the export policy to the directory.
    member (ReferenceWithType): Reference to the directory to which the export policy may be applied. The `id` or `name` parameter is required, but cannot be set together. If the `name` parameter is set, `resource_type` must also be set.
625941b838b623060ff0ac46
def save_parameters(self, filetype='text'):
    if self.hasRider:
        pathToData = os.path.split(os.path.split(self.directory)[0])[0]
        pathToParDir = os.path.join(pathToData, 'riders', self.riderName, 'Parameters')
        pathToCombDir = os.path.join(pathToParDir, 'Combined')
        if not os.path.exists(pathToCombDir):
            os.makedirs(pathToCombDir)
        fileName = self.riderName + self.bicycleName
        psets = [x for x in self.riderPar.keys() if x != 'Measured']
        parameters = self.riderPar
        print(('This bicycle has a rider, {0}, so the data will be ' +
               'saved here: {1}').format(self.riderName, pathToParDir))
    else:
        pathToParDir = os.path.join(self.directory, 'Parameters')
        fileName = self.bicycleName
        psets = [x for x in self.parameters.keys() if x != 'Measured']
        parameters = self.parameters
        print(('This bicycle has no rider so the data will be ' +
               'saved here: {0}').format(pathToParDir))
    if filetype == 'text':
        for pset in psets:
            fileName = fileName + pset + '.txt'
            pathToTxtFile = os.path.join(pathToParDir, fileName)
            io.write_parameter_text_file(pathToTxtFile, parameters[pset])
            if self.hasRider:
                pathToCombFile = os.path.join(pathToCombDir, fileName)
                io.write_parameter_text_file(pathToCombFile, self.parameters[pset])
    elif filetype == 'matlab':
        raise NotImplementedError("Doesn't work yet.")
    elif filetype == 'pickle':
        raise NotImplementedError("Doesn't work yet.")
Saves all the parameter sets to file.

Parameters
----------
filetype : string, optional
    - 'text' : a text file with parameters as `c = 0.10+/-0.01 `
    - 'matlab' : matlab .mat file
    - 'pickle' : python pickled dictionary
625941b80a50d4780f666ce6
def setUp(self):
    from hardest.binary_validator import BinaryValidator
    self.instance = BinaryValidator()
Test setup.
625941b899cbb53fe6792a3e
def limit_offset_sql(self, low_mark, high_mark):
    limit, offset = self._get_limit_offset_params(low_mark, high_mark)
    return '%s%s' % (
        (' LIMIT %d' % limit) if limit else '',
        (' OFFSET %d' % offset) if offset else '',
    )
Return LIMIT/OFFSET SQL clause.
625941b8d99f1b3c44c673ed
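The formatting logic is easiest to see with concrete numbers. A minimal standalone sketch, assuming `_get_limit_offset_params` maps the slice [low_mark:high_mark] to limit = high_mark - low_mark and offset = low_mark, as in Django's BaseDatabaseOperations:

def limit_offset_sql(low_mark, high_mark):
    # mirror of the assumed _get_limit_offset_params contract
    limit = (high_mark - low_mark) if high_mark is not None else None
    offset = low_mark or 0
    return '%s%s' % (
        (' LIMIT %d' % limit) if limit else '',
        (' OFFSET %d' % offset) if offset else '',
    )

print(limit_offset_sql(5, 15))  # ' LIMIT 10 OFFSET 5'
print(limit_offset_sql(0, 10))  # ' LIMIT 10'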
def rob(self, root): <NEW_LINE> <INDENT> def helper(root): <NEW_LINE> <INDENT> if not root: <NEW_LINE> <INDENT> return 0, 0 <NEW_LINE> <DEDENT> left = helper(root.left) <NEW_LINE> right = helper(root.right) <NEW_LINE> v1 = root.val + left[1] + right[1] <NEW_LINE> v2 = max(left) + max(right) <NEW_LINE> return v1, v2 <NEW_LINE> <DEDENT> return max(helper(root))
:type root: TreeNode :rtype: int
625941b86fece00bbac2d592
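The inner helper returns a pair per node: (best total if this node is robbed, best total if it is skipped). A usage sketch, assuming the method lives on the usual LeetCode-style Solution class and that TreeNode has val/left/right (both assumptions, not shown in the source):

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# Tree:   3
#        / \
#       2   3
#        \    \
#         3    1
root = TreeNode(3, TreeNode(2, None, TreeNode(3)), TreeNode(3, None, TreeNode(1)))
print(Solution().rob(root))  # 7  (rob the root 3 plus the two grandchildren 3 + 1)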
def assert_fetch_redirects(self, view_name, args=None, kwargs=None): <NEW_LINE> <INDENT> response = response_from_view(view_name, args=args, kwargs=kwargs) <NEW_LINE> self.assertRedirects(response, reverse('openstates:api-key-required'))
Assert view redirects to error page when api-key is missing and in debug mode.
625941b8bf627c535bc1302d
def updateData_nv(self, entries): <NEW_LINE> <INDENT> self.ax.cla() <NEW_LINE> self.plotNvisits() <NEW_LINE> nv = float(entries['Nvisits'].get()) <NEW_LINE> if nv > 0: <NEW_LINE> <INDENT> self.plotnvisits(nvisits=nv) <NEW_LINE> <DEDENT> self.ax.set_xlim(self.zmin, self.zmax) <NEW_LINE> self.canvas.draw()
Method to update the figure according to requests made in the entries; zlim and filter allocation will be plotted here. Parameters ---------- entries : dict of tk.Entry
625941b88c0ade5d55d3e816
def sw_align(seqA, seqB, scorer=False, gap=-1): <NEW_LINE> <INDENT> seqA, seqB = _as_lists(seqA, seqB) <NEW_LINE> return malign.sw_align(seqA, seqB, scorer or _get_scorer(seqA, seqB), gap)
Carry out the traditional Smith-Waterman algorithm. Parameters ---------- seqA, seqB : {str, list, tuple} The input strings. These should be iterables, so you can use tuples, lists, or strings. scorer : dict (default=False) If set to False, a scorer will automatically be calculated; otherwise, the scorer needs to be passed as a dictionary that covers all segment matches between the input strings. gap : int (default=-1) The gap penalty. Notes ----- The Smith-Waterman algorithm (see :evobib:`Smith1981`) returns a local alignment between two sequences. A local alignment is an alignment of those subsequences of the input sequences that yields the highest score. Returns ------- alm : tuple A tuple consisting of prefix, alignment, and suffix of the first and the second sequence, and the alignment score. Examples -------- Align two sequences:: >>> seqA = 'fat cat' >>> seqB = 'catfat' >>> sw_align(seqA, seqB) (([], ['f', 'a', 't'], [' ', 'c', 'a', 't']), (['c', 'a', 't'], ['f', 'a', 't'], []), 3.0)
625941b82ae34c7f2600cf89
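The wrapper above delegates to a compiled `malign.sw_align`. A pure-Python sketch of the scoring recursion that routine implements — simplified to fixed match/mismatch scores, returning only the best local score with no traceback:

def sw_score(seqA, seqB, match=1.0, mismatch=-1.0, gap=-1.0):
    rows, cols = len(seqA) + 1, len(seqB) + 1
    M = [[0.0] * cols for _ in range(rows)]
    best = 0.0
    for i in range(1, rows):
        for j in range(1, cols):
            s = match if seqA[i - 1] == seqB[j - 1] else mismatch
            M[i][j] = max(0.0,                 # local alignment floor
                          M[i - 1][j - 1] + s,  # substitute/match
                          M[i - 1][j] + gap,    # gap in seqB
                          M[i][j - 1] + gap)    # gap in seqA
            best = max(best, M[i][j])
    return best

print(sw_score('fat cat', 'catfat'))  # 3.0, matching the docstring example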
def forward(self, x): <NEW_LINE> <INDENT> return PlusConstantOp.apply(x, self.const)
Use `PlusConstantOp.apply` to call the defined custom operator.
625941b8dc8b845886cb538b
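The operator itself is not shown in the source. A plausible definition, assuming the standard torch.autograd.Function pattern — the real PlusConstantOp may differ:

import torch

class PlusConstantOp(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, const):
        # nothing needs to be saved for backward
        return x + const

    @staticmethod
    def backward(ctx, grad_output):
        # d(x + c)/dx = 1; the constant receives no gradient
        return grad_output, None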
def num_deriv3(f, x, incr=0.001): <NEW_LINE> <INDENT> return (-f(x+3.*incr)+8.*f(x+2.*incr)-13.*f(x+incr)+13.*f(x-incr) - 8.*f(x-2.*incr)+f(x-3.*incr))/(8.*incr**3)
Return third derivative of f at x.
625941b8dc8b845886cb538c
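This seven-point stencil is fourth-order accurate, so it is exact (up to rounding) on low-degree polynomials, which makes for an easy sanity check:

# d^3/dx^3 (x**4) = 24*x, so at x = 2.0 the result should be ~48
print(num_deriv3(lambda x: x**4, 2.0))  # ~48.0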
def wls_fit_dki(design_matrix, data): <NEW_LINE> <INDENT> tol = 1e-6 <NEW_LINE> data = np.asarray(data) <NEW_LINE> data_flat = data.reshape((-1, data.shape[-1])) <NEW_LINE> dki_params = np.empty((len(data_flat), 27)) <NEW_LINE> min_diffusivity = tol / -design_matrix.min() <NEW_LINE> inv_design = np.linalg.pinv(design_matrix) <NEW_LINE> for vox in range(len(data_flat)): <NEW_LINE> <INDENT> dki_params[vox] = _wls_iter(design_matrix, inv_design, data_flat[vox], min_diffusivity) <NEW_LINE> <DEDENT> dki_params = dki_params.reshape((data.shape[:-1]) + (27,)) <NEW_LINE> return dki_params
Computes a weighted linear least squares (WLS) fit to calculate the diffusion tensor and kurtosis tensor using a weighted linear regression diffusion kurtosis model [1]_. Parameters ---------- design_matrix : array (g, 22) Design matrix holding the covariates used to solve for the regression coefficients. data : array (N, g) Data or response variables holding the data. Note that the last dimension should contain the data; no copies of the data are made. Returns ------- dki_params : array (N, 27) All parameters estimated from the diffusion kurtosis model for all N voxels. Parameters are ordered as follows: 1) Three eigenvalues of the diffusion tensor 2) Three lines of the eigenvector matrix, each containing the first, second, and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor References ---------- [1] Veraart, J., Sijbers, J., Sunaert, S., Leemans, A., Jeurissen, B., 2013. Weighted linear least squares estimation of diffusion MRI parameters: Strengths, limitations, and pitfalls. NeuroImage 81, 335-346.
625941b8d164cc6175782ba5
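The per-voxel work is delegated to the private `_wls_iter`. A hedged sketch of what one such step typically does for this model, following the cited Veraart et al. approach (an OLS first pass on the log-signal, then weights proportional to the squared predicted signal); the function name is illustrative, and the real implementation additionally enforces min_diffusivity and converts the 22 regression coefficients into the 27 eigen-decomposed parameters:

import numpy as np

def wls_iter_sketch(design_matrix, inv_design, sig):
    log_s = np.log(np.maximum(sig, 1e-10))   # guard against log(0)
    ols_coef = np.dot(inv_design, log_s)     # OLS first pass
    # WLS weights proportional to the squared predicted signal
    w = np.exp(np.dot(design_matrix, ols_coef)) ** 2
    A = design_matrix
    awa = A.T @ (w[:, None] * A)             # A^T W A without building diag(w)
    awy = A.T @ (w * log_s)                  # A^T W y
    return np.linalg.solve(awa, awy)         # 22 regression coefficients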
def __bool__(self): <NEW_LINE> <INDENT> return _math.VectorOfUInt64___bool__(self)
__bool__(self) -> bool
625941b8adb09d7d5db6c5ea
def get_env_type(env_name): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> env = gym.make(env_name) <NEW_LINE> del env <NEW_LINE> return 'gym' <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> print('{} is not a viable environment.'.format(env_name)) <NEW_LINE> return 'rl'
Get the type of environment ('gym' or 'rl') from the env_name string.
625941b88da39b475bd64dce
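A usage sketch, assuming gym is installed and 'CartPole-v1' is registered:

print(get_env_type('CartPole-v1'))     # 'gym'
print(get_env_type('not-a-real-env'))  # 'rl' (falls through to the except branch)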
def __getattr__(self, a, FloatOnly=False): <NEW_LINE> <INDENT> if a == 'comment': <NEW_LINE> <INDENT> return self.mergeComments() <NEW_LINE> <DEDENT> if a == 'props': <NEW_LINE> <INDENT> if self.geoms: <NEW_LINE> <INDENT> return self[0].props <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> <DEDENT> r = [] <NEW_LINE> for g in self.geoms: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> v = getattr(g, a) <NEW_LINE> if FloatOnly: <NEW_LINE> <INDENT> if type(v) is float: <NEW_LINE> <INDENT> r.append(v) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> r.append('') <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> r.append(v) <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> r.append('') <NEW_LINE> <DEDENT> <DEDENT> return r
:returns: parameter values collected from all geometries :rtype: list (or str, if 'comment' was requested)
625941b850485f2cf553cbf0
def __init__(self, name='u_net', pretrained_weights=None, input_size=(64, 64, 1)): <NEW_LINE> <INDENT> super(UNet, self).__init__(name, input_size) <NEW_LINE> self.layer_dict = dict([]) <NEW_LINE> self.architecture = Model() <NEW_LINE> self.define() <NEW_LINE> if pretrained_weights is not None: <NEW_LINE> <INDENT> self.architecture.load_weights(pretrained_weights)
:param pretrained_weights: optional path to pretrained weights, loaded via load_weights :param input_size: should be a multiple of 64
625941b8a17c0f6771cbdeab
def test_known_issues(self): <NEW_LINE> <INDENT> Triton = TritonContext() <NEW_LINE> Triton.setArchitecture(ARCH.X86) <NEW_LINE> Triton.taintRegister(Triton.registers.eax) <NEW_LINE> inst = Instruction() <NEW_LINE> inst.setOpcode(b"\x8D\x04\x06") <NEW_LINE> Triton.processing(inst) <NEW_LINE> self.assertTrue(Triton.isRegisterTainted(Triton.registers.eax)) <NEW_LINE> self.assertFalse(Triton.isRegisterTainted(Triton.registers.ebx))
Check tainting result after processing.
625941b81f5feb6acb0c49ac
def getTimeStepDuration(self): <NEW_LINE> <INDENT> return self.instance.timeStepDuration
Returns the timeStepDuration; it is a datetime.timedelta that defines the size of the time step.
625941b8be383301e01b52e4
@cli.group() <NEW_LINE> @click.pass_context <NEW_LINE> def monitor(ctx): <NEW_LINE> <INDENT> from .monitor import calculate_parallelism, get_user_logs, postprocess_jobdict, JobMonitor <NEW_LINE> moddict = {"calculate_parallelism":calculate_parallelism,"get_user_logs":get_user_logs,"postprocess_jobdict":postprocess_jobdict,"JobMonitor":JobMonitor} <NEW_LINE> ctx.obj["monitormod"] = moddict <NEW_LINE> return
Job monitoring functions.
625941b80c0af96317bb8040
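Subcommands would then hang off the group and pull the lazily imported helpers out of ctx.obj. A hypothetical example — the command name and the zero-argument call to get_user_logs are assumptions:

@monitor.command()
@click.pass_context
def logs(ctx):
    """Print recent user logs (hypothetical subcommand)."""
    get_user_logs = ctx.obj["monitormod"]["get_user_logs"]
    click.echo(get_user_logs())  # the real helper's signature may differ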
def deserialize_grades(json_string): <NEW_LINE> <INDENT> grades = dict() <NEW_LINE> if json_string: <NEW_LINE> <INDENT> data = json.loads(json_string) <NEW_LINE> if 'scores' in data: <NEW_LINE> <INDENT> for grade in data['scores']: <NEW_LINE> <INDENT> if 'studentId' in grade and 'value' in grade: <NEW_LINE> <INDENT> grades[grade['studentId']] = Grade(grade['studentId'], grade['value']) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return grades
Deserializes the JSON representation received as arguments to a map of student ids to Grade objects. :param json_string: JSON representation of the grades objects :return: a map of student ids to Grade objects
625941b899fddb7c1c9de1ea
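A usage sketch; the Grade(student_id, value) constructor is inferred from the call above, not confirmed by the source:

payload = '{"scores": [{"studentId": "s1", "value": 9.5}, {"studentId": "s2", "value": 7.0}]}'
grades = deserialize_grades(payload)
print(sorted(grades.keys()))  # ['s1', 's2']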