code: stringlengths 4 to 4.48k
docstring: stringlengths 1 to 6.45k
_id: stringlengths 24 to 24
def write(self, outfile='geom.xyz', label=True, style='xyz'): <NEW_LINE> <INDENT> out = '' <NEW_LINE> if style == 'xyz': <NEW_LINE> <INDENT> if label: <NEW_LINE> <INDENT> out += f'{len(self)}\n\n' <NEW_LINE> <DEDENT> out += f'{self}' <NEW_LINE> <DEDENT> elif style == 'latex': <NEW_LINE> <INDENT> header = f'{len(self)}\\\\\n' <NEW_LINE> line_form = '{:<2}' + ' {:> 13.6f}' * 3 <NEW_LINE> atoms = [line_form.format(atom, *xyz) for atom, xyz in self] <NEW_LINE> atoms = '\n'.join(atoms) <NEW_LINE> out = '\\begin{verbatim}\n' + atoms + '\n\\end{verbatim}' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise SyntaxError('Invalid style') <NEW_LINE> <DEDENT> with open(outfile, 'w') as f: <NEW_LINE> <INDENT> f.write(out)
Writes the geometry to the specified file. Prints the size at the beginning if desired (to conform to the XYZ format)
625941b592d797404e303f8c
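A minimal, self-contained sketch of the 'xyz' layout the write() entry above produces: an atom count, a blank comment line, then one "symbol x y z" row per atom. The water geometry and exact column widths below are illustrative, not taken from the original class.

atoms = [("O", (0.000, 0.000, 0.117)),
         ("H", (0.000, 0.757, -0.469)),
         ("H", (0.000, -0.757, -0.469))]
with open("geom.xyz", "w") as f:
    f.write(f"{len(atoms)}\n\n")          # size header, as the docstring notes
    for symbol, (x, y, z) in atoms:
        f.write(f"{symbol:<2} {x:13.6f} {y:13.6f} {z:13.6f}\n")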
def get(self, path, params=None): <NEW_LINE> <INDENT> params = params or {} <NEW_LINE> params = dict((k, _clean(v)) for k,v in params.iteritems()) <NEW_LINE> _log.debug("Calling %s with params=%r", path, params) <NEW_LINE> if self.api_key: <NEW_LINE> <INDENT> _log.debug("keyID and vCode added") <NEW_LINE> params['keyID'] = self.api_key[0] <NEW_LINE> params['vCode'] = self.api_key[1] <NEW_LINE> <DEDENT> key = self._cache_key(path, params) <NEW_LINE> cached_result = self.cache.get(key) <NEW_LINE> if cached_result is not None: <NEW_LINE> <INDENT> if isinstance(cached_result, APIError): <NEW_LINE> <INDENT> _log.error("Raising cached error: %r" % cached_result) <NEW_LINE> raise cached_result <NEW_LINE> <DEDENT> _log.debug("Cache hit, returning cached payload") <NEW_LINE> return cached_result <NEW_LINE> <DEDENT> params = urlencode(params) <NEW_LINE> full_path = "https://%s/%s.xml.aspx" % (self.base_url, path) <NEW_LINE> response = self.send_request(full_path, params) <NEW_LINE> tree = ElementTree.parse(response) <NEW_LINE> current_time = get_ts_value(tree, 'currentTime') <NEW_LINE> expires_time = get_ts_value(tree, 'cachedUntil') <NEW_LINE> error = tree.find('error') <NEW_LINE> if error is not None: <NEW_LINE> <INDENT> code = error.attrib['code'] <NEW_LINE> message = error.text.strip() <NEW_LINE> exc = APIError(code, message) <NEW_LINE> self.cache.put(key, exc, expires_time - current_time) <NEW_LINE> _log.error("Raising API error: %r" % exc) <NEW_LINE> raise exc <NEW_LINE> <DEDENT> result = tree.find('result') <NEW_LINE> self.cache.put(key, result, expires_time - current_time) <NEW_LINE> return result
Request a specific path from the EVE API. The supplied path should be a slash-separated path fragment, e.g. "corp/AssetList". (Basically, the portion of the API url in between the root / and the .xml bit.)
625941b56aa9bd52df036ba4
def contrast_stretch(I, N): <NEW_LINE> <INDENT> T_up = 254 <NEW_LINE> T_low = 1 <NEW_LINE> for i in range(N): <NEW_LINE> <INDENT> if I.max()<T_up: <NEW_LINE> <INDENT> G_max = I.max() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> G_max = T_up <NEW_LINE> <DEDENT> if I.min()>T_low: <NEW_LINE> <INDENT> G_min = I.min() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> G_min = T_low <NEW_LINE> <DEDENT> Gain = T_up/G_max <NEW_LINE> Offset = G_min*Gain <NEW_LINE> I = I*Gain - Offset*np.ones_like(I) <NEW_LINE> <DEDENT> return I
contrast_stretch(I, N) where I is a BW image matrix and N is an integer. Carries out contrast enhancement of the image using the method described in the paper by L. Chen et al. (2013): 'An effective image segmentation method for noisy low-contrast unbalanced background in Mura defects using balanced discrete-cosine-transfer (BDCT)'
625941b5dd821e528d63afad
def _execute_multi_specific(self): <NEW_LINE> <INDENT> raise NotImplementedError
Operations specific to a particular command dealing with multiple todo IDs.
625941b5d99f1b3c44c6739a
def process_split( self, split, indexers, model_preprocessing_interface ) -> Iterable[Type[Instance]]: <NEW_LINE> <INDENT> def _make_instance(passage, question, answer, label, par_idx, qst_idx, ans_idx): <NEW_LINE> <INDENT> d = {} <NEW_LINE> d["psg_str"] = MetadataField(" ".join(passage)) <NEW_LINE> d["qst_str"] = MetadataField(" ".join(question)) <NEW_LINE> d["ans_str"] = MetadataField(" ".join(answer)) <NEW_LINE> d["psg_idx"] = MetadataField(par_idx) <NEW_LINE> d["qst_idx"] = MetadataField(qst_idx) <NEW_LINE> d["ans_idx"] = MetadataField(ans_idx) <NEW_LINE> d["idx"] = MetadataField(ans_idx) <NEW_LINE> if model_preprocessing_interface.model_flags["uses_pair_embedding"]: <NEW_LINE> <INDENT> inp = model_preprocessing_interface.boundary_token_fn(para, question + answer) <NEW_LINE> d["psg_qst_ans"] = sentence_to_text_field(inp, indexers) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> d["psg"] = sentence_to_text_field( model_preprocessing_interface.boundary_token_fn(passage), indexers ) <NEW_LINE> d["qst"] = sentence_to_text_field( model_preprocessing_interface.boundary_token_fn(question), indexers ) <NEW_LINE> d["ans"] = sentence_to_text_field( model_preprocessing_interface.boundary_token_fn(answer), indexers ) <NEW_LINE> <DEDENT> d["label"] = LabelField(label, label_namespace="labels", skip_indexing=True) <NEW_LINE> return Instance(d) <NEW_LINE> <DEDENT> for example in split: <NEW_LINE> <INDENT> par_idx = example["idx"] <NEW_LINE> para = example["passage"]["text"] <NEW_LINE> for ex in example["passage"]["questions"]: <NEW_LINE> <INDENT> qst_idx = ex["idx"] <NEW_LINE> question = ex["question"] <NEW_LINE> for answer in ex["answers"]: <NEW_LINE> <INDENT> ans_idx = answer["idx"] <NEW_LINE> ans = answer["text"] <NEW_LINE> label = int(answer["label"]) if "label" in answer else 0 <NEW_LINE> yield _make_instance(para, question, ans, label, par_idx, qst_idx, ans_idx)
Process split text into a list of AllenNLP Instances.
625941b5bf627c535bc12fd9
def __init__(self, state_size, action_size, seed): <NEW_LINE> <INDENT> super(QNetwork, self).__init__() <NEW_LINE> self.seed = torch.manual_seed(seed) <NEW_LINE> self.state_size = state_size <NEW_LINE> self.action_size = action_size <NEW_LINE> self.fc1 = nn.Linear(self.state_size, 64) <NEW_LINE> self.fc2 = nn.Linear(64, 64) <NEW_LINE> self.fc3 = nn.Linear(64, self.action_size)
Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed
625941b5baa26c4b54cb0f26
def __resolveRelativeImport(importObj, basePath, result): <NEW_LINE> <INDENT> if basePath is None: <NEW_LINE> <INDENT> result.append(ImportResolution(importObj, None, False, None, None, "Could not resolve 'from " + importObj.name + " import ...' at line " + str(importObj.line) + " because the editing buffer has not been saved yet")) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> path = basePath <NEW_LINE> current = importObj.name[1:] <NEW_LINE> error = False <NEW_LINE> while current.startswith('.'): <NEW_LINE> <INDENT> if not path: <NEW_LINE> <INDENT> error = True <NEW_LINE> break <NEW_LINE> <DEDENT> current = current[1:] <NEW_LINE> path = os.path.dirname(path) <NEW_LINE> <DEDENT> if error: <NEW_LINE> <INDENT> result.append(ImportResolution(importObj, None, False, None, None, "Could not resolve 'from " + importObj.name + " import ...' at line " + str(importObj.line))) <NEW_LINE> return <NEW_LINE> <DEDENT> if not path: <NEW_LINE> <INDENT> path = os.path.sep <NEW_LINE> <DEDENT> oldSysPath = sys.path <NEW_LINE> sys.path = [path] <NEW_LINE> __resolveFrom(importObj, current, path, result) <NEW_LINE> sys.path = oldSysPath
Resolves imports like: 'from ..x import y'
625941b57d847024c06be0c2
def time2year(self, t): <NEW_LINE> <INDENT> time = self.ds.variables['time'] <NEW_LINE> if type(t) == np.int: <NEW_LINE> <INDENT> return num2date(t, time.units).year <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return np.asarray([y.year for y in np.asarray(num2date(t, time.units))])
convert time to year
625941b5046cf37aa974cb4d
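For reference, a small runnable sketch of the num2date conversion used above (the netCDF4 helper); the units string here is made up, whereas in the class it comes from the dataset's time variable.

from netCDF4 import num2date

units = "days since 2000-01-01 00:00:00"
print(num2date(750, units).year)                           # single value -> 2002
print([d.year for d in num2date([0, 400, 800], units)])    # sequence -> [2000, 2001, 2002]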
def prep_level(self): <NEW_LINE> <INDENT> self.level_image = self.font.render(str(self.stats.level), True, self.text_color, self.ai_settings.bg_color) <NEW_LINE> self.level_rect = self.level_image.get_rect() <NEW_LINE> self.level_rect.right = self.screen_rect.right <NEW_LINE> self.level_rect.top =self.score_rect.bottom + 10
Converts the level into a rendered image.
625941b545492302aab5e0c2
def __init__(self, charge): <NEW_LINE> <INDENT> self.charge = charge <NEW_LINE> self.rs = None <NEW_LINE> self.volume = None <NEW_LINE> self.mask_g = None <NEW_LINE> self.gd = None
Initialize the Jellium object. Input: charge, the total Jellium background charge.
625941b5462c4b4f79d1d4d2
def on_token_auto_refreshed(self, access_token): <NEW_LINE> <INDENT> with self.lock: <NEW_LINE> <INDENT> self.access_token = access_token
Called after the access token is refreshed (requests-oauthlib can automatically refresh tokens if given an OAuth client ID and secret, so this is how our copy of the token stays up-to-date). Applications that cache access tokens can override this to store the new token - just remember to call the super() method! :param access_token: New token obtained by refreshing
625941b57cff6e4e81117788
def get_css_classes(self): <NEW_LINE> <INDENT> css_classes = set(self.default_css_classes) <NEW_LINE> if self.is_required: <NEW_LINE> <INDENT> css_classes.add('required') <NEW_LINE> <DEDENT> return css_classes
Returns the list of CSS classes to apply to the element. By default, this will include the contents of ``default_css_classes``, and ``required`` if it's a required field. This can be overridden to provide additional CSS classes, if they're not appropriate for ``default_css_classes``.
625941b597e22403b379cd9b
def convert_package_name_or_id_to_title_or_name(package_name_or_id, context): <NEW_LINE> <INDENT> session = context['session'] <NEW_LINE> result = session.query(model.Package).filter_by( id=package_name_or_id).first() <NEW_LINE> if not result: <NEW_LINE> <INDENT> result = session.query(model.Package).filter_by( name=package_name_or_id).first() <NEW_LINE> <DEDENT> if not result: <NEW_LINE> <INDENT> raise df.Invalid('%s: %s' % (_('Not found'), _('Dataset'))) <NEW_LINE> <DEDENT> return result.title or result.name
Return the package title, or name if no title, for the given package name or id. :returns: the name of the package with the given name or id :rtype: string :raises: ckan.lib.navl.dictization_functions.Invalid if there is no package with the given name or id
625941b5de87d2750b85fb90
def __init__(self, stream_id=None, message=None, input_id=None): <NEW_LINE> <INDENT> self.swagger_types = { 'stream_id': 'str', 'message': 'object', 'input_id': 'str' } <NEW_LINE> self.attribute_map = { 'stream_id': 'stream_id', 'message': 'message', 'input_id': 'input_id' } <NEW_LINE> self._stream_id = stream_id <NEW_LINE> self._message = message <NEW_LINE> self._input_id = input_id
SimulationRequest - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
625941b5b545ff76a8913c23
def diffstat_for_changeset(patch_text: str) -> Dict: <NEW_LINE> <INDENT> diffstat = patch.diffstat(patch_text) <NEW_LINE> return { 'changedFiles': diffstat.files_changed, 'additions': diffstat.additions, 'deletions': diffstat.deletions, }
Calculate and return a raw patch's diffstat as JSON.
625941b5b57a9660fec33682
def record(self, numframes=None): <NEW_LINE> <INDENT> if numframes is None: <NEW_LINE> <INDENT> recorded_data = [self._pending_chunk, self._record_chunk()] <NEW_LINE> self._pending_chunk = numpy.zeros([0]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> recorded_frames = len(self._pending_chunk) <NEW_LINE> recorded_data = [self._pending_chunk] <NEW_LINE> self._pending_chunk = numpy.zeros([0]) <NEW_LINE> required_frames = numframes*len(set(self.channelmap)) <NEW_LINE> while recorded_frames < required_frames: <NEW_LINE> <INDENT> chunk = self._record_chunk() <NEW_LINE> if len(chunk) == 0: <NEW_LINE> <INDENT> chunk = numpy.zeros(required_frames-recorded_frames, dtype='float32') <NEW_LINE> <DEDENT> recorded_data.append(chunk) <NEW_LINE> recorded_frames += len(chunk) <NEW_LINE> <DEDENT> if recorded_frames > required_frames: <NEW_LINE> <INDENT> to_split = -(recorded_frames-required_frames) <NEW_LINE> recorded_data[-1], self._pending_chunk = numpy.split(recorded_data[-1], [to_split]) <NEW_LINE> <DEDENT> <DEDENT> data = numpy.reshape(numpy.concatenate(recorded_data), [-1, len(set(self.channelmap))]) <NEW_LINE> return data[:, self.channelmap]
Record a block of audio data. The data will be returned as a frames × channels float32 numpy array. This function will wait until numframes frames have been recorded. If numframes is given, it will return exactly `numframes` frames, and buffer the rest for later. If numframes is None, it will return whatever the audio backend has available right now. Use this if latency must be kept to a minimum, but be aware that block sizes can change at the whims of the audio backend. If using `record` with `numframes=None` after using `record` with a required `numframes`, the last buffered frame will be returned along with the new recorded block. (If you want to empty the last buffered frame instead, use `flush`)
625941b5293b9510aa2c309c
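A hedged usage sketch for the record() entry above, assuming it is the recorder from the `soundcard` package; the module-level helpers below are written from memory and may differ between versions, so treat them as an assumption rather than the library's documented API.

import soundcard as sc

mic = sc.default_microphone()
with mic.recorder(samplerate=48000) as rec:
    block = rec.record(numframes=1024)    # blocks until exactly 1024 frames are available
    rest = rec.record(numframes=None)     # returns whatever the backend has buffered now
print(block.shape, rest.shape)            # (frames, channels) float32 arrays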
def is_bool(a): <NEW_LINE> <INDENT> return isinstance(a, BoolRef)
Return `True` if `a` is a Z3 Boolean expression. >>> p = Bool('p') >>> is_bool(p) True >>> q = Bool('q') >>> is_bool(And(p, q)) True >>> x = Real('x') >>> is_bool(x) False >>> is_bool(x == 0) True
625941b54f6381625f114849
def testComparison_009a(self): <NEW_LINE> <INDENT> quantity1 = 13 <NEW_LINE> quantity2 = ByteQuantity(quantity="12", units=UNIT_BYTES) <NEW_LINE> self.assertNotEqual(quantity1, quantity2) <NEW_LINE> self.assertTrue(not quantity1 == quantity2) <NEW_LINE> self.assertTrue(not quantity1 < quantity2) <NEW_LINE> self.assertTrue(not quantity1 <= quantity2) <NEW_LINE> self.assertTrue(quantity1 > quantity2) <NEW_LINE> self.assertTrue(quantity1 >= quantity2) <NEW_LINE> self.assertTrue(quantity1 != quantity2)
Test comparison of byte quantity to integer bytes, integer larger
625941b51b99ca400220a8b3
def __mul__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, Polynomial) or other.n != self.n: <NEW_LINE> <INDENT> raise ValueError <NEW_LINE> <DEDENT> d = max(len(self.coeff), len(other.coeff)) <NEW_LINE> k = (d * self.n ** 2 + 1).bit_length() <NEW_LINE> k_8 = (k - 1) // 8 + 1 <NEW_LINE> k = k_8 * 8 <NEW_LINE> bt_self = bytes.join( b"", (ai.to_bytes(k_8, byteorder="little") for ai in self.coeff) ) <NEW_LINE> t_self = int.from_bytes(bt_self, byteorder="little") <NEW_LINE> if self == other: <NEW_LINE> <INDENT> t_other = t_self <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bt_other = bytes.join( b"", (ai.to_bytes(k_8, byteorder="little") for ai in other.coeff) ) <NEW_LINE> t_other = int.from_bytes(bt_other, byteorder="little") <NEW_LINE> <DEDENT> t_res = t_self * t_other <NEW_LINE> res = [] <NEW_LINE> bt_res = t_res.to_bytes((t_res.bit_length() - 1) // 8 + 1, byteorder="little") <NEW_LINE> i = 0 <NEW_LINE> while i < len(bt_res): <NEW_LINE> <INDENT> res.append(int.from_bytes(bt_res[i : i + k_8], byteorder="little") % self.n) <NEW_LINE> i += k_8 <NEW_LINE> <DEDENT> return Polynomial(res, self.n)
Multiplies two polynomials self and other. Relies on fast integer multiplication in Python implementation. Args: other (Polynomial): Multiplier. Raises: ValueError: Thrown when other is not Polynomial or is incompatible. Returns: Polynomial: self * other.
625941b5004d5f362079a13a
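The __mul__ entry above packs each polynomial's coefficients into one large integer, multiplies the integers, and unpacks the result (Kronecker substitution). A stripped-down, self-contained sketch of the same trick, with an illustrative fixed digit width rather than the bound computed above:

def kronecker_mul(a, b, n, width_bytes=8):
    # Pack a coefficient list into one big int made of little-endian fixed-width "digits".
    def pack(coeffs):
        return int.from_bytes(
            b"".join(c.to_bytes(width_bytes, "little") for c in coeffs), "little")
    product = pack(a) * pack(b)
    out_len = len(a) + len(b) - 1
    raw = product.to_bytes(out_len * width_bytes, "little")
    # Unpack the product's digits and reduce each modulo n.
    return [int.from_bytes(raw[i:i + width_bytes], "little") % n
            for i in range(0, len(raw), width_bytes)]

print(kronecker_mul([1, 2, 3], [4, 5], 7))   # (1+2x+3x^2)(4+5x) mod 7 -> [4, 6, 1, 1]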
def MaxFetch(coins): <NEW_LINE> <INDENT> num_coins = len(coins) <NEW_LINE> ans = [[0 for _ in xrange(num_coins+1)] for _ in xrange(num_coins+1)] <NEW_LINE> sides = [[None for _ in xrange(num_coins+1)] for _ in xrange(num_coins+1)] <NEW_LINE> for i in xrange(num_coins-1, -1, -1): <NEW_LINE> <INDENT> for j in xrange(i+1, num_coins+1): <NEW_LINE> <INDENT> left_pick = coins[i] + sum(coins[i+1:j]) - ans[i+1][j] <NEW_LINE> right_pick = coins[j-1] + sum(coins[i:j-1]) - ans[i][j-1] <NEW_LINE> if left_pick > right_pick: <NEW_LINE> <INDENT> ans[i][j] = left_pick <NEW_LINE> sides[i][j] = 'L' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ans[i][j] = right_pick <NEW_LINE> sides[i][j] = 'R' <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return ans[0][num_coins], PickPath(coins, sides, 0, num_coins)
http://leetcode.com/2011/02/coins-in-line.html
625941b53346ee7daa2b2b6b
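For comparison, a compact Python 3 sketch of the same "coins in a line" dynamic program: dp[i][j] is the best total the player to move can secure from coins[i:j]. It returns only the optimal value, not the pick path that MaxFetch above also reconstructs, and the sample coin row is arbitrary.

def max_fetch_value(coins):
    n = len(coins)
    prefix = [0]
    for c in coins:
        prefix.append(prefix[-1] + c)          # prefix sums avoid repeated sum() calls
    dp = [[0] * (n + 1) for _ in range(n + 1)]
    for i in range(n - 1, -1, -1):
        for j in range(i + 1, n + 1):
            total = prefix[j] - prefix[i]
            # Take the left or the right coin; the opponent then plays optimally on the rest.
            dp[i][j] = total - min(dp[i + 1][j], dp[i][j - 1])
    return dp[0][n]

print(max_fetch_value([3, 2, 2, 3, 1, 2]))     # -> 8 for the first player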
@pytest.fixture <NEW_LINE> def signed_certificate(known_private_key, known_private_key_2): <NEW_LINE> <INDENT> subject = x509.Name( [ x509.NameAttribute(x509.oid.NameOID.COUNTRY_NAME, "DK"), x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, "example.com"), ] ) <NEW_LINE> issuer = x509.Name( [ x509.NameAttribute(x509.oid.NameOID.COUNTRY_NAME, "DK"), x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, "example.net"), ] ) <NEW_LINE> cert = ( x509.CertificateBuilder() .subject_name(subject) .issuer_name(issuer) .public_key(known_private_key.public_key()) .serial_number(x509.random_serial_number()) .not_valid_before(datetime.datetime.utcnow()) .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=90)) .add_extension( x509.SubjectAlternativeName([x509.DNSName("example.com")]), critical=False ) .sign(known_private_key_2, primitives.hashes.SHA256(), default_backend()) ) <NEW_LINE> return cert
Return a signed certificate.
625941b5adb09d7d5db6c596
def get_instance(self, payload): <NEW_LINE> <INDENT> return NewFactorInstance( self._version, payload, service_sid=self._solution['service_sid'], identity=self._solution['identity'], )
Build an instance of NewFactorInstance :param dict payload: Payload response from the API :returns: twilio.rest.verify.v2.service.entity.new_factor.NewFactorInstance :rtype: twilio.rest.verify.v2.service.entity.new_factor.NewFactorInstance
625941b53539df3088e2e14d
def read(self, section, key): <NEW_LINE> <INDENT> f = io.open(self.filename, 'r', encoding='utf16') <NEW_LINE> self.config.read_file(f) <NEW_LINE> f.close() <NEW_LINE> if section in self.config: <NEW_LINE> <INDENT> if key in self.config[section]: <NEW_LINE> <INDENT> return self.config[section][key] <NEW_LINE> <DEDENT> <DEDENT> return ''
Read value of key in section. Return value or None if failed.
625941b567a9b606de4a7cc0
def anlaysis_nerve_plotting(track_point): <NEW_LINE> <INDENT> num_true_pts = np.shape(track_point)[0] <NEW_LINE> num_sample_pts = 80 <NEW_LINE> x_sample = track_point[:,0] <NEW_LINE> y_sample = track_point[:,1] <NEW_LINE> z_sample = track_point[:,2] <NEW_LINE> tck, u = interpolate.splprep([x_sample, y_sample, z_sample], s=2) <NEW_LINE> x_knots, y_knots, z_knots = interpolate.splev(tck[0], tck) <NEW_LINE> u_fine = np.linspace(0, 1, num_true_pts) <NEW_LINE> x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck) <NEW_LINE> fig2 = plt.figure(2) <NEW_LINE> ax3d = fig2.add_subplot(111, projection='3d') <NEW_LINE> ax3d.plot(x_knots, y_knots, z_knots, 'go') <NEW_LINE> ax3d.plot(x_fine, y_fine, z_fine, 'g') <NEW_LINE> plt.xlim([0, 500]) <NEW_LINE> plt.ylim([0, 550]) <NEW_LINE> ax3d.set_zlim([0, 450]) <NEW_LINE> fig2.show() <NEW_LINE> plt.show()
:param track_point: N x 3 array of [x, y, z] points :return:
625941b5d164cc6175782b50
def bind_to(self, exchange='', routing_key='', arguments=None, nowait=False, **kwargs): <NEW_LINE> <INDENT> if isinstance(exchange, Exchange): <NEW_LINE> <INDENT> exchange = exchange.name <NEW_LINE> <DEDENT> return self.channel.exchange_bind(destination=self.name, source=exchange, routing_key=routing_key, nowait=nowait, arguments=arguments)
Binds the exchange to another exchange. Arguments: nowait (bool): If set the server will not respond, and the call will not block waiting for a response. Default is :const:`False`.
625941b516aa5153ce36227b
def add_sample(self, model, reference, label=None): <NEW_LINE> <INDENT> m_mean = np.mean(model) <NEW_LINE> r_mean = np.mean(reference) <NEW_LINE> sigma_r = reference.std() <NEW_LINE> sigma_m = model.std() <NEW_LINE> sigma_n = sigma_m // sigma_r <NEW_LINE> R = np.corrcoef(model, reference)[1,0] <NEW_LINE> n_bias = (m_mean - r_mean) // sigma_r <NEW_LINE> nurmse = np.sqrt(1 + sigma_n**2 - 2 * sigma_n * R) <NEW_LINE> x = nurmse * np.sign(sigma_m - sigma_r) <NEW_LINE> y = n_bias <NEW_LINE> color = self.__get_color() <NEW_LINE> marker = self.__get_shape() <NEW_LINE> self.ax.plot(x,y, marker=marker, color=color, label=label)
Adds a new point to the diagram. Args: - *model*: Numpy array of model data. - *reference*: Numpy array of reference (observation) data. - *label* (optional): The label for this sample (default: None).
625941b544b2445a33931ea3
def __init__(self, *args): <NEW_LINE> <INDENT> this = _blocks_swig3.new_float_to_complex_sptr(*args) <NEW_LINE> try: self.this.append(this) <NEW_LINE> except: self.this = this
__init__(boost::shared_ptr<(gr::blocks::float_to_complex)> self) -> float_to_complex_sptr __init__(boost::shared_ptr<(gr::blocks::float_to_complex)> self, float_to_complex p) -> float_to_complex_sptr
625941b5ff9c53063f47c001
def update_alarm(self, device_id, alarm_id, alarm_time, week_days, recurring=False, enabled=True, label=None, snooze_length=None, snooze_count=None, vibe='DEFAULT'): <NEW_LINE> <INDENT> if not isinstance(week_days, list): <NEW_LINE> <INDENT> raise ValueError("Week days needs to be a list") <NEW_LINE> <DEDENT> for day in week_days: <NEW_LINE> <INDENT> if day not in self.WEEK_DAYS: <NEW_LINE> <INDENT> raise ValueError("Incorrect week day %s. see WEEK_DAY_LIST." % day) <NEW_LINE> <DEDENT> <DEDENT> url = "{0}/{1}/user/-/devices/tracker/{device_id}/alarms/{alarm_id}.json".format( *self._get_common_args(), device_id=device_id, alarm_id=alarm_id ) <NEW_LINE> data = { 'time': alarm_time, 'weekDays': week_days, 'recurring': recurring, 'enabled': enabled, 'vibe': vibe } <NEW_LINE> if label: <NEW_LINE> <INDENT> data['label'] = label <NEW_LINE> <DEDENT> if snooze_length: <NEW_LINE> <INDENT> data['snoozeLength'] = snooze_length <NEW_LINE> <DEDENT> if snooze_count: <NEW_LINE> <INDENT> data['snoozeCount'] = snooze_count <NEW_LINE> <DEDENT> return self.make_request(url, data=data, method="POST")
https://wiki.fitbit.com/display/API/API-Devices-Update-Alarm alarm_time should be a timezone aware datetime object.
625941b515fb5d323cde090b
def metric_rms(ri, ref): <NEW_LINE> <INDENT> rms = np.sum(cutout(ri.real-ref.real)**2) <NEW_LINE> norm = np.sum(cutout(ref.real-1)**2) <NEW_LINE> return np.sqrt(rms/norm)
Root mean square metric (normalized). This metric was used and described in Müller et al., "ODTbrain: a Python library for full-view, dense diffraction tomography", Bioinformatics 2015
625941b524f1403a9260096d
def post(self, request, nnid): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return_data = AutoMlCommon().update_parm_obj(nnid, request.data) <NEW_LINE> return Response(json.dumps(return_data)) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> return_data = {"status": "404", "result": str(e)} <NEW_LINE> return Response(json.dumps(return_data))
Manage hyperparameters for the GA algorithm like eval, population, survive etc. Structure : AutoML - NetID - NetVer (auto generated by GA) - NetBatch (auto generated on every batch) (1) Define AutoML graph definition (2) Select type of data (3) Select type of analysis algorithm (4) Select range of hyperparameters (5) Run AutoML (<- for this step) (6) Check result of each generation with UI/UX (7) Select the best model you want to use and activate it --- # Class Name : RunManagerAutoParm # Description: Set hyperparameters for the genetic algorithm itself; if not set, the genetic algorithm will run with default params
625941b5462c4b4f79d1d4d3
def ogr_to_numpy(ogrobj): <NEW_LINE> <INDENT> jsonobj = eval(ogrobj.ExportToJson()) <NEW_LINE> return np.squeeze(jsonobj['coordinates'])
Backconvert a gdal/ogr geometry to a numpy vertex array. Using JSON as a vehicle to efficiently deal with numpy arrays. Parameters ---------- ogrobj : ogr.Geometry object Returns ------- out : :class:`numpy:numpy.ndarray` a nested ndarray of vertices of shape (num vertices, 2)
625941b55f7d997b8717489e
def get_steam_level(self, steamID, format=None): <NEW_LINE> <INDENT> parameters = {'steamid' : steamID} <NEW_LINE> if format is not None: <NEW_LINE> <INDENT> parameters['format'] = format <NEW_LINE> <DEDENT> url = self.create_request_url(self.interface, 'GetSteamLevel', 1, parameters) <NEW_LINE> data = self.retrieve_request(url) <NEW_LINE> return self.return_data(data, format=format)
Returns the Steam Level of a user. steamID: The user's ID format: Return format. None defaults to json. (json, xml, vdf)
625941b58a43f66fc4b53e6d
def reload(self): <NEW_LINE> <INDENT> Base.metadata.create_all(self.__engine) <NEW_LINE> session_factory = sessionmaker(bind=self.__engine, expire_on_commit=False) <NEW_LINE> Session = scoped_session(session_factory) <NEW_LINE> self.__session = Session()
Create all tables and reload a new scoped database session.
625941b531939e2706e4cc74
def __init__(self, input, n_in, n_nodes): <NEW_LINE> <INDENT> self.input = input <NEW_LINE> W_bound = np.sqrt(6. /(n_in+n_nodes)) <NEW_LINE> W_values = np.asarray(np.random.uniform(high=W_bound, low=-W_bound, size=(n_in, n_nodes)), dtype=theano.config.floatX) <NEW_LINE> self.W = theano.shared(value=W_values, name='W', borrow=True) <NEW_LINE> b_values = np.zeros((n_nodes,), dtype=theano.config.floatX) <NEW_LINE> self.b = theano.shared(value=b_values, name='b', borrow=True) <NEW_LINE> self.threshold = 0 <NEW_LINE> dot_product = T.dot(input, self.W) + self.b <NEW_LINE> above_threshold = dot_product>self.threshold <NEW_LINE> self.output = above_threshold * (dot_product-self.threshold) <NEW_LINE> self.params = [self.W, self.b]
Initialize a hidden layer @param input: theano.tensor.dmatrix of shape (batch_size,n_in), represents inputs from previous layer @param n_in: int, number of inputs to layer @param n_nodes: int, number of nodes in the layer. Also the size of output
625941b5090684286d50eae2
def wait_for_operation_async(reactor, compute, operation, timeout_steps): <NEW_LINE> <INDENT> poller = _create_poller(operation) <NEW_LINE> eliot_action = start_action( action_type=u"flocker:node:agents:gce:wait_for_operation_async", operation=operation ) <NEW_LINE> with eliot_action.context(): <NEW_LINE> <INDENT> def finished_operation_result(): <NEW_LINE> <INDENT> latest_operation = poller.poll(compute) <NEW_LINE> if latest_operation['status'] == 'DONE': <NEW_LINE> <INDENT> return latest_operation <NEW_LINE> <DEDENT> return None <NEW_LINE> <DEDENT> operation_deferred = loop_until( reactor, finished_operation_result, timeout_steps, ) <NEW_LINE> <DEDENT> def conclude_operation(final_operation): <NEW_LINE> <INDENT> eliot_action.add_success_fields(final_operation=final_operation) <NEW_LINE> eliot_action.finish() <NEW_LINE> return final_operation <NEW_LINE> <DEDENT> operation_deferred.addCallback(conclude_operation) <NEW_LINE> return operation_deferred
Fires a deferred once a GCE operation is complete, or timeout passes. This function will poll the operation until it reaches state 'DONE' or times out, and then returns the final operation resource dict. :param reactor: The twisted ``IReactorTime`` provider to use to schedule delays. :param compute: The GCE compute python API object. :param operation: A dict representing a pending GCE operation resource. This can be either a zone or a global operation. :param timeout_steps: Iterable of times in seconds to wait until timing out the operation. :returns Deferred: A Deferred firing with the concluded GCE operation resource or calling its errback if it times out.
625941b52ae34c7f2600cf34
def test_requestAvatarIdInvalidKey(self): <NEW_LINE> <INDENT> def _checkKey(ignored): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> self.patch(self.checker, 'checkKey', _checkKey) <NEW_LINE> d = self.checker.requestAvatarId(None); <NEW_LINE> return self.assertFailure(d, UnauthorizedLogin)
If L{SSHPublicKeyDatabase.checkKey} returns False, C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
625941b5627d3e7fe0d68c51
def __init__(self): <NEW_LINE> <INDENT> super(hamiltonian_and_energy_output, self).__init__() <NEW_LINE> self.ham_init = None <NEW_LINE> self.energy_init = None
Initialization of particles output
625941b5a17c0f6771cbde57
def _wrap_client_error(e): <NEW_LINE> <INDENT> error_code = e.response['Error']['Code'] <NEW_LINE> message = e.response['Error']['Message'] <NEW_LINE> if error_code == 'BadRequestException': <NEW_LINE> <INDENT> if "Failed to copy S3 object. Access denied:" in message: <NEW_LINE> <INDENT> match = re.search('bucket=(.+?), key=(.+?)$', message) <NEW_LINE> if match: <NEW_LINE> <INDENT> return S3PermissionsRequired(bucket=match.group(1), key=match.group(2)) <NEW_LINE> <DEDENT> <DEDENT> if "Invalid S3 URI" in message: <NEW_LINE> <INDENT> return InvalidS3UriError(message=message) <NEW_LINE> <DEDENT> <DEDENT> return ServerlessRepoClientError(message=message)
Wrap botocore ClientError exception into ServerlessRepoClientError. :param e: botocore exception :type e: ClientError :return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
625941b538b623060ff0abf2
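A quick, self-contained check of the bucket/key extraction used above; the error message text is made up for illustration.

import re

message = "Failed to copy S3 object. Access denied: bucket=my-bucket, key=app/template.yaml"
match = re.search('bucket=(.+?), key=(.+?)$', message)
print(match.group(1), match.group(2))   # -> my-bucket app/template.yaml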
def iterpapers(self): <NEW_LINE> <INDENT> with self.input().open() as f: <NEW_LINE> <INDENT> record = self.nextrecord(f) <NEW_LINE> while record is not None: <NEW_LINE> <INDENT> yield record <NEW_LINE> record = self.nextrecord(f)
Return iterator over all paper records.
625941b52ae34c7f2600cf35
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, **options): <NEW_LINE> <INDENT> if test_finder is None: <NEW_LINE> <INDENT> test_finder = DocTestFinder() <NEW_LINE> <DEDENT> module = _normalize_module(module) <NEW_LINE> tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) <NEW_LINE> if globs is None: <NEW_LINE> <INDENT> globs = module.__dict__ <NEW_LINE> <DEDENT> if not tests: <NEW_LINE> <INDENT> raise ValueError(module, "has no tests") <NEW_LINE> <DEDENT> tests.sort() <NEW_LINE> suite = unittest.TestSuite() <NEW_LINE> for test in tests: <NEW_LINE> <INDENT> if len(test.examples) == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if not test.filename: <NEW_LINE> <INDENT> filename = module.__file__ <NEW_LINE> if filename[-4:] in (".pyc", ".pyo"): <NEW_LINE> <INDENT> filename = filename[:-1] <NEW_LINE> <DEDENT> test.filename = filename <NEW_LINE> <DEDENT> suite.addTest(DocTestCase(test, **options)) <NEW_LINE> <DEDENT> return suite
Convert doctest tests for a module to a unittest test suite. This converts each documentation string in a module that contains doctest tests to a unittest test case. If any of the tests in a doc string fail, then the test case fails. An exception is raised showing the name of the file containing the test and a (sometimes approximate) line number. The `module` argument provides the module to be tested. The argument can be either a module or a module name. If no argument is given, the calling module is used. A number of options may be provided as keyword arguments: setUp A set-up function. This is called before running the tests in each file. The setUp function will be passed a DocTest object. The setUp function can access the test globals as the globs attribute of the test passed. tearDown A tear-down function. This is called after running the tests in each file. The tearDown function will be passed a DocTest object. The tearDown function can access the test globals as the globs attribute of the test passed. globs A dictionary containing initial global variables for the tests. optionflags A set of doctest option flags expressed as an integer.
625941b5097d151d1a222c5f
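A small runnable illustration of the conversion the docstring above describes: with no module argument, DocTestSuite() collects doctests from the calling module and wraps them as unittest cases (the double() function is just an example).

import doctest
import unittest

def double(x):
    """Return twice x.

    >>> double(21)
    42
    """
    return 2 * x

suite = doctest.DocTestSuite()                  # uses the calling module
unittest.TextTestRunner(verbosity=2).run(suite)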
def test_parse_env(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> testpath = "testenv.txt" <NEW_LINE> with open(testpath, "w") as testfile: <NEW_LINE> <INDENT> testfile.write("FOO_VAR=bar=var\nexport FOO_NUM=42\n") <NEW_LINE> <DEDENT> parse_env(testpath) <NEW_LINE> assert get_string("FOO_VAR", "") == "bar=var" <NEW_LINE> assert get_int("FOO_NUM", 0) == 42 <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> os.remove(testpath)
Ensure that the parse_env function properly processes env files.
625941b55510c4643540f1fc
def handle_emoji_extraction( emoji: dict, first_alias: str, path: str, force: bool, real_names: bool ): <NEW_LINE> <INDENT> unicode = ''.join(format(ord(char), 'x') for char in emoji['emoji']) <NEW_LINE> unicode = re.sub(r'fe0[ef]$', '', unicode, re.IGNORECASE) <NEW_LINE> unicode = re.sub(r'^(1f937)(?:200d)(.*)$', r'\1-\2', unicode, re.IGNORECASE) <NEW_LINE> unicode = re.sub(r'^(1f1)(..)(1f1)(..)$', r'\1\2-\3\4', unicode, re.IGNORECASE) <NEW_LINE> logging.info("Unicode value of \'%s\' found : %s", first_alias, unicode) <NEW_LINE> return download_file( url=GITHUB_ASSETS_BASE_URL.format('unicode/' + unicode), path=os.path.join(path, 'unicode'), force=force, real_name=(first_alias if real_names else None) )
Simple function to reduce `perform_emojis_extraction` cyclomatic complexity
625941b5dc8b845886cb5337
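The first step in the entry above turns an emoji into its hexadecimal codepoint string; a tiny self-contained illustration (the emoji is chosen arbitrarily):

flag = "\U0001F1E9\U0001F1F0"                       # regional indicators D + K
unicode_str = ''.join(format(ord(char), 'x') for char in flag)
print(unicode_str)                                   # -> '1f1e91f1f0'
# The flag regex above would then rewrite this to '1f1e9-1f1f0' for the asset path.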
def itkMeanProjectionImageFilterID3ID3_cast(*args): <NEW_LINE> <INDENT> return _itkMeanProjectionImageFilterPython.itkMeanProjectionImageFilterID3ID3_cast(*args)
itkMeanProjectionImageFilterID3ID3_cast(itkLightObject obj) -> itkMeanProjectionImageFilterID3ID3
625941b5a05bb46b383ec630
def data(self, QModelIndex, p_int): <NEW_LINE> <INDENT> pass
QStringListModel.data(QModelIndex, int) -> QVariant
625941b5ec188e330fd5a5ab
def testAPIChildren(self): <NEW_LINE> <INDENT> self.db.storeItem(1, 2, 0, {"ishort" : "CSE/270", "idesc" : "A computer science lab."}) <NEW_LINE> self.db.storeItem(2, 2, 0, {"ishort" : "CSE 2F", "idesc" : "The second-floor corridor of the Computer Science building."}) <NEW_LINE> self.db.storeItem(3, 0, 1, {"oshort" : "Colin Runciman", "odesc" : "Do you need a description of Colin Runciman?!"}) <NEW_LINE> self.db.storeItem(4, 0, 1, {"oshort" : "Pascal", "odesc" : "He's here! He's finally here!"}) <NEW_LINE> self.db.storeItem(5, 1, 2, {"oshort" : "SuperBerin", "odesc" : "You are acting like an idiot."}) <NEW_LINE> self.assertEqual(self.db.getChildren(1), [3, 4]) <NEW_LINE> self.assertEqual(self.db.getChildren(2), [5]) <NEW_LINE> self.assertEqual(self.db.getChildren(3), [])
Test use of the database backend's API to find children of an object.
625941b563d6d428bbe442f2
def upload_screenshot(self, name, image_data, metadata=None): <NEW_LINE> <INDENT> if not self.build_id: <NEW_LINE> <INDENT> raise PersephoneException('No build is running. Please create a build first.') <NEW_LINE> <DEDENT> screenshot = self.client.post_screenshot( self.project_id, self.build_id, name, image_data, metadata) <NEW_LINE> return screenshot['id']
Uploads a screenshot to the current build. :param name: A freeform name for the screenshot (e.g. subfolder/image.png). :param image_data: A bytes object with a PNG screenshot. :param metadata: An optional freeform dict with JSON serializable values to attach to the image as metadata.
625941b55166f23b2e1a4f5c
def error_handler(self, f): <NEW_LINE> <INDENT> self.handle_error_func = f <NEW_LINE> return f
Decorator that registers the decorated function as the error handler.
625941b52c8b7c6e89b355c7
def do_GET(self): <NEW_LINE> <INDENT> pass
Ignore GET messages.
625941b5796e427e537b03c5
def environment(sz): <NEW_LINE> <INDENT> sr = 0; sc = 1; su = 2; sg = 3; vg = 4; wt = 5; il = 6 <NEW_LINE> sr_dry = 0; sr_wet = 1; sr_saturated = 2 <NEW_LINE> sc_vs = 0; sc_s = 1; sc_hc = 2; sc_c = 3; sc_s = 4 <NEW_LINE> vg_no = 0; vg_sp = 1; vg_dn = 2 <NEW_LINE> wt_cl = 0; wt_oc = 1; wt_rn = 2 <NEW_LINE> il_lo = 0; il_me = 1; il_hi = 2 <NEW_LINE> moisture = np.random.randint(3, size=sz) <NEW_LINE> composition = np.random.randint(5, size=sz) <NEW_LINE> magnetic = np.random.randint(2, size=sz) <NEW_LINE> vegetation = np.random.randint(3, size=sz) <NEW_LINE> uniformity = np.random.randint(2, size=sz) <NEW_LINE> weather = np.random.randint(3, size=sz) <NEW_LINE> illumination = np.random.randint(3, size=sz) <NEW_LINE> env = np.column_stack((moisture, composition, uniformity, magnetic, vegetation, weather, illumination)) <NEW_LINE> np.random.shuffle(env) <NEW_LINE> return env
Create a simulated environment to match the simulated minefield. The environment data includes: sr : soil moisture (%) - dry [0, 10], wet (10, 40], saturated (>40) sc : soil composition - very sandy, sandy, high-clay, clay, silt su : soil uniformity - no, yes (uniform) sg : magnetic soil - no, yes (magnetic) vg : vegetation - no-vegetation, sparse, dense wt : weather - clear, overcast, raining il : illumination - low, medium, high :param sz: One side of the square-shaped minefield area :return: an array that includes the environment data over the minefield
625941b58e05c05ec3eea174
def shufflenetv2b_wd2(**kwargs): <NEW_LINE> <INDENT> return get_shufflenetv2b( width_scale=(12.0 / 29.0), shuffle_group_first=True, model_name="shufflenetv2b_wd2", **kwargs)
ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters.
625941b560cbc95b062c634c
@register.simple_tag <NEW_LINE> def snippet(snippet_name): <NEW_LINE> <INDENT> return snip(snippet_name)
Looks up a snippet by name and returns it. Syntax:: {% snippet [snippet_name] %} Example:: {% snippet frontpage_message %}
625941b50fa83653e4656dc1
def RunInJ(i,Type,X,Y,Z,Range,MeshRange,Iterations,Output = None): <NEW_LINE> <INDENT> EndResult = [None]*(Iterations-1) <NEW_LINE> for j in range(Iterations-1): <NEW_LINE> <INDENT> Result = StartObjectScan(i,j,Type,X,Y,Z,Range,MeshRange) <NEW_LINE> EndResult[j]= Result <NEW_LINE> <DEDENT> Output.append(EndResult)
###################################################### run over all columns ######################################################
625941b5e1aae11d1e749ab7
def make_wcs_positions(row, col, offset, inverse=False): <NEW_LINE> <INDENT> n=row.size <NEW_LINE> dt=[('wcs_row','f8'), ('wcs_col','f8'), ('zrow','f8'), ('zcol','f8')] <NEW_LINE> data=numpy.zeros(row.size, dtype=dt) <NEW_LINE> if inverse: <NEW_LINE> <INDENT> data['zrow'] = row <NEW_LINE> data['zcol'] = col <NEW_LINE> data['wcs_row'] = row + offset <NEW_LINE> data['wcs_col'] = col + offset <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> data['wcs_row'] = row <NEW_LINE> data['wcs_col'] = col <NEW_LINE> data['zrow'] = row - offset <NEW_LINE> data['zcol'] = col - offset <NEW_LINE> <DEDENT> return data
make a structure holding both the original wcs and zero-offset positions. This is only meant for converting between 1-offset and 0-offset wcs positions are called 'wcs_row','wcs_col' and zero offset are called 'zrow','zcol' parameters ---------- row: array rows in the image, wcs coords if inverse=False col: array columns in the image, wcs coords if inverse=False offset: float offset to subtract from the input positions inverse: bool, optional Set to True if the input are zero based
625941b5956e5f7376d70c7f
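A hypothetical usage sketch for make_wcs_positions above (it assumes the function is in scope): converting 1-offset, FITS-style positions to zero-offset array coordinates.

import numpy as np

row = np.array([10.0, 20.5])
col = np.array([3.0, 7.0])
pos = make_wcs_positions(row, col, offset=1.0)
print(pos['wcs_row'], pos['wcs_col'])   # unchanged wcs positions
print(pos['zrow'], pos['zcol'])         # same positions minus the 1-pixel offset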
def CV_multi_stats(X, y, model) : <NEW_LINE> <INDENT> n = N_TRIALS <NEW_LINE> scores = cross_validation.cross_val_score(estimator=model, X=X, y=y) <NEW_LINE> scores_f1 = cross_validation.cross_val_score(estimator=model, X=X, y=y, scoring='f1') <NEW_LINE> print("Model Accuracy: %0.3f (+- %0.2f)" % (scores.mean(), scores.std() * 2)) <NEW_LINE> print("Model f1: %0.3f (+- %0.2f)" % (scores_f1.mean(), scores_f1.std() * 2))
http://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics This version uses multiclass (or multilabel) compatible metrics. May be expanded to use the cross_val_score helper function: http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.cross_val_score.html http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics
625941b550812a4eaa59c129
def read_file(self, f, source=None): <NEW_LINE> <INDENT> super(ExtendedConfigParser, self).read_file(f, source) <NEW_LINE> self.move_defaults()
Like read() but the argument must be a file-like object. The `f' argument must be iterable, returning one line at a time. Optional second argument is the `source' specifying the name of the file being read. If not given, it is taken from f.name. If `f' has no `name' attribute, `<???>' is used.
625941b57d43ff24873a2aa7
def test_raises_exception_when_accessed(self): <NEW_LINE> <INDENT> qs = QuerySetMock(None, TestException()) <NEW_LINE> self.assertRaises(TestException, lambda x: x[0], qs)
Exceptions can raise on getitem, too.
625941b54c3428357757c12e
def __init__(self, n_in, n_out, input, rng, poisson_layer=False, mean_doc_size=1, init_W=None, init_b=None, activation = T.tanh, mirroring=False): <NEW_LINE> <INDENT> self.n_in = n_in <NEW_LINE> self.n_out = n_out <NEW_LINE> self.input = input <NEW_LINE> self.mirroring = mirroring <NEW_LINE> T.pprint(self.input) <NEW_LINE> if not input: <NEW_LINE> <INDENT> raise Exception <NEW_LINE> <DEDENT> if (init_W == None): <NEW_LINE> <INDENT> W_values = numpy.asarray( rng.uniform( low = -numpy.sqrt(6./(n_in+n_out)), high = numpy.sqrt(6./(n_in+n_out)), size = (n_in, n_out)), dtype = theano.config.floatX) <NEW_LINE> if activation == theano.tensor.nnet.sigmoid: <NEW_LINE> <INDENT> W_values *= 4 <NEW_LINE> <DEDENT> if poisson_layer == True: <NEW_LINE> <INDENT> W_values *= 1/mean_doc_size; <NEW_LINE> <DEDENT> self.W = theano.shared(value = W_values) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.W = theano.shared(value = init_W) <NEW_LINE> <DEDENT> if (init_b == None): <NEW_LINE> <INDENT> b_values = numpy.zeros((n_out,), dtype= theano.config.floatX) <NEW_LINE> self.b = theano.shared(value= b_values) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.b = theano.shared(value= init_b) <NEW_LINE> <DEDENT> self.output = activation(T.dot(self.input, self.W) + self.b) <NEW_LINE> self.params = [self.W, self.b]
Typical hidden layer of a MLP: units are fully-connected and have sigmoidal activation function. Weight matrix W is of shape (n_in,n_out) and the bias vector b is of shape (n_out,). NOTE : The nonlinearity used here is tanh Hidden unit activation is given by: tanh(dot(input,W) + b) :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.matrix/dmatrix :param input: a symbolic tensor of shape (n_examples, n_in) :type n_in: int :param n_in: dimensionality of input :type n_out: int :param n_out: number of hidden units :type activation: theano.Op or function :param activation: Non linearity to be applied in the hidden layer
625941b50c0af96317bb7fed
def read_all_available_shows(self): <NEW_LINE> <INDENT> json_url = ('http://il.srgssr.ch/integrationlayer/1.0/ue/%s/tv/assetGroup/editorialPlayerAlphabetical.json') % BU <NEW_LINE> json_response = json.loads(self.open_url(json_url)) <NEW_LINE> try: <NEW_LINE> <INDENT> show_list = json_response['AssetGroups']['Show'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> log('read_all_available_shows: No shows found.') <NEW_LINE> return [] <NEW_LINE> <DEDENT> if not isinstance(show_list, list) or not show_list: <NEW_LINE> <INDENT> log('read_all_available_shows: No shows found.') <NEW_LINE> return [] <NEW_LINE> <DEDENT> return show_list
Downloads a list of all available RSI shows and returns this list.
625941b556b00c62f0f14460
def set_media(self): <NEW_LINE> <INDENT> media_path = self.files[self.current_file_index] <NEW_LINE> fmt = self.get_fmt(media_path) <NEW_LINE> if fmt.lower() in self.image_fmts: <NEW_LINE> <INDENT> self.media = Image.open(media_path) <NEW_LINE> self.qmedia = ImageQt(self.hologrify(self.media)) <NEW_LINE> <DEDENT> elif fmt.lower() in self.video_fmts: <NEW_LINE> <INDENT> imageio.plugins.ffmpeg.download() <NEW_LINE> if type(self.media) is Video: <NEW_LINE> <INDENT> self.stop() <NEW_LINE> <DEDENT> self.media = imageio.get_reader(media_path, "ffmpeg") <NEW_LINE> self.qmedia = None <NEW_LINE> <DEDENT> self.size_checkbox.setEnabled(True) <NEW_LINE> self.nav_widget.setEnabled(True) <NEW_LINE> self.next_shortcut.setEnabled(True) <NEW_LINE> self.previous_shortcut.setEnabled(True) <NEW_LINE> self.dimensions_shortcut.setEnabled(True) <NEW_LINE> self.refresh()
Sets the current media based on `self.current_file_index`.
625941b5f548e778e58cd37f
def list_hdf_groups(fname): <NEW_LINE> <INDENT> group_type = h5py._hl.group.Group <NEW_LINE> f = h5py.File(fname, 'r') <NEW_LINE> glist = [] <NEW_LINE> for kname in sorted(f.keys()): <NEW_LINE> <INDENT> if type(f[kname]) == group_type: <NEW_LINE> <INDENT> glist += [ kname, ] <NEW_LINE> <DEDENT> <DEDENT> f.close() <NEW_LINE> return glist
Makes a list of hdf groups. Only the 1st level is implemented; TBD: recursive listing. USAGE ===== glist = list_hdf_groups(fname) INPUT ===== fname: filename of hdf file OUTPUT ====== glist: list of group names
625941b5d164cc6175782b51
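An illustrative round trip for list_hdf_groups above (it assumes the helper and h5py are importable; the file name is arbitrary): build a tiny file, then list its top-level groups.

import h5py

with h5py.File("demo.h5", "w") as f:
    f.create_group("run1")
    f.create_group("run2")
    f.create_dataset("scalar", data=1.0)   # datasets are not reported, only groups
print(list_hdf_groups("demo.h5"))          # -> ['run1', 'run2']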
def _insert_into_or_remove_from_text_archive(self, doc): <NEW_LINE> <INDENT> text_archive_service = get_resource_service('text_archive') <NEW_LINE> if text_archive_service is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if doc.get('state') in ['published', 'corrected']: <NEW_LINE> <INDENT> query = { 'query': { 'filtered': { 'filter': { 'and': [ {'term': {'item_id': doc['item_id']}}, {'term': {'state': 'killed'}} ] } } } } <NEW_LINE> request = ParsedRequest() <NEW_LINE> request.args = {'source': json.dumps(query)} <NEW_LINE> items = super().get(req=request, lookup=None) <NEW_LINE> if items.count() == 0: <NEW_LINE> <INDENT> text_archive_service.post([doc.copy()]) <NEW_LINE> logger.info('Inserted published item {} with headline {} and version {} and expiry {}.'. format(doc['item_id'], doc.get('headline'), doc.get(config.VERSION), doc.get('expiry'))) <NEW_LINE> <DEDENT> <DEDENT> elif doc.get('state') == 'killed': <NEW_LINE> <INDENT> text_archive_service.delete_action({config.ID_FIELD: doc[config.ID_FIELD]}) <NEW_LINE> logger.info('Deleted published item {} with headline {} and version {} and expiry {} ' 'as the state of an article is killed.'.format(doc['item_id'], doc.get('headline'), doc.get(config.VERSION), doc.get('expiry')))
If the state of the article is published, check if it's been killed after publishing. If article has been killed then return otherwise insert into text_archive. If the state of the article is killed then delete the article if the article is available in text_archive.
625941b5d18da76e235322d5
def ex_add_servers_to_load_balancer(self, lb_id, server_ips=[]): <NEW_LINE> <INDENT> body = { 'server_ips': server_ips, } <NEW_LINE> response = self.connection.request( action='load_balancers/%s/server_ips' % lb_id, data=body, method='POST' ) <NEW_LINE> return response.object
Adds servers' IP addresses to a load balancer :param lb_id: Load balancer ID :type lb_id: ``str`` :param server_ips: Array of server IP IDs :type server_ips: ``list`` of ``str`` :return: Instance of load balancer :rtype: ``dict``
625941b54c3428357757c12f
def stop(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with open(self.pidfile,'r') as f: <NEW_LINE> <INDENT> pid = int(f.read().strip()) <NEW_LINE> <DEDENT> <DEDENT> except IOError: <NEW_LINE> <INDENT> pid = None <NEW_LINE> <DEDENT> if not pid: <NEW_LINE> <INDENT> message = "pidfile {0} does not exist. Daemon not running!\n".format(self.pidfile) <NEW_LINE> sys.stderr.write(message) <NEW_LINE> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> while 1: <NEW_LINE> <INDENT> os.kill(pid, SIGTERM) <NEW_LINE> time.sleep(0.1) <NEW_LINE> <DEDENT> <DEDENT> except OSError as err: <NEW_LINE> <INDENT> err = str(err) <NEW_LINE> if err.find("No such process") > 0: <NEW_LINE> <INDENT> if os.path.exists(self.pidfile): <NEW_LINE> <INDENT> os.remove(self.pidfile) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print(str(err)) <NEW_LINE> sys.exit(1)
Stops daemon.
625941b5e64d504609d74644
def predict_chunk(df, clfs): <NEW_LINE> <INDENT> object_id = df['object_id'].values <NEW_LINE> del df['object_id'] <NEW_LINE> preds = None <NEW_LINE> for clf in clfs: <NEW_LINE> <INDENT> if preds is None: <NEW_LINE> <INDENT> preds = clf.predict_proba(df, num_iteration=clf.best_iteration_) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> preds += clf.predict_proba(df, num_iteration=clf.best_iteration_) <NEW_LINE> <DEDENT> <DEDENT> preds = preds / len(clfs) <NEW_LINE> preds_df = pd.DataFrame(preds, columns=['class_{}'.format(s) for s in clfs[0].classes_]) <NEW_LINE> preds_df['object_id'] = object_id <NEW_LINE> return preds_df
Prediction for a chunk of test data, averaged over the given classifiers.
625941b5656771135c3eb676
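The core of predict_chunk above is a plain average of per-classifier class probabilities; a bare-bones numpy illustration with made-up stand-ins for clf.predict_proba output:

import numpy as np

fold_probas = [
    np.array([[0.7, 0.2, 0.1], [0.1, 0.6, 0.3]]),
    np.array([[0.5, 0.3, 0.2], [0.2, 0.5, 0.3]]),
]
avg = sum(fold_probas) / len(fold_probas)
print(avg)   # one row of averaged class probabilities per object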
@app.route('/api/v1/event/<event_id>/version', methods=['GET']) <NEW_LINE> @cross_origin() <NEW_LINE> def get_event_version(event_id): <NEW_LINE> <INDENT> version = Version.query.filter_by(event_id=event_id).order_by(Version.id.desc()).first() <NEW_LINE> if version: <NEW_LINE> <INDENT> return jsonify(version.serialize) <NEW_LINE> <DEDENT> return jsonify({"version": []})
Returns the event's latest version
625941b52eb69b55b151c6ae
def fourieruij_iso(beta,ct_over_cl, theta, phi): <NEW_LINE> <INDENT> gamt = (1-beta**2) <NEW_LINE> gaml = (1-(ct_over_cl*beta)**2) <NEW_LINE> uij = np.zeros((3,3,len(theta),len(phi))) <NEW_LINE> sinph = np.sin(phi) <NEW_LINE> cosph = np.cos(phi) <NEW_LINE> sinth = np.sin(theta) <NEW_LINE> denomL = 1-(ct_over_cl*beta*cosph)**2 <NEW_LINE> denomT = 1-(beta*cosph)**2 <NEW_LINE> if beta==0: <NEW_LINE> <INDENT> crat2 = ct_over_cl**2 <NEW_LINE> uij[0,0] = -np.outer(sinth,sinph*(sinph**2 + (2*crat2-1)*cosph**2)) <NEW_LINE> uij[0,1] = np.outer(sinth,cosph*(2*(1-crat2)*sinph**2 + 1)) <NEW_LINE> uij[1,0] = np.outer(sinth,cosph*(2*(1-crat2)*sinph**2 - 1)) <NEW_LINE> uij[1,1] = np.outer(sinth,sinph*(2*(1-crat2)*sinph**2 - 1)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> uij[0,0] = (-2/(beta**2))*np.outer(sinth,sinph*(1/denomL-(1-beta**2/2)/denomT)) <NEW_LINE> uij[0,1] = (2/(beta**2))*np.outer(sinth,cosph*(gaml/denomL-(1-beta**2/2)*gamt/denomT)) <NEW_LINE> uij[1,0] = (2/(beta**2))*np.outer(sinth,cosph*(gaml/denomL-(1-beta**2/2)/denomT)) <NEW_LINE> uij[1,1] = (2/(beta**2))*np.outer(sinth,sinph*(gaml/denomL-(1-beta**2/2)/denomT)) <NEW_LINE> <DEDENT> uij[2,0] = -np.outer(np.cos(theta),sinph*1/denomT) <NEW_LINE> uij[2,1] = np.outer(np.cos(theta),cosph*gamt/denomT) <NEW_LINE> return uij
Compute the dislocation displacement gradient field in the isotropic limit in Fourier space multiplied by iq/b (the radial coordinate over the magnitude of the Burgers vector), i.e. we only return the dependence on the (discretized) polar angle phi in Fourier space, and hence the result is a 3x3xNthetaxNphi dimensional array. Required input parameters are: the dislocation velocity beta in units of transverse sound speed, the ratio of transverse to longitudinal sound speed, and two arrays encoding the discretized dependence on the angle theta between dislocation line and Burgers vector and the polar angle phi.
625941b545492302aab5e0c3
def setUp(self): <NEW_LINE> <INDENT> mynote = TextNote() <NEW_LINE> mynote.content = '1st' <NEW_LINE> mynote.is_published = False <NEW_LINE> mynote.save() <NEW_LINE> TextNote.objects.create( content='2nd', is_published=True, ) <NEW_LINE> TextNote.objects.create( content='3rd', is_published=False, ) <NEW_LINE> TextNote.objects.create( content='4th', is_published=True, ) <NEW_LINE> TextNote.objects.create( content='5th', is_published=True, )
Create TextNote objects; publish the 2nd, 4th and 5th.
625941b5091ae35668666d6a
def parse_apx_xml(self, fs_apx_pn): <NEW_LINE> <INDENT> assert fs_apx_pn <NEW_LINE> l_data_file = QtCore.QFile(fs_apx_pn) <NEW_LINE> assert l_data_file is not None <NEW_LINE> l_data_file.open(QtCore.QIODevice.ReadOnly) <NEW_LINE> if not l_data_file.isOpen(): <NEW_LINE> <INDENT> l_log = logging.getLogger("CApxData::make_apx") <NEW_LINE> l_log.setLevel(logging.CRITICAL) <NEW_LINE> l_log.critical(u"<E01: erro na abertura de {}.".format(fs_apx_pn)) <NEW_LINE> l_evt = events.CQuit() <NEW_LINE> assert l_evt <NEW_LINE> self.__event.post(l_evt) <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> l_xdoc_apx = QtXml.QDomDocument("aproximacoes") <NEW_LINE> assert l_xdoc_apx is not None <NEW_LINE> if not l_xdoc_apx.setContent(l_data_file): <NEW_LINE> <INDENT> l_data_file.close() <NEW_LINE> l_log = logging.getLogger("CApxData::make_apx") <NEW_LINE> l_log.setLevel(logging.CRITICAL) <NEW_LINE> l_log.critical(u"<E02: falha no parse de {}.".format(fs_apx_pn)) <NEW_LINE> l_evt = events.CQuit() <NEW_LINE> assert l_evt <NEW_LINE> self.__event.post(l_evt) <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> l_data_file.close() <NEW_LINE> l_elem_root = l_xdoc_apx.documentElement() <NEW_LINE> assert l_elem_root is not None <NEW_LINE> ldct_root = parser.parse_root_element(l_elem_root) <NEW_LINE> l_node_list = l_elem_root.elementsByTagName("aproximacao") <NEW_LINE> for li_ndx in xrange(l_node_list.length()): <NEW_LINE> <INDENT> ldct_data = {} <NEW_LINE> ldct_data["breakpoints"] = [] <NEW_LINE> l_element = l_node_list.at(li_ndx).toElement() <NEW_LINE> assert l_element is not None <NEW_LINE> if l_element.hasAttribute("nApx"): <NEW_LINE> <INDENT> ldct_data["nApx"] = int(l_element.attribute("nApx")) <NEW_LINE> <DEDENT> l_node = l_element.firstChild() <NEW_LINE> assert l_node is not None <NEW_LINE> while not l_node.isNull(): <NEW_LINE> <INDENT> l_element = l_node.toElement() <NEW_LINE> assert l_element is not None <NEW_LINE> if not l_element.isNull(): <NEW_LINE> <INDENT> ldct_tmp = parser.parse_aproximacao(l_element) <NEW_LINE> if "breakpoint" in ldct_tmp: <NEW_LINE> <INDENT> ldct_data["breakpoints"].append(ldct_tmp["breakpoint"]) <NEW_LINE> del ldct_tmp["breakpoint"] <NEW_LINE> <DEDENT> ldct_data.update(ldct_tmp) <NEW_LINE> <DEDENT> l_node = l_node.nextSibling() <NEW_LINE> assert l_node is not None <NEW_LINE> <DEDENT> self.make_apx(ldct_root, ldct_data)
Loads the approach procedures file. @param fs_apx_pn: pathname of the file on disk
625941b5adb09d7d5db6c597
def i_set(self, a, b): <NEW_LINE> <INDENT> self.registers[self.eval_reg(a)] = self.eval_num(b)
Set register a to value of <b>.
625941b55fdd1c0f98dc0035
def build_f(self, f, part, t): <NEW_LINE> <INDENT> if not isinstance(part, particles): <NEW_LINE> <INDENT> raise ProblemError('something is wrong during build_f, got %s' % type(part)) <NEW_LINE> <DEDENT> N = self.params.nparts <NEW_LINE> rhs = acceleration(self.params.nparts) <NEW_LINE> for n in range(N): <NEW_LINE> <INDENT> rhs.values[3 * n:3 * n + 3] = part.q[n] / part.m[n] * ( f.elec.values[3 * n:3 * n + 3] + np.cross(part.vel.values[3 * n:3 * n + 3], f.magn.values[3 * n:3 * n + 3])) <NEW_LINE> <DEDENT> return rhs
Helper function to assemble the correct right-hand side out of B and E field Args: f (dtype_f): the field values part (dtype_u): the current particles data t (float): the current time Returns: acceleration: correct RHS of type acceleration
625941b5cad5886f8bd26de6
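The right-hand side assembled in build_f above is the Lorentz acceleration q/m * (E + v x B) per particle; a minimal single-particle numpy sketch with illustrative field values:

import numpy as np

q, m = 1.0, 1.0
E = np.array([0.0, 0.0, 1.0])
B = np.array([0.0, 1.0, 0.0])
v = np.array([1.0, 0.0, 0.0])
a = q / m * (E + np.cross(v, B))
print(a)   # -> [0. 0. 2.]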
def field_thick_cover_diffraction_matrix(shape, ks, d = 1., epsv = (1,1,1), epsa = (0,0,0.), d_cover = 0, epsv_cover = (1.,1.,1.), epsa_cover = (0.,0.,0.), mode = "b", betamax = BETAMAX, out = None): <NEW_LINE> <INDENT> ks = np.asarray(ks, dtype = FDTYPE) <NEW_LINE> epsv = np.asarray(epsv, dtype = CDTYPE) <NEW_LINE> epsa = np.asarray(epsa, dtype = FDTYPE) <NEW_LINE> alpha, f, fi = diffraction_alphaffi(shape, ks, epsv = epsv, epsa = epsa, betamax = betamax) <NEW_LINE> alpha0, f0 = diffraction_alphaf(shape, ks, epsv = epsv_cover ,epsa = epsa_cover, betamax = betamax) <NEW_LINE> alphac = alpha0 - alpha / 1.5 <NEW_LINE> alphac = alphac - alphac[...,0,0,:][...,None,None,:] <NEW_LINE> kd = ks * d_cover <NEW_LINE> pmatc = phase_matrix(alphac, kd , mode = mode) <NEW_LINE> kd = ks * d <NEW_LINE> pmat = phase_matrix(alpha, kd , mode = mode) <NEW_LINE> pmat = pmat * pmatc <NEW_LINE> return dotmdm(f,pmat,fi,out = out)
Build field diffraction matrix.
625941b523849d37ff7b2e96
def getListData(request, models): <NEW_LINE> <INDENT> pageNo = request.GET.get('pageNo', '') <NEW_LINE> pageNo = 1 if (pageNo == '') else int(pageNo) <NEW_LINE> sortCol = request.GET.get('sortCol', '') <NEW_LINE> sortCol = '-name' if sortCol != '' and sortCol == request.session.get( 'sortCol', '') else 'name' <NEW_LINE> request.session['sortCol'] = sortCol <NEW_LINE> masterList = models.objects.all().exclude(pk='0').order_by(sortCol) <NEW_LINE> paginator = Paginator(masterList, settings.PAGE_SIZE) <NEW_LINE> page_range = paginator.page_range <NEW_LINE> if pageNo != 0: <NEW_LINE> <INDENT> page_data = paginator.page(int(pageNo)) <NEW_LINE> masterList = page_data.object_list <NEW_LINE> <DEDENT> return masterList, page_range
pagination for master config
625941b54d74a7450ccd3fc6
def create(self, allow_existing=False): <NEW_LINE> <INDENT> if not allow_existing: <NEW_LINE> <INDENT> if self.exists(): <NEW_LINE> <INDENT> raise google.api_core.exceptions.AlreadyExists(f"Topic {self.path!r} already exists.") <NEW_LINE> <DEDENT> <DEDENT> if not self.exists(): <NEW_LINE> <INDENT> MESSAGES[get_service_id(self.path)] = []
Register the topic in the global messages dictionary. :param bool allow_existing: if True, don't raise an error if the topic already exists :raise google.api_core.exceptions.AlreadyExists: if the topic already exists :return None:
625941b50a366e3fb873e61a
def __idiv__(self, other): <NEW_LINE> <INDENT> if isinstance(other, GPUArray): <NEW_LINE> <INDENT> return self._div(other, self) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if other == 1: <NEW_LINE> <INDENT> return self <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self._axpbz(1/other, 0, self)
Divides an array by an array or a scalar:: x /= n
625941b521bff66bcd684759
@P.cluster_runnable <NEW_LINE> def getTimepointIntersections(infiles, n_times, outfile): <NEW_LINE> <INDENT> file_dictionary = {} <NEW_LINE> for infile in infiles: <NEW_LINE> <INDENT> gene_list = [] <NEW_LINE> with IOTools.openFile(infile, "rb") as gene_file: <NEW_LINE> <INDENT> gene_list = gene_file.read().split("\n") <NEW_LINE> <DEDENT> gene_list.remove('') <NEW_LINE> file_dictionary[infile] = gene_list <NEW_LINE> <DEDENT> time_point_dict = {} <NEW_LINE> for tme in n_times: <NEW_LINE> <INDENT> tpoints = [t for t in file_dictionary.keys() if re.search(str(tme), t)] <NEW_LINE> time_set = set(file_dictionary[tpoints[0]]) <NEW_LINE> for i in range(1, len(tpoints)): <NEW_LINE> <INDENT> gene_list = file_dictionary[tpoints[i]] <NEW_LINE> time_set = time_set.intersection(gene_list) <NEW_LINE> <DEDENT> time_point_dict[str(tme)] = time_set <NEW_LINE> <DEDENT> core_set = set(time_point_dict[str(n_times[0])]) <NEW_LINE> for j in range(1, len(time_point_dict.keys())): <NEW_LINE> <INDENT> core_set = core_set.intersection(time_point_dict[str(n_times[j])]) <NEW_LINE> <DEDENT> core_list = list(core_set) <NEW_LINE> core_genes = [ge for ge in list(core_list) if re.search("EN", ge)] <NEW_LINE> core_lncs = [lc for lc in list(core_list) if re.search("LNC", lc)] <NEW_LINE> mg = mygene.MyGeneInfo() <NEW_LINE> out_core = mg.querymany(core_genes, scopes="ensemblgene", fields="symbol", returnall=True)['out'] <NEW_LINE> out_df = pd.DataFrame(out_core) <NEW_LINE> out_df.drop(['notfound'], inplace=True, axis=1) <NEW_LINE> out_df.index = out_df['query'] <NEW_LINE> out_df.drop_duplicates(subset='query', take_last=True, inplace=True) <NEW_LINE> out_df.drop(['query'], inplace=True, axis=1) <NEW_LINE> out_df.to_csv(outfile, sep="\t", index_label="gene_id") <NEW_LINE> condition = outfile.split("-")[0] <NEW_LINE> lnc_out = "%s-timepoint_intersection_lncRNAs.tsv" % condition <NEW_LINE> with IOTools.openFile(lnc_out, "w") as lnc_file: <NEW_LINE> <INDENT> lnc_file.write("lncRNA_id") <NEW_LINE> for lncrna in core_lncs: <NEW_LINE> <INDENT> lnc_file.write("%s\n" % lncrna)
Take first n timepoints and intersect for each in vitro activation condition.
625941b592d797404e303f8e
def split_patients(path, train=0.7, test=0.15, validate=0.15, use_first=None): <NEW_LINE> <INDENT> assert train + test + validate == 1.0, "Train, Test, and Validate must add to 1.0." <NEW_LINE> result = { 'train': [], 'test': [], 'validate': [], } <NEW_LINE> folder = pathlib.Path(path) <NEW_LINE> patients = [patient for patient in folder.iterdir() if patient.is_dir()] <NEW_LINE> if use_first is not None: <NEW_LINE> <INDENT> patients = patients[0:use_first] <NEW_LINE> <DEDENT> num_patients = len(patients) <NEW_LINE> num_train = math.floor(num_patients * train) <NEW_LINE> num_test = math.floor(num_patients * test) <NEW_LINE> num_validate = num_patients - (num_train + num_test) <NEW_LINE> for i in range(0, num_train): <NEW_LINE> <INDENT> result['train'].append(patients.pop(0)) <NEW_LINE> <DEDENT> for i in range(0, num_test): <NEW_LINE> <INDENT> result['test'].append(patients.pop(0)) <NEW_LINE> <DEDENT> for i in range(0, num_validate): <NEW_LINE> <INDENT> result['validate'].append(patients.pop(0)) <NEW_LINE> <DEDENT> print(f"Actual Patient Split is train: {num_train} ({num_train/num_patients: 0.2f}) test: {num_test} " f"({num_test/num_patients: 0.2f}) " f"validate: {num_validate} ({num_validate/num_patients: 0.2f}) ") <NEW_LINE> return result
Divide the data into train, test, and validate on the patient level. This prevents unfair bias from previewing data in the test/validate sets. :param use_first: int optional, use only the first use_first patients in this path. :param path: path-like to the directory containing patient directories :param train: float 0.0 - 1.0 amount of total data to allocate to training :param test: float 0.0 - 1.0 amount of total data to allocate to testing :param validate: float 0.0 - 1.0 amount of total data to allocate to validation :return: dict {'train': [...], 'test': [...], 'validate': [...]}
625941b5b57a9660fec33683
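As a sketch (not part of the dataset): the same patient-level split arithmetic written out self-contained, using made-up patient names instead of a real directory.

```python
import math

# Stand-in for the directory listing; fractions mirror the defaults above.
patients = [f"patient_{i:03d}" for i in range(20)]
n_train = math.floor(len(patients) * 0.7)
n_test = math.floor(len(patients) * 0.15)
split = {
    "train": patients[:n_train],
    "test": patients[n_train:n_train + n_test],
    "validate": patients[n_train + n_test:],  # remainder, as in the function above
}
print({k: len(v) for k, v in split.items()})  # {'train': 14, 'test': 3, 'validate': 3}
```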
def draw_pattack(self): <NEW_LINE> <INDENT> if self.visible: <NEW_LINE> <INDENT> self.screen.blit(self.image, self.rect)
Draws an attack
625941b55fdd1c0f98dc0036
def _get_maybe_error_index(scores, y_score, median, threshold=1.4): <NEW_LINE> <INDENT> scores = scores.flatten() <NEW_LINE> maybe_error_indices = np.where((y_score > threshold) & (scores < median)) <NEW_LINE> maybe_error_scores = scores[maybe_error_indices] <NEW_LINE> return list(maybe_error_indices[0]), maybe_error_scores
Get the positions of suspected typos, using the average absolute deviation (MAD) :param scores: np.array :param threshold: the smaller the threshold, the more suspected typos are returned :return:
625941b54d74a7450ccd3fc7
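For context only (not from the dataset): a self-contained sketch of MAD-style outlier flagging in the spirit of the entry above, with y_score computed from a median absolute deviation; the data, scaling, and threshold are assumptions for illustration.

```python
import numpy as np

scores = np.array([0.9, 0.8, 0.85, 0.2, 0.88, 0.1])
median = np.median(scores)
mad = np.median(np.abs(scores - median))
y_score = np.abs(scores - median) / (mad + 1e-12)  # deviation measured in MAD units
threshold = 1.4
# Flag positions that deviate strongly and sit below the median, as in the entry above.
suspects = np.where((y_score > threshold) & (scores < median))[0]
print(list(suspects))  # [3, 5]
```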
def test_collect_subscriber(self): <NEW_LINE> <INDENT> result = collect_subscriber.delay(1) <NEW_LINE> self.assertEqual(result.successful(), True)
Test that the ``collect_subscriber`` task runs with no errors, and returns the correct result.
625941b521bff66bcd68475a
def _decode_newlines(s): <NEW_LINE> <INDENT> def repl(m): <NEW_LINE> <INDENT> return LinewiseSerializer._subst[m.group(1)] <NEW_LINE> <DEDENT> return LinewiseSerializer._escape_re.sub(repl, s)
Return s with newlines and backslashes decoded. This function reverses the encoding of _encode_newlines().
625941b510dbd63aa1bd29b4
def p_Iterable(self, p): <NEW_LINE> <INDENT> location = self.getLocation(p, 2) <NEW_LINE> identifier = IDLUnresolvedIdentifier( location, "__iterable", allowDoubleUnderscore=True ) <NEW_LINE> if len(p) > 6: <NEW_LINE> <INDENT> keyType = p[3] <NEW_LINE> valueType = p[5] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> keyType = None <NEW_LINE> valueType = p[3] <NEW_LINE> <DEDENT> p[0] = IDLIterable(location, identifier, keyType, valueType, self.globalScope())
Iterable : ITERABLE LT TypeWithExtendedAttributes GT SEMICOLON | ITERABLE LT TypeWithExtendedAttributes COMMA TypeWithExtendedAttributes GT SEMICOLON
625941b54527f215b584c25f
def __setattr__(self, name, value): <NEW_LINE> <INDENT> if self._initialized and not hasattr(self, name): <NEW_LINE> <INDENT> raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name)) <NEW_LINE> <DEDENT> super(Sonar, self).__setattr__(name, value)
Don't allow setting non-existent attributes on this class
625941b5293b9510aa2c309d
def canonize(self, string, dummy_state): <NEW_LINE> <INDENT> return string
Returns the canonized string (student answer). To return an error: return False, "Syntax error"
625941b555399d3f055884b7
def comp_model(params, plate): <NEW_LINE> <INDENT> kn = params[0] <NEW_LINE> b = np.asarray(params[1:]) <NEW_LINE> neighbourhood = plate.neighbourhood <NEW_LINE> mask = plate.mask <NEW_LINE> neigh_nos = plate.neigh_nos <NEW_LINE> def growth(amounts, times): <NEW_LINE> <INDENT> np.maximum(0, amounts, out=amounts) <NEW_LINE> C, N = np.split(amounts, 2) <NEW_LINE> N_diffs = neigh_nos*N - np.sum(mask*N, axis=1) <NEW_LINE> C_rates = b*C*N <NEW_LINE> N_rates = -C_rates - kn*N_diffs <NEW_LINE> rates = np.append(C_rates, N_rates) <NEW_LINE> return rates <NEW_LINE> <DEDENT> return growth
Return a function for running the competition model. Args ---- params : list Model parameters excluding intial amounts. plate : Plate A corresponding plate with attributes describing the neighbourhood.
625941b57b25080760e39260
def render_wavedrom_image(sphinx, node): <NEW_LINE> <INDENT> image_format = determine_format(sphinx.builder.supported_image_types) <NEW_LINE> if image_format is None: <NEW_LINE> <INDENT> raise SphinxError("Cannot determine a suitable output format") <NEW_LINE> <DEDENT> bname = "wavedrom-{}".format(uuid4()) <NEW_LINE> outpath = os.path.join(sphinx.builder.outdir, sphinx.builder.imagedir) <NEW_LINE> if sphinx.builder.config.render_using_wavedrompy: <NEW_LINE> <INDENT> imgname = render_wavedrom_py(node, outpath, bname, image_format) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> imgname = render_wavedrom_cli(sphinx, node, outpath, bname, image_format) <NEW_LINE> <DEDENT> image_node = node['image_node'] <NEW_LINE> image_node['uri'] = os.path.join(sphinx.builder.imgpath, imgname) <NEW_LINE> node.append(image_node)
Visit the wavedrom node
625941b545492302aab5e0c4
def dataReceived(self, data): <NEW_LINE> <INDENT> self.log.debug("Data received: %s" % data) <NEW_LINE> self.buffer.append(data) <NEW_LINE> for frame in self.buffer: <NEW_LINE> <INDENT> self.log.debug("Processing frame: %s" % frame) <NEW_LINE> self.engine.process_frame(frame)
Twisted calls this method when data is received. Note: The data may not be a complete frame, or may contain more than one frame.
625941b51f5feb6acb0c495a
def __init__(self): <NEW_LINE> <INDENT> self._vtm_go_epg = VtmGoEpg()
Initialise object
625941b571ff763f4b549492
def setProducer(self, _oProducer): <NEW_LINE> <INDENT> if not isinstance(_oProducer, Producer): <NEW_LINE> <INDENT> raise RuntimeError('Producer is not a subclass of LogWatcher.Producers.Producer') <NEW_LINE> <DEDENT> self.__oProducer = _oProducer
Set the data producer. @param Producer _oProducer Log data producer
625941b56fb2d068a760eea6
def __init__(self, *args): <NEW_LINE> <INDENT> this = _filter_swig.new_fir_filter_fcc_sptr(*args) <NEW_LINE> try: self.this.append(this) <NEW_LINE> except: self.this = this
__init__(boost::shared_ptr<(gr::filter::fir_filter_fcc)> self) -> fir_filter_fcc_sptr __init__(boost::shared_ptr<(gr::filter::fir_filter_fcc)> self, fir_filter_fcc p) -> fir_filter_fcc_sptr
625941b5cb5e8a47e48b78b4
def limit(self, *args): <NEW_LINE> <INDENT> return self._new(self.rows, self.cols, lambda i, j: self[i, j].limit(*args))
Calculate the limit of each element in the matrix. Examples ======== >>> import sympy >>> from sympy.abc import x, y >>> M = sympy.matrices.Matrix([[x, y], [1, 0]]) >>> M.limit(x, 2) [2, y] [1, 0] See Also ======== integrate diff
625941b5462c4b4f79d1d4d4
def generate_article_info(parsed_result, feed_id): <NEW_LINE> <INDENT> article_info = parsed_result <NEW_LINE> article_info['feed_id'] = feed_id <NEW_LINE> return article_info
:param parsed_result: dict returned from parse_entry() :param feed_id: feed ID :return: the parsed result dict with feed_id added
625941b5be8e80087fb20a53
def possibilities_calculator(self): <NEW_LINE> <INDENT> output = 0 <NEW_LINE> for house in self.grid.houses: <NEW_LINE> <INDENT> output += house.output <NEW_LINE> <DEDENT> cap_kind = [450, 900, 1800] <NEW_LINE> options = [] <NEW_LINE> max_num = [0, 0, 0] <NEW_LINE> for i in range(len(cap_kind)): <NEW_LINE> <INDENT> max_num[i] = round(output / cap_kind[i]) <NEW_LINE> <DEDENT> i = 0 <NEW_LINE> j = 0 <NEW_LINE> k = 0 <NEW_LINE> for i in range(max_num[2] + 1): <NEW_LINE> <INDENT> for j in range(max_num[1] + 1): <NEW_LINE> <INDENT> for k in range(max_num[0] + 1): <NEW_LINE> <INDENT> total_cap = i * cap_kind[2] + j * cap_kind[1] + k * cap_kind[0] <NEW_LINE> if total_cap >= output and total_cap < (output + cap_kind[0]) and [k, j, i] not in options: <NEW_LINE> <INDENT> options.append([k, j, i]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return options
Calculates the possible battery combinations (counts of each capacity type) whose total capacity just covers the combined output of all houses
625941b5f7d966606f6a9e0d
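Quick illustration (not a dataset row): the same enumeration written as a standalone snippet, with an assumed total output of 2000 in place of a real grid.

```python
output = 2000  # assumed combined house output
caps = [450, 900, 1800]
options = []
for n_big in range(round(output / caps[2]) + 1):
    for n_mid in range(round(output / caps[1]) + 1):
        for n_small in range(round(output / caps[0]) + 1):
            total = n_small * caps[0] + n_mid * caps[1] + n_big * caps[2]
            # keep combinations that cover the output without a full spare small battery
            if output <= total < output + caps[0] and [n_small, n_mid, n_big] not in options:
                options.append([n_small, n_mid, n_big])
print(options)  # [[3, 1, 0], [1, 2, 0], [1, 0, 1]]
```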
def addDigits(self, num): <NEW_LINE> <INDENT> if num == 0: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> return num-9*int((num-1)/9)
:type num: int :rtype: int https://en.wikipedia.org/wiki/Digital_root
625941b5796e427e537b03c6
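Standalone check (helper name is mine, not from the dataset): the closed-form digital-root formula used in the addDigits entry above.

```python
def digital_root(num):
    # Closed-form digital root: 0 stays 0, otherwise values cycle through 1..9.
    return 0 if num == 0 else num - 9 * ((num - 1) // 9)

assert digital_root(38) == 2   # 3 + 8 = 11, 1 + 1 = 2
assert digital_root(0) == 0
assert digital_root(9) == 9
assert digital_root(10) == 1
```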
def main(): <NEW_LINE> <INDENT> args = [] <NEW_LINE> project, account = bootstrapping.GetActiveProjectAndAccount() <NEW_LINE> pass_credentials = ( properties.VALUES.core.pass_credentials_to_gsutil.GetBool() and not properties.VALUES.auth.disable_credentials.GetBool()) <NEW_LINE> _MaybeAddBotoOption(args, 'GSUtil', 'default_project_id', project) <NEW_LINE> if pass_credentials: <NEW_LINE> <INDENT> encoding.SetEncodedValue( os.environ, 'CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL', '1') <NEW_LINE> if account in c_gce.Metadata().Accounts(): <NEW_LINE> <INDENT> _MaybeAddBotoOption(args, 'GoogleCompute', 'service_account', 'default') <NEW_LINE> encoding.SetEncodedValue( os.environ, 'CLOUDSDK_PASSED_GCE_SERVICE_ACCOUNT_TO_GSUTIL', '1') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> legacy_config_path = config.Paths().LegacyCredentialsGSUtilPath(account) <NEW_LINE> boto_config = encoding.GetEncodedValue(os.environ, 'BOTO_CONFIG', '') <NEW_LINE> boto_path = encoding.GetEncodedValue(os.environ, 'BOTO_PATH', '') <NEW_LINE> if boto_config: <NEW_LINE> <INDENT> boto_path = os.pathsep.join([boto_config, legacy_config_path]) <NEW_LINE> <DEDENT> elif boto_path: <NEW_LINE> <INDENT> boto_path = os.pathsep.join([boto_path, legacy_config_path]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> path_parts = ['/etc/boto.cfg', os.path.expanduser(os.path.join('~', '.boto')), legacy_config_path] <NEW_LINE> boto_path = os.pathsep.join(path_parts) <NEW_LINE> <DEDENT> encoding.SetEncodedValue(os.environ, 'BOTO_CONFIG', None) <NEW_LINE> encoding.SetEncodedValue(os.environ, 'BOTO_PATH', boto_path) <NEW_LINE> <DEDENT> <DEDENT> encoding.SetEncodedValue( os.environ, 'GA_CID', metrics.GetCIDIfMetricsEnabled()) <NEW_LINE> proxy_params = properties.VALUES.proxy <NEW_LINE> proxy_address = proxy_params.address.Get() <NEW_LINE> if proxy_address: <NEW_LINE> <INDENT> _MaybeAddBotoOption(args, 'Boto', 'proxy', proxy_address) <NEW_LINE> _MaybeAddBotoOption(args, 'Boto', 'proxy_port', proxy_params.port.Get()) <NEW_LINE> _MaybeAddBotoOption(args, 'Boto', 'proxy_rdns', proxy_params.rdns.GetBool()) <NEW_LINE> _MaybeAddBotoOption(args, 'Boto', 'proxy_user', proxy_params.username.Get()) <NEW_LINE> _MaybeAddBotoOption(args, 'Boto', 'proxy_pass', proxy_params.password.Get()) <NEW_LINE> <DEDENT> disable_ssl = properties.VALUES.auth.disable_ssl_validation.GetBool() <NEW_LINE> _MaybeAddBotoOption(args, 'Boto', 'https_validate_certificates', None if disable_ssl is None else not disable_ssl) <NEW_LINE> _MaybeAddBotoOption(args, 'Boto', 'ca_certificates_file', properties.VALUES.core.custom_ca_certs_file.Get()) <NEW_LINE> bootstrapping.ExecutePythonTool('platform/gsutil', 'gsutil', *args)
Launches gsutil.
625941b5566aa707497f437f
def set_times(event_list): <NEW_LINE> <INDENT> for event in event_list: <NEW_LINE> <INDENT> event.start = event.event_date_start.strftime('%H:%M') <NEW_LINE> event.end = event.event_date_end.strftime('%H:%M') <NEW_LINE> <DEDENT> return event_list
Formats each event's start and end times as HH:MM strings
625941b5d58c6744b4257a65
def test_get(self): <NEW_LINE> <INDENT> self.flash['message'] = 'Message' <NEW_LINE> self.storage.set(self.flash, self.request, self.response) <NEW_LINE> self.assertEqual(None, self.storage.get(self.request)) <NEW_LINE> self._transfer_cookies_from_response_to_request() <NEW_LINE> self.assertEqual('Message', self.storage.get(self.request)['message'])
CookieStorage: should return the stored object.
625941b53c8af77a43ae35a2
def consumer_commit_for_times(consumer, partition_to_offset, atomic=False): <NEW_LINE> <INDENT> no_offsets = set() <NEW_LINE> for tp, offset in six.iteritems(partition_to_offset): <NEW_LINE> <INDENT> if offset is None: <NEW_LINE> <INDENT> logging.error( "No offsets found for topic-partition {tp}. Either timestamps not supported" " for the topic {tp}, or no offsets found after timestamp specified, or there is no" " data in the topic-partition.".format(tp=tp), ) <NEW_LINE> no_offsets.add(tp) <NEW_LINE> <DEDENT> <DEDENT> if atomic and len(no_offsets) > 0: <NEW_LINE> <INDENT> logging.error( "Commit aborted; offsets were not found for timestamps in" " topics {}".format(",".join([str(tp) for tp in no_offsets])), ) <NEW_LINE> return <NEW_LINE> <DEDENT> offsets_metadata = { tp: OffsetAndMetadata(partition_to_offset[tp].offset, metadata=None) for tp in six.iterkeys(partition_to_offset) if tp not in no_offsets } <NEW_LINE> if len(offsets_metadata) != 0: <NEW_LINE> <INDENT> consumer.commit(offsets_metadata)
Commits offsets to Kafka using the given KafkaConsumer and offsets, a mapping of TopicPartition to Unix Epoch milliseconds timestamps. Arguments: consumer (KafkaConsumer): an initialized kafka-python consumer. partitions_to_offset (dict TopicPartition: OffsetAndTimestamp): Map of TopicPartition to OffsetAndTimestamp. Return value of offsets_for_times. atomic (bool): Flag to specify whether the commit should fail if offsets are not found for some TopicPartition: timestamp pairs.
625941b5b545ff76a8913c24
def get_res(self, context, element=None, ui=False) -> "BFResult or None": <NEW_LINE> <INDENT> if DEBUG: print("BFDS: BFObject.get_res:", self.idname) <NEW_LINE> return BFCommon.get_res(self, context, self, ui)
Get full BFResult (children and mine). On error raise BFException.
625941b5442bda511e8be22b
def get_user_input(input_value): <NEW_LINE> <INDENT> if input_value: <NEW_LINE> <INDENT> if check_input_validity(input_value): <NEW_LINE> <INDENT> return input_value <NEW_LINE> <DEDENT> <DEDENT> while True: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> current_code = raw_input("Your code (max len 13 char): ") <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> current_code = input("Your code (max len 13 char): ") <NEW_LINE> <DEDENT> if check_input_validity(current_code): <NEW_LINE> <INDENT> return current_code
Prompt the user for a code (max 13 chars) until a valid one is entered :return: the validated code
625941b5d7e4931a7ee9dd20
def vote_positive(self): <NEW_LINE> <INDENT> return self.upvotes > self.downvotes
Return True if there are more upvotes than downvotes, otherwise False
625941b5adb09d7d5db6c598