Dataset columns: code (string, 4 to 4.48k chars), docstring (string, 1 to 6.45k chars), _id (string, 24 chars)
def test_allow_request(self): <NEW_LINE> <INDENT> ruleset = Mock() <NEW_LINE> ruleset.evaluate = Mock(return_value=True) <NEW_LINE> IPFilter(self.app, ruleset=ruleset) <NEW_LINE> response = self.client.get("/") <NEW_LINE> self.assertEqual(response.status_code, 200) <NEW_LINE> ruleset.evaluate.assert_called()
Test allowing a request. Set up an IPFilter that will allow all requests and verify that a request is actually allowed. We expect the response to have an HTTP status of 200.
625941b799cbb53fe6792a29
def __init__(self, size, type): <NEW_LINE> <INDENT> self.size = size <NEW_LINE> self.type = type <NEW_LINE> self.solutions_count = 0 <NEW_LINE> self.solutions = [] <NEW_LINE> if self.type == 1: <NEW_LINE> <INDENT> self.solve_brute_force() <NEW_LINE> <DEDENT> elif self.type == 2: <NEW_LINE> <INDENT> self.solutions = self.solve_backtracking(self.size) <NEW_LINE> self.solutions_count = len(self.solutions)
size = Integer type = Integer
625941b7be383301e01b52cf
def test_0016(self): <NEW_LINE> <INDENT> h = int(data_user.du(self, 16, self.ID)) <NEW_LINE> data_use = eval(data_user.du(self, h, self.data)) <NEW_LINE> host = (data_user.du(self, h, self.host)) <NEW_LINE> a ,b = Newuser.new_use(self,data_use,host,rs) <NEW_LINE> vale = a.decode('utf-8') <NEW_LINE> print (vale) <NEW_LINE> data_user.xg(self, h, self.ststus_code, b) <NEW_LINE> data_user.xg(self, h, self.result, vale) <NEW_LINE> data_user.xg(self, h, self.execute, "是") <NEW_LINE> Newuser.is_new(self, a.decode('utf-8'),h)
Security verification: the password is empty
625941b7a17c0f6771cbde96
def __init__(self): <NEW_LINE> <INDENT> root = Tk() <NEW_LINE> Frame.__init__(self, root) <NEW_LINE> events.ModeDeferrer.__init__(self) <NEW_LINE> root.title("Sudoku Solver") <NEW_LINE> styles.setup(root) <NEW_LINE> self.clear_text = StringVar(self, "") <NEW_LINE> self.step_text = StringVar(self, "") <NEW_LINE> self.end_text = StringVar(self, "") <NEW_LINE> self.boxes = dict() <NEW_LINE> self.highlighted_box = None <NEW_LINE> self.status_text = StringVar(self, "") <NEW_LINE> self._init_ui() <NEW_LINE> self._init_events() <NEW_LINE> self.mode = events.InitializingMode(self)
Construct a MainFrame with parent master. Args: master: The parent frame.
625941b7b5575c28eb68de3f
def test_post_calls_requests(self): <NEW_LINE> <INDENT> with patch('requests.post') as requests_mock, response_context() as response_mock: <NEW_LINE> <INDENT> requests_mock.return_value = response_mock <NEW_LINE> request = JsonApiRequest('http://www.example.com', 'admin', 's3cr3t') <NEW_LINE> data = request.post('ham/eggs', {'ham': True}) <NEW_LINE> requests_mock.assert_called_with( 'http://www.example.com/api/ham/eggs', '{"ham": true}', headers={'Content-type': 'application/json', 'Accept': 'application/json'}, auth=('admin', 's3cr3t')) <NEW_LINE> self.assertEqual(data, {'spam': True})
JsonApiRequest.post should call requests.post and return the JSON result
625941b71b99ca400220a8f3
@cliutils.arg( '-c', '--chassis', dest='chassis_uuid', metavar='<chassis>', help='UUID of the chassis that this node belongs to.') <NEW_LINE> @cliutils.arg( '--chassis_uuid', help=argparse.SUPPRESS) <NEW_LINE> @cliutils.arg( '-d', '--driver', metavar='<driver>', required=True, help='Driver used to control the node [REQUIRED].') <NEW_LINE> @cliutils.arg( '-i', '--driver-info', metavar='<key=value>', action='append', help='Key/value pair used by the driver, such as out-of-band management ' 'credentials. Can be specified multiple times.') <NEW_LINE> @cliutils.arg( '--driver_info', action='append', help=argparse.SUPPRESS) <NEW_LINE> @cliutils.arg( '-p', '--properties', metavar='<key=value>', action='append', help='Key/value pair describing the physical characteristics of the ' 'node. This is exported to Nova and used by the scheduler. ' 'Can be specified multiple times.') <NEW_LINE> @cliutils.arg( '-e', '--extra', metavar='<key=value>', action='append', help="Record arbitrary key/value metadata. " "Can be specified multiple times.") <NEW_LINE> @cliutils.arg( '-u', '--uuid', metavar='<uuid>', help="Unique UUID for the node.") <NEW_LINE> @cliutils.arg( '-n', '--name', metavar='<name>', help="Unique name for the node.") <NEW_LINE> def do_node_create(cc, args): <NEW_LINE> <INDENT> field_list = ['chassis_uuid', 'driver', 'driver_info', 'properties', 'extra', 'uuid', 'name'] <NEW_LINE> fields = dict((k, v) for (k, v) in vars(args).items() if k in field_list and not (v is None)) <NEW_LINE> fields = utils.args_array_to_dict(fields, 'driver_info') <NEW_LINE> fields = utils.args_array_to_dict(fields, 'extra') <NEW_LINE> fields = utils.args_array_to_dict(fields, 'properties') <NEW_LINE> node = cc.node.create(**fields) <NEW_LINE> data = dict([(f, getattr(node, f, '')) for f in field_list]) <NEW_LINE> cliutils.print_dict(data, wrap=72)
Register a new node with the Ironic service.
625941b766656f66f7cbbfec
def tokenize(unit_str): <NEW_LINE> <INDENT> tokens = [] <NEW_LINE> char_index = 0 <NEW_LINE> while char_index < len(unit_str): <NEW_LINE> <INDENT> for regex, token_type in TOKEN_REGEXES.items(): <NEW_LINE> <INDENT> match = re.match(regex, unit_str[char_index:]) <NEW_LINE> if match is not None: <NEW_LINE> <INDENT> token_text = match.group(0) <NEW_LINE> tokens.append(Token(token_type, token_text)) <NEW_LINE> char_index += len(token_text) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> err = "unrecognized token at pos {} of '{}'".format( char_index, unit_str) <NEW_LINE> raise ValueError(err) <NEW_LINE> <DEDENT> <DEDENT> return tokens
Returns a list of Tokens from the given unit string.
625941b7ec188e330fd5a5e9
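The tokenizer above tries each pattern in TOKEN_REGEXES at the current position and uses Python's for/else so that an unmatched position raises immediately. A minimal standalone sketch of the same loop, with an illustrative token table (the regexes, Token type, and unit grammar below are assumptions for the example, not the original definitions):

import re
from collections import namedtuple

Token = namedtuple("Token", ["type", "text"])

# Hypothetical token table; the real TOKEN_REGEXES is defined elsewhere in the project.
TOKEN_REGEXES = {
    r"[A-Za-z]+": "UNIT",     # e.g. "kg", "m"
    r"\d+": "NUMBER",         # e.g. "2"
    r"[*/^]": "OPERATOR",     # e.g. "*", "/", "^"
    r"\s+": "WHITESPACE",
}

def tokenize(unit_str):
    tokens = []
    i = 0
    while i < len(unit_str):
        for regex, token_type in TOKEN_REGEXES.items():
            match = re.match(regex, unit_str[i:])
            if match is not None:
                text = match.group(0)
                tokens.append(Token(token_type, text))
                i += len(text)
                break
        else:
            # for/else: no pattern matched at the current position
            raise ValueError("unrecognized token at pos {} of '{}'".format(i, unit_str))
    return tokens

print(tokenize("kg*m/s^2"))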
def record(self, rs=4, fname="back.wav"): <NEW_LINE> <INDENT> CHUNK = 1024 <NEW_LINE> FORMAT = pyaudio.paInt16 <NEW_LINE> CHANNELS = 1 <NEW_LINE> RATE = 16000 <NEW_LINE> RECORD_SECONDS = rs <NEW_LINE> p = pyaudio.PyAudio() <NEW_LINE> stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) <NEW_LINE> stream.start_stream() <NEW_LINE> print("* 开始录音<<<<<<") <NEW_LINE> frames = [] <NEW_LINE> for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): <NEW_LINE> <INDENT> data = stream.read(CHUNK) <NEW_LINE> frames.append(data) <NEW_LINE> <DEDENT> stream.stop_stream() <NEW_LINE> stream.close() <NEW_LINE> p.terminate() <NEW_LINE> wf = wave.open(fname, 'wb') <NEW_LINE> wf.setnchannels(CHANNELS) <NEW_LINE> wf.setsampwidth(p.get_sample_size(FORMAT)) <NEW_LINE> wf.setframerate(RATE) <NEW_LINE> wf.writeframes(b''.join(frames)) <NEW_LINE> wf.close() <NEW_LINE> print("* 结束录音<<<<<<")
Record audio
625941b730bbd722463cbc05
@jit <NEW_LINE> def objective_function(matrix): <NEW_LINE> <INDENT> return abs(np.sum(matrix))
Objective function for evaluating the current solution. :param matrix: Matrix to be evaluated. :return: Returns the sum of all elements of the matrix.
625941b767a9b606de4a7cff
def update_dic(D, A, B): <NEW_LINE> <INDENT> for j in range(D.shape[1]): <NEW_LINE> <INDENT> u = (B[:, j] - D.dot(A[:, j])) <NEW_LINE> u += A[j, j] * D[:, j] <NEW_LINE> u_norm = np.sqrt(u.T.dot(u)) <NEW_LINE> if u_norm < 1e-20: <NEW_LINE> <INDENT> u = np.random.rand(D.shape[0]) <NEW_LINE> u_norm = np.sqrt(u.T.dot(u)) <NEW_LINE> A[j, :] = 0.0 <NEW_LINE> <DEDENT> u /= u_norm <NEW_LINE> D[:, j] = u <NEW_LINE> <DEDENT> return D
Updates dictionary such that D minimizes 1/2 * Tr(Dt.D.A) - Tr(Dt.B) :param D: dictionary :param A: sum of alpha*alpha.T :param B: sum of x*alpha.T :return: updated dictionary
625941b7cb5e8a47e48b78f2
def identity_preserving_hysteresis_thresholding( img, high_threshold, low_threshold, min_size, max_size=None, out=None ): <NEW_LINE> <INDENT> logger.debug("Computing high threshold") <NEW_LINE> binary_seeds = (img >= high_threshold).view(numpy.uint8) <NEW_LINE> logger.debug("Labeling") <NEW_LINE> seed_labels = label_with_background(binary_seeds) <NEW_LINE> logger.debug("Inverting image") <NEW_LINE> img_max = img.max() <NEW_LINE> inverted_img = -img + img_max <NEW_LINE> inverted_low_threshold = -1*img.dtype.type(low_threshold) + img_max <NEW_LINE> logger.debug("First watershed") <NEW_LINE> watershed_labels, max_label = vigra.analysis.watershedsNew( inverted_img, seeds=seed_labels, terminate=vigra.analysis.SRGType.StopAtThreshold, max_cost=inverted_low_threshold, out=out ) <NEW_LINE> logger.debug("Filtering labels") <NEW_LINE> filter_labels(watershed_labels, min_size, max_size) <NEW_LINE> logger.debug("Second watershed") <NEW_LINE> vigra.analysis.watershedsNew( inverted_img, seeds=watershed_labels, terminate=vigra.analysis.SRGType.StopAtThreshold, max_cost=inverted_low_threshold, out=watershed_labels ) <NEW_LINE> logger.debug("Complete") <NEW_LINE> return watershed_labels
Threshold the given image at two levels (hysteresis thresholding), but don't allow two 'high' thresholded regions to bleed into each other when the low threshold is applied. A labeled image is returned, and no connected component will be too small or too large according to the given min/max sizes. Ideas for improvement: Allow separate images for the high and low thresholding steps.
625941b7956e5f7376d70cbd
def delete(table, chain=None, position=None, rule=None, family='ipv4'): <NEW_LINE> <INDENT> if position and rule: <NEW_LINE> <INDENT> return 'Error: Only specify a position or a rule, not both' <NEW_LINE> <DEDENT> if not check_table(table, family=family): <NEW_LINE> <INDENT> return 'Error: table {0} in family {1} does not exist'. format(table, family) <NEW_LINE> <DEDENT> if not check_chain(table, chain, family=family): <NEW_LINE> <INDENT> return 'Error: chain {0} in table {1} in family {2} does not exist'. format(chain, table, family) <NEW_LINE> <DEDENT> if not check(table, chain, rule, family=family): <NEW_LINE> <INDENT> return 'Error: rule {0} chain {1} in table {2} in family {3} does not exist'. format(rule, chain, table, family) <NEW_LINE> <DEDENT> if not position: <NEW_LINE> <INDENT> position = get_rule_handle(table, chain, rule, family) <NEW_LINE> <DEDENT> nft_family = _NFTABLES_FAMILIES[family] <NEW_LINE> cmd = '{0} delete rule {1} {2} {3} handle {4}'. format(_nftables_cmd(), nft_family, table, chain, position) <NEW_LINE> out = __salt__['cmd.run'](cmd) <NEW_LINE> if len(out) == 0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
Delete a rule from the specified table & chain, specifying either the rule in its entirety, or the rule's position in the chain. This function accepts a rule in a standard nftables command format, starting with the chain. Trying to force users to adapt to a new method of creating rules would be irritating at best, and we already have a parser that can handle it. CLI Examples: .. code-block:: bash salt '*' nftables.delete filter input position=3 salt '*' nftables.delete filter input \ rule='input tcp dport 22 log accept' IPv6: salt '*' nftables.delete filter input position=3 family=ipv6 salt '*' nftables.delete filter input \ rule='input tcp dport 22 log accept' \ family=ipv6
625941b72eb69b55b151c6ed
def trim_add_noise(data_path,checker_path,search_pattern): <NEW_LINE> <INDENT> from numpy import var <NEW_LINE> from numpy.random import normal <NEW_LINE> from glob import glob <NEW_LINE> from obspy import read <NEW_LINE> checker_files=glob(checker_path+search_pattern) <NEW_LINE> for k in range(len(checker_files)): <NEW_LINE> <INDENT> ch=read(checker_files[k]) <NEW_LINE> sta=checker_files[k].split('/')[-1].split('.')[1] <NEW_LINE> vord=checker_files[k].split('/')[-1].split('.')[2] <NEW_LINE> comp=checker_files[k].split('/')[-1].split('.')[3] <NEW_LINE> data_file=glob(data_path+sta+'*'+vord+'*'+comp) <NEW_LINE> st=read(data_file[0]) <NEW_LINE> ch.trim(starttime=st[0].stats.starttime,endtime=st[0].stats.endtime) <NEW_LINE> v=2e-5 <NEW_LINE> noise=normal(loc=0.0, scale=v**0.5, size=ch[0].stats.npts) <NEW_LINE> ch[0].data=ch[0].data+noise <NEW_LINE> ch.write(checker_files[k],format='SAC')
Trim checkerboard data and add Gaussian noise to the data. data_path='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/data/waveforms/' search_pattern='checker.*disp*' checker_path='/Volumes/Kanagawa/Slip_Inv/tohoku_10s/output/forward_models/'
625941b7de87d2750b85fbd1
def put(self, package): <NEW_LINE> <INDENT> pkgidx = self.workingArea.put_package(package) <NEW_LINE> logger = logging.getLogger(__name__) <NEW_LINE> logger.info('submitting {}'.format(self.workingArea.package_relpath(pkgidx))) <NEW_LINE> runid = self.dispatcher.run(self.workingArea, pkgidx) <NEW_LINE> self.runid_pkgidx_map[runid] = pkgidx <NEW_LINE> return pkgidx
put a task This method places a task in the working area and has the dispatcher execute it. If you need to put multiple tasks, it can be much faster to use `put_multiple()` than to use this method multiple times, depending on the dispatcher. Parameters ---------- package : callable A task Returns ------- int A package index assigned by the working area
625941b76aa9bd52df036be5
def load_test_data(data_path): <NEW_LINE> <INDENT> images, cls = _load_data(filename="test_batch",data_path=data_path) <NEW_LINE> return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
Load all the test-data for the CIFAR-10 data-set. Returns the images, class-numbers and one-hot encoded class-labels.
625941b73346ee7daa2b2bac
def bootstrap_elasticsearch(): <NEW_LINE> <INDENT> from wselasticsearch.bootstrap import create_user_info_index, update_model_mappings <NEW_LINE> update_model_mappings() <NEW_LINE> create_user_info_index()
Bootstrap Elasticsearch to contain the proper document typings. :return: None
625941b74e696a04525c9297
def disable_intelligence_pack( self, resource_group_name, workspace_name, intelligence_pack_name, custom_headers=None, raw=False, **operation_config): <NEW_LINE> <INDENT> api_version = "2015-11-01-preview" <NEW_LINE> url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks/{intelligencePackName}/Disable' <NEW_LINE> path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'), 'intelligencePackName': self._serialize.url("intelligence_pack_name", intelligence_pack_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> header_parameters = {} <NEW_LINE> header_parameters['Content-Type'] = 'application/json; charset=utf-8' <NEW_LINE> if self.config.generate_client_request_id: <NEW_LINE> <INDENT> header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) <NEW_LINE> <DEDENT> if custom_headers: <NEW_LINE> <INDENT> header_parameters.update(custom_headers) <NEW_LINE> <DEDENT> if self.config.accept_language is not None: <NEW_LINE> <INDENT> header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') <NEW_LINE> <DEDENT> request = self._client.post(url, query_parameters) <NEW_LINE> response = self._client.send(request, header_parameters, **operation_config) <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> exp = CloudError(response) <NEW_LINE> exp.request_id = response.headers.get('x-ms-request-id') <NEW_LINE> raise exp <NEW_LINE> <DEDENT> if raw: <NEW_LINE> <INDENT> client_raw_response = ClientRawResponse(None, response) <NEW_LINE> return client_raw_response
Disables an intelligence pack for a given workspace. :param resource_group_name: The name of the resource group to get. The name is case insensitive. :type resource_group_name: str :param workspace_name: Name of the Log Analytics Workspace. :type workspace_name: str :param intelligence_pack_name: The name of the intelligence pack to be disabled. :type intelligence_pack_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: None or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :rtype: None or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
625941b7e1aae11d1e749af6
def find_OTP_key(plaintext1, ciphertext1, ciphertext2): <NEW_LINE> <INDENT> OTP_key = str_xor(plaintext1, ciphertext1) <NEW_LINE> plaintext2 = str_xor(OTP_key, ciphertext2) <NEW_LINE> print("Alice: ", binary_to_english(plaintext1)) <NEW_LINE> print("Bob: ", binary_to_english(plaintext2))
Breaking a one-time pad when Alice's plaintext and ciphertext are known and Bob's ciphertext is known. Plaintexts and ciphertexts are in binary.
625941b74527f215b584c29e
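The attack above works because XOR is its own inverse: the key is plaintext1 XOR ciphertext1, and applying that key to ciphertext2 reveals plaintext2. A small self-contained sketch over bytes (the original operates on binary strings through str_xor and binary_to_english, which are assumed to be defined elsewhere):

import os

def xor_bytes(a, b):
    return bytes(x ^ y for x, y in zip(a, b))

p1 = b"attack at dawn!"              # Alice's plaintext, known to the attacker
p2 = b"retreat by dusk"              # Bob's plaintext, which we want to recover
key = os.urandom(len(p1))            # one-time pad reused for both messages

c1 = xor_bytes(p1, key)              # Alice's ciphertext, known
c2 = xor_bytes(p2, key)              # Bob's ciphertext, known

recovered_key = xor_bytes(p1, c1)    # key = plaintext1 XOR ciphertext1
assert recovered_key == key
print(xor_bytes(recovered_key, c2))  # b'retreat by dusk'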
def lst_to_obj(self): <NEW_LINE> <INDENT> data_iter = iter(self.raw_data) <NEW_LINE> attr_map = { 'INDI': { 'INDI': 'indi_id', 'NAME': 'name', 'SEX': 'sex', 'BIRT': 'birt_dt', 'DEAT': 'deat_dt', 'FAMC': 'fam_c', 'FAMS': 'fam_s' }, 'FAM': { 'FAM': 'fam_id', 'MARR': 'marr_dt', 'DIV': 'div_dt', 'HUSB': 'husb_id', 'WIFE': 'wife_id', 'CHIL': 'chil_id' } } <NEW_LINE> cat_cont = {'INDI': self.indis, 'FAM': self.fams} <NEW_LINE> cat_pool = {'INDI': Individual, 'FAM': Family} <NEW_LINE> curr_entity = None <NEW_LINE> curr_id = None <NEW_LINE> curr_cat = None <NEW_LINE> for ln_ind, lvl, tag, arg in data_iter: <NEW_LINE> <INDENT> if lvl == '0' and tag in cat_pool: <NEW_LINE> <INDENT> if curr_entity: <NEW_LINE> <INDENT> if curr_id in cat_cont[curr_cat]: <NEW_LINE> <INDENT> self.msg_collections['err']['msg_container']['US22']['tokens'].append((curr_id, curr_cat)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cat_cont[curr_cat][curr_id] = curr_entity <NEW_LINE> <DEDENT> curr_entity, curr_cat, curr_id = None, None, None <NEW_LINE> <DEDENT> curr_entity, curr_cat, curr_id = cat_pool[tag](arg), tag, arg <NEW_LINE> <DEDENT> if curr_entity and lvl == '1': <NEW_LINE> <INDENT> attr = attr_map[curr_cat][tag] <NEW_LINE> if tag in ('BIRT', 'DEAT', 'MARR', 'DIV'): <NEW_LINE> <INDENT> ln_ind, lvl, tag, arg = next(data_iter) <NEW_LINE> curr_entity[attr] = datetime.strptime(arg, Gedcom.dt_fmt) <NEW_LINE> <DEDENT> elif tag == 'NAME': <NEW_LINE> <INDENT> regex_obj = re.search(Gedcom.names_regex, arg) <NEW_LINE> curr_entity[attr]['first'] = regex_obj.group(1) <NEW_LINE> curr_entity[attr]['last'] = regex_obj.group(2) <NEW_LINE> <DEDENT> elif tag in ('CHIL', 'FAMS'): <NEW_LINE> <INDENT> curr_entity[attr].add(arg) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> curr_entity[attr] = arg <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> cat_cont[curr_cat][curr_id] = curr_entity
Translate the raw data in the list into dicts of entity objects
625941b7d99f1b3c44c673da
def split_rectangle(rect,d,values): <NEW_LINE> <INDENT> sorted_values = np.sort(values) <NEW_LINE> out = [] <NEW_LINE> current_left = rect[d,0] <NEW_LINE> for v in sorted_values: <NEW_LINE> <INDENT> if current_left < v and rect[d,1] > v: <NEW_LINE> <INDENT> new_rect = np.copy(rect) <NEW_LINE> new_rect[d,0] = current_left <NEW_LINE> new_rect[d,1] = v <NEW_LINE> current_left = v <NEW_LINE> out.append(new_rect) <NEW_LINE> <DEDENT> <DEDENT> new_rect = np.copy(rect) <NEW_LINE> new_rect[d,0] = current_left <NEW_LINE> out.append(new_rect) <NEW_LINE> return out
This function splits a rectangle along a given direction at the given values. INPUT: rect: rectangle to split (size (dim,2)) d: direction in which to split values: list of values OUTPUT: a list of rectangles
625941b73cc13d1c6d3c71c7
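As a worked example of the splitting described above (with rectangles as (dim, 2) arrays of [low, high] bounds): splitting the box x in [0, 10], y in [0, 5] along direction 0 at values 3 and 7 gives three boxes with x-intervals [0, 3], [3, 7], [7, 10] and an unchanged y-interval. A compact standalone restatement of the same logic to make the example runnable:

import numpy as np

def split_rectangle(rect, d, values):
    # rect is a (dim, 2) array of [low, high] bounds; split along axis d.
    out, left = [], rect[d, 0]
    for v in np.sort(values):
        if left < v < rect[d, 1]:
            piece = rect.copy()
            piece[d, 0], piece[d, 1] = left, v
            out.append(piece)
            left = v
    last = rect.copy()
    last[d, 0] = left
    out.append(last)
    return out

rect = np.array([[0.0, 10.0], [0.0, 5.0]])   # x in [0, 10], y in [0, 5]
for piece in split_rectangle(rect, d=0, values=[7.0, 3.0]):
    print(piece[0], piece[1])
# x-intervals come out as [0, 3], [3, 7], [7, 10]; y stays [0, 5] in every piece.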
def scale_data(self, dataframe): <NEW_LINE> <INDENT> dataset = dataframe.values <NEW_LINE> cols = dataframe.shape[1] <NEW_LINE> X = dataset[:,3:cols-2].astype(float) <NEW_LINE> scaler = MinMaxScaler() <NEW_LINE> try: <NEW_LINE> <INDENT> scaled_X = scaler.fit_transform(X) <NEW_LINE> <DEDENT> except ValueError as e: <NEW_LINE> <INDENT> scaled_X = None <NEW_LINE> self.logger.warn("Error scaling the X axis: %s", e) <NEW_LINE> <DEDENT> return scaled_X
Scale the data. TODO: Should we be doing this in enrich.py?
625941b7d4950a0f3b08c19d
def test_generic_relation(): <NEW_LINE> <INDENT> pass
>>> from uliweb.utils.generic import GenericReference, GenericRelation >>> db = get_connection('sqlite://') >>> db.echo = False >>> db.metadata.drop_all() >>> from uliweb.contrib.tables.models import Tables >>> class Article(Model): ... title = Field(str) ... content = Field(TEXT) ... tags = GenericRelation('tag') >>> class Tag(Model): ... name = Field(str) ... content_object = GenericReference() >>> a = Article(title='Test') >>> a.save() True >>> b = Article(title='Linux') >>> b.save() True >>> print list(a.all()) # doctest:+ELLIPSIS [<Article {'title':u'Test','content':u'','tags':<uliweb.orm.Result ...>,'id':1}>, <Article {'title':u'Linux','content':u'','tags':<uliweb.orm.Result ...>,'id':2}>] >>> t = Tag(name='python', content_object=a) >>> t.save() True >>> t1 = Tag(name='linux', content_object=a) >>> t1.save() True >>> b = list(t.all())[0] >>> print repr(b) # doctest:+ELLIPSIS <Tag {'name':u'python','content_object':<Article {'title':u'Test','content':u'','tags':<uliweb.orm.Result ...>,'id':1}>,'id':1,'table_id':1,'object_id':1}> >>> print b.to_dict() {'content_object': (1, 1), 'table_id': 1, 'name': 'python', 'object_id': 1, 'id': 1} >>> print b.content_object 1 >>> print [x.name for x in a.tags] [u'python', u'linux'] >>> print [x.name for x in Tag.content_object.filter(a)] [u'python', u'linux'] >>> print [x.name for x in Tag.content_object.filter(('article', a.id))] [u'python', u'linux']
625941b738b623060ff0ac32
def _incremental_weighted_mean_and_var(X, sample_weight, last_mean, last_variance, last_weight_sum): <NEW_LINE> <INDENT> if sample_weight is None: <NEW_LINE> <INDENT> return _incremental_mean_and_var(X, last_mean, last_variance, last_weight_sum) <NEW_LINE> <DEDENT> nan_mask = np.isnan(X) <NEW_LINE> sample_weight_T = np.reshape(sample_weight, (1, -1)) <NEW_LINE> new_weight_sum = np.dot(sample_weight_T, ~nan_mask).ravel().astype(np.float64) <NEW_LINE> total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0) <NEW_LINE> X_0 = np.where(nan_mask, 0, X) <NEW_LINE> new_mean = np.average(X_0, weights=sample_weight, axis=0).astype(np.float64) <NEW_LINE> new_mean *= total_weight_sum / new_weight_sum <NEW_LINE> updated_weight_sum = last_weight_sum + new_weight_sum <NEW_LINE> updated_mean = ( (last_weight_sum * last_mean + new_weight_sum * new_mean) / updated_weight_sum) <NEW_LINE> if last_variance is None: <NEW_LINE> <INDENT> updated_variance = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> X_0 = np.where(nan_mask, 0, (X-new_mean)**2) <NEW_LINE> new_variance = _safe_accumulator_op( np.average, X_0, weights=sample_weight, axis=0) <NEW_LINE> new_variance *= total_weight_sum / new_weight_sum <NEW_LINE> new_term = ( new_weight_sum * (new_variance + (new_mean - updated_mean) ** 2)) <NEW_LINE> last_term = ( last_weight_sum * (last_variance + (last_mean - updated_mean) ** 2)) <NEW_LINE> updated_variance = (new_term + last_term) / updated_weight_sum <NEW_LINE> <DEDENT> return updated_mean, updated_variance, updated_weight_sum
Calculate weighted mean and weighted variance incremental update. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape (n_samples, n_features) Data to use for mean and variance update. sample_weight : array-like of shape (n_samples,) or None Sample weights. If None, then samples are equally weighted. last_mean : array-like of shape (n_features,) Mean before the incremental update. last_variance : array-like of shape (n_features,) or None Variance before the incremental update. If None, variance update is not computed (in case scaling is not required). last_weight_sum : array-like of shape (n_features,) Sum of weights before the incremental update. Returns ------- updated_mean : array of shape (n_features,) updated_variance : array of shape (n_features,) or None If None, only mean is computed. updated_weight_sum : array of shape (n_features,) Notes ----- NaNs in `X` are ignored. `last_mean` and `last_variance` are statistics computed at the last step by the function. Both must be initialized to 0.0. The mean is always required (`last_mean`) and returned (`updated_mean`), whereas the variance can be None (`last_variance` and `updated_variance`). For further details on the algorithm to perform the computation in a numerically stable way, see [Finch2009]_, Sections 4 and 5. References ---------- .. [Finch2009] `Tony Finch, "Incremental calculation of weighted mean and variance", University of Cambridge Computing Service, February 2009. <https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf>`_
625941b721a7993f00bc7b2c
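One way to see why the update formulas above are right is to check that chunked processing reproduces the one-shot weighted mean. A small illustrative check for the mean only (not the scikit-learn helper itself; the variance update follows the same combine-then-normalize pattern):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
w = rng.uniform(0.1, 2.0, size=100)

full_mean = np.average(X, weights=w, axis=0)   # one-shot weighted mean

# Incremental: process two chunks, combining as in the docstring's update rule.
last_mean, last_wsum = np.zeros(3), 0.0
for chunk in (slice(0, 60), slice(60, 100)):
    new_wsum = w[chunk].sum()
    new_mean = np.average(X[chunk], weights=w[chunk], axis=0)
    updated_wsum = last_wsum + new_wsum
    last_mean = (last_wsum * last_mean + new_wsum * new_mean) / updated_wsum
    last_wsum = updated_wsum

assert np.allclose(last_mean, full_mean)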
def test_write_without_path_fails(): <NEW_LINE> <INDENT> hc = mod.HostapdConf() <NEW_LINE> with pytest.raises(RuntimeError): <NEW_LINE> <INDENT> hc.write()
Calling write() without a path fails
625941b79b70327d1c4e0c17
def probabilistic_nan_mean( df: "classes.BeliefsDataFrame", output_resolution, input_resolution, distribution: Optional[str] = None, ) -> "classes.BeliefsDataFrame": <NEW_LINE> <INDENT> if output_resolution < input_resolution: <NEW_LINE> <INDENT> raise ValueError( "Cannot use a downsampling policy to upsample from %s to %s." % (input_resolution, output_resolution) ) <NEW_LINE> <DEDENT> event_starts = df.groupby(["event_start"]).groups.keys() <NEW_LINE> cdf_v = [] <NEW_LINE> cdf_p = [] <NEW_LINE> for event_start in event_starts: <NEW_LINE> <INDENT> vp = df.xs(event_start, level="event_start") <NEW_LINE> cdf_v.append(vp.values.flatten()) <NEW_LINE> cdf_p.append(vp.index.get_level_values("cumulative_probability").values) <NEW_LINE> <DEDENT> if distribution is None: <NEW_LINE> <INDENT> cdf_p, cdf_v = interpret_complete_cdf(cdf_p, cdf_v) <NEW_LINE> cdf_p, cdf_v = multivariate_marginal_to_univariate_joint_cdf( cdf_p, cdf_v, agg_function=np.nanmean ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cdfs = interpret_complete_cdf(cdf_p, cdf_v, distribution=distribution) <NEW_LINE> cdf_p, cdf_v = multivariate_marginal_to_univariate_joint_cdf( cdfs, agg_function=np.nanmean ) <NEW_LINE> <DEDENT> first_row = df.iloc[0:1] <NEW_LINE> first_row = first_row.reset_index() <NEW_LINE> df = pd.concat([first_row] * len(cdf_p), ignore_index=True) <NEW_LINE> df["event_value"] = cdf_v <NEW_LINE> df["cumulative_probability"] = cdf_p <NEW_LINE> return df.set_index( ["event_start", "belief_time", "source", "cumulative_probability"] )
Calculate the mean value while ignoring nan values.
625941b7627d3e7fe0d68c91
def func_abund(self,R,theta,z,*pars): <NEW_LINE> <INDENT> a0 = pars <NEW_LINE> return a0 + np.zeros(shape=np.shape(z))
Default abundance function. Parameters ---------- R : array_like, shape(n,) Array of cylindrical R's with respect to the model long axis. theta : array_like, shape(n,) Array of azimuthal theta's with respect to the model long axis. z : array_like, shape(n,) Array of z's with respect to the model long axis. *pars : scalar(s) Function parameters. Default expected parameters: ``a0`` Returns ------- abundance : array_like, shape(n,) Abundance at each (R,theta,z) point. Notes ----- Default model: constant abundance .. math:: a(R,\theta,z) = a_0
625941b7a934411ee37514de
def _create_template(self): <NEW_LINE> <INDENT> template = dict( apiVersion="v1", kind="Route", metadata=dict( name=self.route_name, ), spec=dict( host=self.host, to=dict( kind="Service", name=self.to_service ), port=dict( targetPort=self.target_port ) ) ) <NEW_LINE> if self.labels: <NEW_LINE> <INDENT> template['metadata']['labels'] = self.labels <NEW_LINE> <DEDENT> return template
apiVersion: v1 kind: Route metadata: name: wordpress-wordpress labels: wordpress: wordpress spec: host: wordpress.local to: kind: Service name: wordpress-wordpress port: targetPort: main
625941b7cdde0d52a9e52e72
def get_user_friend(): <NEW_LINE> <INDENT> return User( id='507f1f77bcf86cd799439022', display_name='TestUser2', friends=['507f1f77bcf86cd799439011'], )
Returns a friend's user
625941b75fcc89381b1e1507
def __configure_module(self, module, sub=None, rename=None): <NEW_LINE> <INDENT> prefix = None <NEW_LINE> surfix = None <NEW_LINE> if not sub: <NEW_LINE> <INDENT> prefix = 'import' <NEW_LINE> if isinstance(rename, str): <NEW_LINE> <INDENT> surfix = " ".join(['as', rename]) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if isinstance(sub, (str, list)): <NEW_LINE> <INDENT> prefix = 'from' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> methods.raiseerror(messages.Errors.InputTypeError, 'Wrong type on module, use str or list of str') <NEW_LINE> <DEDENT> if rename: <NEW_LINE> <INDENT> if isinstance(rename, (str, list)): <NEW_LINE> <INDENT> if isinstance(sub, str): <NEW_LINE> <INDENT> if isinstance(rename, str): <NEW_LINE> <INDENT> surfix = " ".join([sub, 'as', rename]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> methods.raiseerror(messages.Errors.InputTypeError, 'Datatype of sub and rename must be same') <NEW_LINE> <DEDENT> <DEDENT> if isinstance(sub, list): <NEW_LINE> <INDENT> if isinstance(rename, list): <NEW_LINE> <INDENT> if len(sub) != len(rename): <NEW_LINE> <INDENT> methods.raiseerror(messages.Errors.InputValueError, 'The number of object in list between sub and rename must be same') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> surfix = ", ".join([" ".join([s, 'as', rename[i]]) for i, s in enumerate(sub)]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> surfix = sub <NEW_LINE> <DEDENT> <DEDENT> if surfix: <NEW_LINE> <INDENT> if prefix == 'from': <NEW_LINE> <INDENT> package = " ".join([prefix, module, 'import', surfix]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> package = " ".join([prefix, module, surfix]) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> package = " ".join([prefix, module]) <NEW_LINE> <DEDENT> if package not in self.__import: <NEW_LINE> <INDENT> self.__import.append(package) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass
Method to import a module during steps. Only one module can be imported at a time; to import multiple modules, use this method multiple times. :param module: any python module installed in your environment :param sub: submodule you want to import from the parent module; if you want to import multiple submodules, use a list instead :param rename: new name for the imported module; if you want to rename submodules, use a list instead :type module: str :type sub: str or list of str :type rename: str or list of str
625941b7293b9510aa2c30dc
def remove_node(self, session_id): <NEW_LINE> <INDENT> if session_id not in self.nodes: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> node = self.nodes[session_id] <NEW_LINE> for job_uuid in node.jobs: <NEW_LINE> <INDENT> log.info("Job %s failed due to node %s shutdown.", job_uuid, session_id) <NEW_LINE> <DEDENT> del self.nodes[session_id] <NEW_LINE> log.info("Removed node %s from pool.", session_id)
Remove the given node and mark all its jobs as failed.
625941b78e71fb1e9831d5f0
def do_action(self, names, kwargs): <NEW_LINE> <INDENT> ret = {} <NEW_LINE> names = set(names) <NEW_LINE> for alias, drivers in self.map_providers_parallel().items(): <NEW_LINE> <INDENT> if not names: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> for driver, vms in drivers.items(): <NEW_LINE> <INDENT> if not names: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> fun = '{0}.{1}'.format(driver, self.opts['action']) <NEW_LINE> if fun not in self.clouds: <NEW_LINE> <INDENT> log.info( '\'{0}()\' is not available. Not actioning...'.format( fun ) ) <NEW_LINE> continue <NEW_LINE> <DEDENT> for vm_name, vm_details in vms.items(): <NEW_LINE> <INDENT> if not names: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if vm_name not in names: <NEW_LINE> <INDENT> log.debug('vm:{0} in provider:{1} is not in name list:{2!r}'.format( vm_name, driver, names )) <NEW_LINE> continue <NEW_LINE> <DEDENT> with context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): <NEW_LINE> <INDENT> if alias not in ret: <NEW_LINE> <INDENT> ret[alias] = {} <NEW_LINE> <DEDENT> if driver not in ret[alias]: <NEW_LINE> <INDENT> ret[alias][driver] = {} <NEW_LINE> <DEDENT> if kwargs: <NEW_LINE> <INDENT> ret[alias][driver][vm_name] = self.clouds[fun]( vm_name, kwargs, call='action' ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ret[alias][driver][vm_name] = self.clouds[fun]( vm_name, call='action' ) <NEW_LINE> <DEDENT> names.remove(vm_name) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if not names: <NEW_LINE> <INDENT> return ret <NEW_LINE> <DEDENT> ret['Not Actioned/Not Running'] = list(names) <NEW_LINE> return ret
Perform an action on a VM which may be specific to this cloud provider
625941b71d351010ab855960
def _check_parsing_pointer(self, event, previous_parsing_pointer_value): <NEW_LINE> <INDENT> logging.info('Enter to the method, publication_date of the event: %s, previous_parsing_pointer_value: %s', event.publication_date, previous_parsing_pointer_value) <NEW_LINE> if previous_parsing_pointer_value is None: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if int(event.publication_date) > int(previous_parsing_pointer_value): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True
Return True if this event is already in the database (publication_date of the event is less than parsing_pointer) Return False if this event is new (publication_date of the event is more than parsing_pointer) :param event: :param previous_parsing_pointer_value: :return:
625941b757b8e32f524832e3
def expire(self): <NEW_LINE> <INDENT> pass
Expire/logout of the session.
625941b7187af65679ca4f60
def get_id(self): <NEW_LINE> <INDENT> return self._id
Returns this sardana object ID :return: this sardana object ID :rtype: int
625941b763d6d428bbe44332
def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, ignore_responders_from_payload=None, ignore_tags_from_payload=None, ignore_extra_properties_from_payload=None, responders=None, priority=None, custom_priority=None, tags=None, extra_properties=None, assigned_team=None, feature_type=None, allow_configuration_access=None, allow_read_access=None, allow_write_access=None, allow_delete_access=None): <NEW_LINE> <INDENT> self._suppress_notifications = None <NEW_LINE> self._ignore_teams_from_payload = None <NEW_LINE> self._ignore_recipients_from_payload = None <NEW_LINE> self._recipients = None <NEW_LINE> self._is_advanced = None <NEW_LINE> self._ignore_responders_from_payload = None <NEW_LINE> self._ignore_tags_from_payload = None <NEW_LINE> self._ignore_extra_properties_from_payload = None <NEW_LINE> self._responders = None <NEW_LINE> self._priority = None <NEW_LINE> self._custom_priority = None <NEW_LINE> self._tags = None <NEW_LINE> self._extra_properties = None <NEW_LINE> self._assigned_team = None <NEW_LINE> self._feature_type = None <NEW_LINE> self._allow_configuration_access = None <NEW_LINE> self._allow_read_access = None <NEW_LINE> self._allow_write_access = None <NEW_LINE> self._allow_delete_access = None <NEW_LINE> self.discriminator = None <NEW_LINE> if suppress_notifications is not None: <NEW_LINE> <INDENT> self.suppress_notifications = suppress_notifications <NEW_LINE> <DEDENT> if ignore_teams_from_payload is not None: <NEW_LINE> <INDENT> self.ignore_teams_from_payload = ignore_teams_from_payload <NEW_LINE> <DEDENT> if ignore_recipients_from_payload is not None: <NEW_LINE> <INDENT> self.ignore_recipients_from_payload = ignore_recipients_from_payload <NEW_LINE> <DEDENT> if recipients is not None: <NEW_LINE> <INDENT> self.recipients = recipients <NEW_LINE> <DEDENT> if is_advanced is not None: <NEW_LINE> <INDENT> self.is_advanced = is_advanced <NEW_LINE> <DEDENT> if ignore_responders_from_payload is not None: <NEW_LINE> <INDENT> self.ignore_responders_from_payload = ignore_responders_from_payload <NEW_LINE> <DEDENT> if ignore_tags_from_payload is not None: <NEW_LINE> <INDENT> self.ignore_tags_from_payload = ignore_tags_from_payload <NEW_LINE> <DEDENT> if ignore_extra_properties_from_payload is not None: <NEW_LINE> <INDENT> self.ignore_extra_properties_from_payload = ignore_extra_properties_from_payload <NEW_LINE> <DEDENT> if responders is not None: <NEW_LINE> <INDENT> self.responders = responders <NEW_LINE> <DEDENT> if priority is not None: <NEW_LINE> <INDENT> self.priority = priority <NEW_LINE> <DEDENT> if custom_priority is not None: <NEW_LINE> <INDENT> self.custom_priority = custom_priority <NEW_LINE> <DEDENT> if tags is not None: <NEW_LINE> <INDENT> self.tags = tags <NEW_LINE> <DEDENT> if extra_properties is not None: <NEW_LINE> <INDENT> self.extra_properties = extra_properties <NEW_LINE> <DEDENT> if assigned_team is not None: <NEW_LINE> <INDENT> self.assigned_team = assigned_team <NEW_LINE> <DEDENT> if feature_type is not None: <NEW_LINE> <INDENT> self.feature_type = feature_type <NEW_LINE> <DEDENT> if allow_configuration_access is not None: <NEW_LINE> <INDENT> self.allow_configuration_access = allow_configuration_access <NEW_LINE> <DEDENT> if allow_read_access is not None: <NEW_LINE> <INDENT> self.allow_read_access = allow_read_access <NEW_LINE> <DEDENT> if allow_write_access is not None: <NEW_LINE> <INDENT> self.allow_write_access = allow_write_access <NEW_LINE> <DEDENT> if 
allow_delete_access is not None: <NEW_LINE> <INDENT> self.allow_delete_access = allow_delete_access
AmazonRoute53HealthCheckIntegration - a model defined in Swagger
625941b7e64d504609d74683
def show_properties_menu_item_callback(self, *args): <NEW_LINE> <INDENT> print("CoverArtBrowser DEBUG - show_properties_menu_item_callback") <NEW_LINE> self.entry_view.select_all() <NEW_LINE> info_dialog = RB.SongInfo(source=self, entry_view=self.entry_view) <NEW_LINE> info_dialog.show_all() <NEW_LINE> print("CoverArtBrowser DEBUG - end show_properties_menu_item_callback")
Callback called when the show album properties option is selected from the cover view popup. It shows a SongInfo dialog showing the selected albums' entries info, which can be modified.
625941b750485f2cf553cbdc
def fetch_option_taskfileinfos(self, typ, element): <NEW_LINE> <INDENT> inter = self.get_typ_interface(typ) <NEW_LINE> return inter.fetch_option_taskfileinfos(element)
Fetch the options for possible files to load, replace etc for the given element. This will call :meth:`ReftypeInterface.fetch_option_taskfileinfos`. :param typ: the type of options. E.g. Asset, Alembic, Camera etc :type typ: str :param element: The element for which the options should be fetched. :type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot` :returns: The options :rtype: list of :class:`TaskFileInfo`
625941b70c0af96317bb802c
def open_html_file(html_file_name): <NEW_LINE> <INDENT> html_file = open(html_file_name, "r", encoding="utf-8") <NEW_LINE> return html_file
Open the HTML article file and return the file object
625941b77c178a314d6ef29b
def train_for(self, iterations): <NEW_LINE> <INDENT> raise NotImplementedError('train_for() is not implemented.')
:param iterations: Number of iterations to train for. :return: Nothing.
625941b7be7bc26dc91cd449
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, TopicAverageScoreMatrixGraphContext): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.to_dict() == other.to_dict()
Returns true if both objects are equal
625941b7379a373c97cfa98e
def croelectStopwords(): <NEW_LINE> <INDENT> swords = croStopwords() <NEW_LINE> newswords = u'a b c ć č d đ e f g h i j k l m n o p q r s š t u v z ž www com http hr' <NEW_LINE> allswords = set() <NEW_LINE> for s in swords: allswords.add(s.lower()) <NEW_LINE> for s in newswords.split(): allswords.add(s) <NEW_LINE> return allswords
Construct and return a set of stopwords.
625941b70fa83653e4656e00
def __init__( self, four_vector: Tuple[float, float, float, float] = None, initial: Action = C, ) -> None: <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self._initial = initial <NEW_LINE> self.set_initial_four_vector(four_vector)
Parameters ---------- four_vector: list or tuple of floats of length 4 The response probabilities to the preceding round of play ( P(C|CC), P(C|CD), P(C|DC), P(C|DD) ) initial: C or D The initial move Special Cases ------------- Alternator is equivalent to MemoryOnePlayer((0, 0, 1, 1), C) Cooperator is equivalent to MemoryOnePlayer((1, 1, 1, 1), C) Defector is equivalent to MemoryOnePlayer((0, 0, 0, 0), D) Random is equivalent to MemoryOnePlayer((0.5, 0.5, 0.5, 0.5)) (with a random choice for the initial state) TitForTat is equivalent to MemoryOnePlayer((1, 0, 1, 0), C) WinStayLoseShift is equivalent to MemoryOnePlayer((1, 0, 0, 1), C) See also: The remaining strategies in this file Multiple strategies in titfortat.py Grofman, Joss in axelrod_tournaments.py
625941b776d4e153a657e973
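The four_vector is simply a map from the previous round's (own move, opponent move) to the probability of cooperating next, which is where the special cases in the docstring come from. A tiny illustrative sketch of that mapping (not the library's actual Player API):

import random

C, D = "C", "D"

def next_move(four_vector, last_round):
    # four_vector = (P(C|CC), P(C|CD), P(C|DC), P(C|DD)); last_round = (my move, opponent's move)
    probs = dict(zip([(C, C), (C, D), (D, C), (D, D)], four_vector))
    return C if random.random() < probs[last_round] else D

tit_for_tat = (1, 0, 1, 0)               # cooperate iff the opponent cooperated last round
print(next_move(tit_for_tat, (D, C)))    # always "C": the opponent cooperated
print(next_move(tit_for_tat, (C, D)))    # always "D": the opponent defected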
def get_question(self): <NEW_LINE> <INDENT> return self.question_answer_list[0]
Input: none Output: str Purpose: Returns question
625941b74428ac0f6e5ba635
def text_to_ngrams(text, n=2): <NEW_LINE> <INDENT> sentences = text_to_sentences(text) <NEW_LINE> return sentences_to_ngrams(sentences, n, split=True)
Takes a text and returns an array of n-tuples.
625941b7a8ecb033257d2f1a
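Here "an array of n-tuples" means overlapping n-grams over each sentence's tokens. A self-contained illustration of the idea with a zip-based bigram builder (text_to_sentences and sentences_to_ngrams in the record above are assumed to exist elsewhere):

def ngrams(words, n=2):
    # Overlapping windows of length n over the token list.
    return list(zip(*(words[i:] for i in range(n))))

print(ngrams("the quick brown fox".split(), n=2))
# [('the', 'quick'), ('quick', 'brown'), ('brown', 'fox')]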
def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None): <NEW_LINE> <INDENT> if name not in datasets_map: <NEW_LINE> <INDENT> raise ValueError('Name of dataset unknown %s' % name) <NEW_LINE> <DEDENT> return datasets_map[name].get_split( split_name, dataset_dir, file_pattern, reader)
Given a dataset name and a split_name returns a Dataset. Args: name: String, the name of the dataset. split_name: A train/test split name. dataset_dir: The directory where the dataset files are stored. file_pattern: The file pattern to use for matching the dataset source files. reader: The subclass of tf.ReaderBase. If left as `None`, then the default reader defined by each dataset is used. Returns: A `Dataset` class. Raises: ValueError: If the dataset `name` is unknown.
625941b721a7993f00bc7b2d
def getUserByLogin(login): <NEW_LINE> <INDENT> pass
Return the User object by looking it up by its login
625941b7adb09d7d5db6c5d7
def unique_slug_generator(instance, new_slug=None): <NEW_LINE> <INDENT> if new_slug is not None: <NEW_LINE> <INDENT> slug = new_slug <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> slug = slugify(instance.title) <NEW_LINE> <DEDENT> if slug in DONT_USE: <NEW_LINE> <INDENT> new_slug = "{slug}-{randstr}".format( slug=slug, randstr=random_string_generator(size=20) ) <NEW_LINE> return unique_slug_generator(instance, new_slug=new_slug) <NEW_LINE> <DEDENT> Klass = instance.__class__ <NEW_LINE> qs_exists = Klass.objects.filter(slug=slug).exists() <NEW_LINE> if qs_exists: <NEW_LINE> <INDENT> new_slug = "{slug}-{randstr}".format( slug=slug, randstr=random_string_generator(size=10) ) <NEW_LINE> return unique_slug_generator(instance, new_slug=new_slug) <NEW_LINE> <DEDENT> return slug
This is for a Django project and it assumes your instance has a model with a slug field and a title character (char) field.
625941b77b180e01f3dc464a
def _get_pair(self, positive): <NEW_LINE> <INDENT> g = self._get_graph() <NEW_LINE> if self._permute: <NEW_LINE> <INDENT> permuted_g = permute_graph_nodes(g) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> permuted_g = g <NEW_LINE> <DEDENT> n_changes = self._k_pos if positive else self._k_neg <NEW_LINE> changed_g = substitute_random_edges(g, n_changes) <NEW_LINE> return permuted_g, changed_g
Generate one pair of graphs.
625941b763d6d428bbe44333
def pauli_to_arb_weights(w,b,hs): <NEW_LINE> <INDENT> b_ = np.tensordot(b,hs,[0,0]) <NEW_LINE> b_ = (np.real(b_)+np.imag(b_))/2 <NEW_LINE> b_ = b_.flatten() <NEW_LINE> w_ = np.tensordot(w,hs,[1,0]) <NEW_LINE> w_ = (np.real(w_)+np.imag(w_))/2 <NEW_LINE> w_.shape = w_.shape[0], w_.shape[1]*w_.shape[2] <NEW_LINE> return w_,b_
Transform the weights of `StateProbabilitiesPaulied` to `StateProbabilities` weights.
625941b78e71fb1e9831d5f1
def get_captcha_image(self): <NEW_LINE> <INDENT> text, image, result = self.gen_captcha_text_and_image() <NEW_LINE> plt.imshow(image) <NEW_LINE> plt.axis('off') <NEW_LINE> dir_name = "{base_dir}/v_code".format(base_dir=BASE_DIR) <NEW_LINE> file_name = "{name}.png".format(name=datetime.now().strftime('%Y%m%d%H%M%S')) <NEW_LINE> file_path = dir_name + '/' + file_name <NEW_LINE> if not os.path.exists(dir_name): <NEW_LINE> <INDENT> os.mkdir(dir_name) <NEW_LINE> <DEDENT> plt.savefig(file_path) <NEW_LINE> image_data = open(file_path, "rb").read() <NEW_LINE> os.remove(file_path) <NEW_LINE> return text, result, file_path, image_data
Output a captcha image :return:
625941b78a43f66fc4b53ead
def build_target(platform, install, nb_cores): <NEW_LINE> <INDENT> work_directory = BASEDIR + '/' + platform <NEW_LINE> binutils_directory = work_directory + '/binutils-' + BINUTILS_VERSION <NEW_LINE> gcc_directory = work_directory + '/gcc-' + GCC_VERSION <NEW_LINE> obj_directory = work_directory + '/gcc-obj' <NEW_LINE> gdb_directory = work_directory + '/gdb-' + GDB_VERSION <NEW_LINE> target = set_target_from_platform(platform) <NEW_LINE> if os.environ.get('CROSS_PREFIX'): <NEW_LINE> <INDENT> cross_prefix = os.environ['CROSS_PREFIX'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cross_prefix = '/usr/local/cross/' <NEW_LINE> <DEDENT> prefix = cross_prefix + platform <NEW_LINE> os.environ['PATH'] += ':{0}{1}/bin'.format(INSTALL_DIR, prefix) <NEW_LINE> os.environ['PATH'] += ':{0}/bin'.format(prefix) <NEW_LINE> cleanup_previous_build(install, prefix, work_directory, obj_directory) <NEW_LINE> unpack_tarballs(work_directory) <NEW_LINE> build_binutils(install, nb_cores, binutils_directory, target, prefix) <NEW_LINE> build_gcc(install, nb_cores, obj_directory, prefix, gcc_directory, target) <NEW_LINE> build_gdb(install, nb_cores, gdb_directory, target, prefix) <NEW_LINE> os.chdir(BASEDIR) <NEW_LINE> print('>>> Cleaning up') <NEW_LINE> cleanup_dir(work_directory)
Cross-compile gcc toolchain for a given architecture.
625941b7fb3f5b602dac34d2
def initialize(self, supersubscenario): <NEW_LINE> <INDENT> self.store = {} <NEW_LINE> self.evaluator.store = self.store <NEW_LINE> self.prepare_params() <NEW_LINE> self.setup_subscenario(supersubscenario) <NEW_LINE> current_dates = self.dates[:self.foresight_periods] <NEW_LINE> current_dates_as_string = self.dates_as_string[:self.foresight_periods] <NEW_LINE> step = self.dates[0].day <NEW_LINE> start = current_dates_as_string[0] <NEW_LINE> end = current_dates_as_string[-1] <NEW_LINE> step = step <NEW_LINE> initial_volumes = {} <NEW_LINE> for tattr_idx, values in self.initial_volumes.items(): <NEW_LINE> <INDENT> scale = self.params[tattr_idx]['scale'] <NEW_LINE> unit = self.params[tattr_idx]['unit'] <NEW_LINE> for resource_id, value in values.items(): <NEW_LINE> <INDENT> initial_volumes[resource_id] = convert(value * scale, 'Volume', unit, 'hm^3') <NEW_LINE> <DEDENT> <DEDENT> self.model = PywrModel( network=self.network, template=self.template, start=start, end=end, step=step, initial_volumes=initial_volumes )
A wrapper for all initialization steps.
625941b74d74a7450ccd4006
def memory(since=0.0): <NEW_LINE> <INDENT> return _VmB('VmSize:') - since
Return memory usage in bytes.
625941b794891a1f4081b8ec
def find_object(self, obj_type, obj_name): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> obj = self.model_map['object'][obj_type][obj_name][1] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> obj = None <NEW_LINE> <DEDENT> return obj
Find object by name in the model_map, if it exists. :param: obj_type: type of the object to look up. :type: obj_type: str :param: obj_name: name of the object to look up. :type: obj_name: str
625941b75fdd1c0f98dc0075
def turn_off(self): <NEW_LINE> <INDENT> self.set_hvac_mode(OPERATION_MODE_OFF) <NEW_LINE> self._signal_zone_update()
Turn off the zone.
625941b7e5267d203edcdae5
def check_data(self, values): <NEW_LINE> <INDENT> if not isinstance(values, list): <NEW_LINE> <INDENT> raise TypeError("Data needs to be a list.") <NEW_LINE> <DEDENT> if not self.check_length(values): <NEW_LINE> <INDENT> raise AVMListLengthError("Data is not the correct length.") <NEW_LINE> <DEDENT> checked_data = [] <NEW_LINE> for value in values: <NEW_LINE> <INDENT> if value: <NEW_LINE> <INDENT> if (isinstance(value, datetime.date) or isinstance(value, datetime.datetime)): <NEW_LINE> <INDENT> value = value.isoformat() <NEW_LINE> checked_data.append(value) <NEW_LINE> <DEDENT> elif isinstance(value, basestring): <NEW_LINE> <INDENT> value = value <NEW_LINE> checked_data.append(value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError("Elements of the list need to be a Python Date or Datetime object.") <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> checked_data.append("-") <NEW_LINE> <DEDENT> <DEDENT> if len(set(checked_data)) == 1 and checked_data[0] == "-": <NEW_LINE> <INDENT> checked_data = [] <NEW_LINE> <DEDENT> return checked_data
Checks that the data passed is a Python List, and that the elements are Date or Datetime objects. :return: List of Datetime objects in ISO format (i.e. Strings encoded as UTF-8)
625941b7fbf16365ca6f6000
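The "ISO format" the docstring above refers to is what the standard library's isoformat() produces, for example:

import datetime

print(datetime.date(2021, 3, 5).isoformat())               # 2021-03-05
print(datetime.datetime(2021, 3, 5, 12, 30).isoformat())    # 2021-03-05T12:30:00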
def get_row(puzzle: str, row_no: int) -> str: <NEW_LINE> <INDENT> grl = get_row_length(puzzle) <NEW_LINE> return puzzle[row_no + (grl * row_no): grl * (row_no + 1) + row_no]
Return the letters in the row with the given row number. >>> get_row('abcd efgh ', 1) efgh >>> get_row('abcd efgh ijkl ', 2) ijkl
625941b70a366e3fb873e65b
def load_lookups(self, auth): <NEW_LINE> <INDENT> d1 = self.proxy.callRemote('jira1.getIssueTypes', self.auth) <NEW_LINE> d1.addCallback(self.process_issue_types) <NEW_LINE> d2 = self.proxy.callRemote('jira1.getPriorities', self.auth) <NEW_LINE> d2.addCallback(self.process_priorities) <NEW_LINE> return defer.DeferredList([d1, d2])
Load and process lookup tables: issue types priorities
625941b755399d3f055884f7
def __init__(self, cornerID, trackDeathProb, deltaT, initModel, motionModel, maxLen = 50) : <NEW_LINE> <INDENT> self.trackDeathProb = trackDeathProb <NEW_LINE> self.cornerID = cornerID <NEW_LINE> self._motionModel = motionModel <NEW_LINE> self._framesRemain = maxLen <NEW_LINE> self.deltaT = deltaT <NEW_LINE> self.strm_size = 0. <NEW_LINE> self.frameNum, self.xLoc, self.yLoc, self.xSpeed, self.ySpeed = initModel() <NEW_LINE> self._useInitState = initModel.useInitState <NEW_LINE> self._isFirstCall = True
Create a point that will be used to create a track. Parameters ---------- cornerID : int Integer to use to begin incrementally ID-ing the points generated. trackDeathProb : float between 0 and 1 The probability that a track will die at some particular iteration. 0.0 for eternal tracks, 1.0 for single points. deltaT : float The time step for each frame in the track. maxLen : int Maximum length of the track
625941b78a349b6b435e7fb8
def create_view_if_not_exists(client, view, tables, exclude, sql_dir): <NEW_LINE> <INDENT> if any(fnmatchcase(pattern, view) for pattern in exclude): <NEW_LINE> <INDENT> logging.info("skipping table: matched by exclude pattern: {view}") <NEW_LINE> return <NEW_LINE> <DEDENT> if view.endswith("_"): <NEW_LINE> <INDENT> logging.info("skipping table ending in _: {view}") <NEW_LINE> return <NEW_LINE> <DEDENT> version = max( int(match.group()[2:]) for table in tables for match in [VERSION_RE.search(table)] if match is not None ) <NEW_LINE> project, dataset, viewname = view.split(".") <NEW_LINE> target = f"{view}_v{version}" <NEW_LINE> view_dataset = dataset.rsplit("_", 1)[0] <NEW_LINE> full_view_id = ".".join([project, view_dataset, viewname]) <NEW_LINE> target_file = os.path.join(sql_dir, project, view_dataset, viewname, "view.sql") <NEW_LINE> if not os.path.exists(target_file): <NEW_LINE> <INDENT> table = client.get_table(target) <NEW_LINE> replacements = ["mozfun.norm.metadata(metadata)" " AS metadata"] <NEW_LINE> schema_id = table.labels.get("schema_id", None) <NEW_LINE> if schema_id == "glean_ping_1": <NEW_LINE> <INDENT> replacements += ["mozfun.norm.glean_ping_info(ping_info)" " AS ping_info"] <NEW_LINE> if table.table_id == "baseline_v1": <NEW_LINE> <INDENT> replacements += [ "mozfun.norm.glean_baseline_client_info" "(client_info, metrics)" " AS client_info" ] <NEW_LINE> <DEDENT> if table.dataset_id.startswith( "org_mozilla_fenix" ) and table.table_id.startswith("metrics"): <NEW_LINE> <INDENT> replacements += [ "`moz-fx-data-shared-prod.udf.normalize_fenix_metrics`" "(client_info.telemetry_sdk_build, metrics)" " AS metrics" ] <NEW_LINE> <DEDENT> if table.dataset_id.startswith("firefox_desktop"): <NEW_LINE> <INDENT> replacements += [ "'Firefox' AS normalized_app_name", ] <NEW_LINE> <DEDENT> <DEDENT> elif schema_id in ("main_ping_1", "main_ping_4"): <NEW_LINE> <INDENT> replacements += [ "`moz-fx-data-shared-prod.udf.normalize_main_payload`(payload)" " AS payload" ] <NEW_LINE> <DEDENT> replacements = ",\n ".join(replacements) <NEW_LINE> view_query = VIEW_QUERY_TEMPLATE.format( target=target, replacements=replacements ).strip() <NEW_LINE> full_sql = f"CREATE OR REPLACE VIEW\n `{full_view_id}`\nAS {view_query}\n" <NEW_LINE> print("Creating " + target_file) <NEW_LINE> if not os.path.exists(os.path.dirname(target_file)): <NEW_LINE> <INDENT> os.makedirs(os.path.dirname(target_file)) <NEW_LINE> <DEDENT> with open(target_file, "w") as f: <NEW_LINE> <INDENT> f.write(full_sql)
Create view unless a local file for creating the view exists.
625941b7f548e778e58cd3c0
def lf_list(self, log): <NEW_LINE> <INDENT> log.cl_stdout("Filesystem name: %20s", self.lf_fsname) <NEW_LINE> log.cl_stdout("") <NEW_LINE> log.cl_stdout("Services") <NEW_LINE> table = prettytable.PrettyTable() <NEW_LINE> table.field_names = ["Service", "Type"] <NEW_LINE> for service in self.lf_service_dict.values(): <NEW_LINE> <INDENT> table.add_row([service.ls_service_name, service.ls_service_type]) <NEW_LINE> <DEDENT> log.cl_stdout(table.get_string()) <NEW_LINE> log.cl_stdout("") <NEW_LINE> log.cl_stdout("Clients") <NEW_LINE> table = prettytable.PrettyTable() <NEW_LINE> table.field_names = ["Host", "Mount point"] <NEW_LINE> for client in self.lf_clients.values(): <NEW_LINE> <INDENT> table.add_row([client.lc_host.sh_hostname, client.lc_mnt]) <NEW_LINE> <DEDENT> log.cl_stdout(table) <NEW_LINE> return 0
Print information about this filesystem
625941b797e22403b379cddd
def flatten(self,key_list=None): <NEW_LINE> <INDENT> if not key_list: <NEW_LINE> <INDENT> key_list = self <NEW_LINE> <DEDENT> return dict((k,self[k]) for k in key_list)
return dict of all non-shadowed bindings
625941b723849d37ff7b2ed6
@pytest.fixture <NEW_LINE> def bot(): <NEW_LINE> <INDENT> return create_bot()
Return a new logged in user.
625941b701c39578d7e74c88
def delete_raw(self, basename): <NEW_LINE> <INDENT> if not self.keep_raw: <NEW_LINE> <INDENT> subprocess.call( ["/bin/rm", "{}.raw".format(basename)] )
Remove raw file after encoding
625941b71b99ca400220a8f5
def __init__(self, chart_handler, name, friendly_name, unit, icon): <NEW_LINE> <INDENT> self._chart = chart_handler <NEW_LINE> self._name = name <NEW_LINE> self._friendly_name = friendly_name <NEW_LINE> self._icon = icon <NEW_LINE> self._unit = unit
Initialize the psychrometric sensor object.
625941b7ec188e330fd5a5eb
def create_post(post_text, days): <NEW_LINE> <INDENT> time = timezone.now() + datetime.timedelta(days=days) <NEW_LINE> return Post.objects.create(post_text=post_text, pub_date=time)
Creates a post with the given `post_text` and published the given number of `days` offset to now (negative for posts published in the past, positive for posts that have yet to be published).
625941b730bbd722463cbc07
def test_share_deleted(self): <NEW_LINE> <INDENT> notif = ShareDeleted(shared_to_id='user1') <NEW_LINE> self.assertEqual({'user1'}, notif.recipient_ids)
Test that ShareDeleted has the correct recipient_id list.
625941b732920d7e50b28010
def get_percentile_hub(self, percentile): <NEW_LINE> <INDENT> q_value = self.get_hubs()['trips_in_area'].quantile(percentile) <NEW_LINE> targets = self.get_hubs().iloc[(self.get_hubs()['trips_in_area'] - q_value).abs().argsort()[:2]] <NEW_LINE> return targets.iloc[0]['stop_I']
returns the stop_I of the stop closest to the percentile when considering trips_in_area
625941b76aa9bd52df036be6
def delete_credential(self, credential_id): <NEW_LINE> <INDENT> resp, body = self.delete('credentials/%s' % credential_id, self.headers) <NEW_LINE> return resp, body
Deletes a credential.
625941b767a9b606de4a7d01
def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None): <NEW_LINE> <INDENT> for frequency in self.unique_frequencies: <NEW_LINE> <INDENT> if freq_filter is not None and not freq_filter(frequency): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> digest_panel = self.digest_panels.get(frequency, None) <NEW_LINE> while algo_dt > self.cur_window_closes[frequency]: <NEW_LINE> <INDENT> earliest_minute = self.cur_window_starts[frequency] <NEW_LINE> latest_minute = self.cur_window_closes[frequency] <NEW_LINE> minutes_to_process = self.buffer_panel_minutes( buffer_panel, earliest_minute=earliest_minute, latest_minute=latest_minute, ) <NEW_LINE> self.roll(frequency, digest_panel, minutes_to_process, latest_minute) <NEW_LINE> self.cur_window_starts[frequency] = frequency.next_window_start(latest_minute) <NEW_LINE> self.cur_window_closes[frequency] = frequency.window_close(self.cur_window_starts[frequency])
Check whether @algo_dt is greater than cur_window_close for any of our frequencies. If so, roll a digest for that frequency using data drawn from @buffer_panel and insert it into the appropriate digest panels. If @freq_filter is specified, only use the given data to update frequencies on which the filter returns True.
625941b78a43f66fc4b53eae
def main(): <NEW_LINE> <INDENT> parser = ArgumentParser() <NEW_LINE> parser.add_argument('-r', '--repository', help='docker image repository', dest='repository', required=True) <NEW_LINE> parser.add_argument('-t', '--tag', help='docker image tag', dest='tag', required=False, default='') <NEW_LINE> args = parser.parse_args() <NEW_LINE> generate_image_manifest(args.repository, args.tag)
Entry point
625941b78e7ae83300e4ae10
def _grad(self, values): <NEW_LINE> <INDENT> w, v = LA.eigh(values[0]) <NEW_LINE> d = np.zeros(w.shape) <NEW_LINE> d[-1] = 1 <NEW_LINE> d = np.diag(d) <NEW_LINE> D = v.dot(d).dot(v.T) <NEW_LINE> return [sp.csc_matrix(D.ravel(order='F')).T]
Gives the (sub/super)gradient of the atom w.r.t. each argument. Matrix expressions are vectorized, so the gradient is a matrix. Args: values: A list of numeric values for the arguments. Returns: A list of SciPy CSC sparse matrices or None.
625941b738b623060ff0ac33
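The gradient returned above is the outer product of the eigenvector belonging to the largest eigenvalue, vectorized column-major. A standalone NumPy/SciPy sketch of the same computation (the surrounding atom class is assumed, not shown):

    import numpy as np
    from numpy import linalg as LA
    import scipy.sparse as sp

    A = np.array([[2.0, 0.0],
                  [0.0, 5.0]])              # symmetric input matrix
    w, v = LA.eigh(A)                       # eigenvalues in ascending order
    d = np.zeros(w.shape)
    d[-1] = 1                               # pick the largest eigenvalue
    D = v.dot(np.diag(d)).dot(v.T)          # outer product of its eigenvector
    grad = sp.csc_matrix(D.ravel(order='F')).T
    print(D)  # [[0. 0.] [0. 1.]] -> lambda_max sits on the second coordinate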
def _fill_tab_element(self): <NEW_LINE> <INDENT> table_element = "v_ui_element_x_" + self.feature_type <NEW_LINE> self._fill_tbl_element_man(self.dlg_cf, self.tbl_element, table_element, self.filter) <NEW_LINE> tools_gw.set_tablemodel_config(self.dlg_cf, self.tbl_element, table_element)
Fill tab 'Element'
625941b74c3428357757c16f
def inputs_count(self): <NEW_LINE> <INDENT> return len(self._input_nodes_map.keys())
Returns number of inputs for the matched sub-graph. Only unique input tensors are considered, thus if the same tensor is consumed by two or more input nodes of the sub-graph it is counted only once. :return: Number or unique input tensors.
625941b7851cf427c661a35f
def check(self, f=None, verbose=True, level=1, checktype=None): <NEW_LINE> <INDENT> chk = self._get_check(f, verbose, level, checktype) <NEW_LINE> if ( self.options_dict["StopOption"] == 3 and self.options_dict["TimePointOption"] == 3 ): <NEW_LINE> <INDENT> if self.time_pts[-1] < self.stop_time: <NEW_LINE> <INDENT> chk._add_to_summary( type="Error", value=self.stop_time, desc="Stop time greater than last TimePoint", ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> chk.append_passed("Valid stop time") <NEW_LINE> <DEDENT> chk.summarize() <NEW_LINE> <DEDENT> return chk
Check package data for common errors. Parameters ---------- f : str or file handle String defining file name or file handle for summary file of check method output. If a string is passed, a file handle is created. If f is None, check method does not write results to a summary file. (default is None) verbose : bool Boolean flag used to determine if check method results are written to the screen level : int Check method analysis level. If level=0, summary checks are performed. If level=1, full checks are performed. Returns ------- None Examples --------
625941b760cbc95b062c638e
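A hedged usage sketch; the simulation object (called sim below) and the file name are illustrative, only the signature comes from the record:

    # Summary-level check, results printed to the screen only.
    chk = sim.check(verbose=True, level=0)

    # Full check, writing the summary to a named file.
    chk = sim.check(f="check_summary.txt", verbose=False, level=1)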
def airdateModifyStamp(self): <NEW_LINE> <INDENT> if not self.show.airs and self.show.network: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> airdate_ordinal = self.airdate.toordinal() <NEW_LINE> if airdate_ordinal < 1: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> airdatetime = tz_updater.parse_date_time(airdate_ordinal, self.show.airs, self.show.network) <NEW_LINE> if sickrage.srCore.srConfig.FILE_TIMESTAMP_TIMEZONE == 'local': <NEW_LINE> <INDENT> airdatetime = airdatetime.astimezone(tz_updater.sr_timezone) <NEW_LINE> <DEDENT> filemtime = datetime.datetime.fromtimestamp(os.path.getmtime(self.location)).replace( tzinfo=tz_updater.sr_timezone) <NEW_LINE> if filemtime != airdatetime: <NEW_LINE> <INDENT> import time <NEW_LINE> airdatetime = airdatetime.timetuple() <NEW_LINE> sickrage.srCore.srLogger.debug(str(self.show.indexerid) + ": About to modify date of '" + self.location + "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime)) <NEW_LINE> try: <NEW_LINE> <INDENT> if touchFile(self.location, time.mktime(airdatetime)): <NEW_LINE> <INDENT> sickrage.srCore.srLogger.info( str(self.show.indexerid) + ": Changed modify date of " + os.path.basename(self.location) + " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sickrage.srCore.srLogger.error( str(self.show.indexerid) + ": Unable to modify date of " + os.path.basename( self.location) + " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime)) <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> sickrage.srCore.srLogger.error( str(self.show.indexerid) + ": Failed to modify date of '" + os.path.basename(self.location) + "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime))
Make the modify date and time of a file reflect the show air date and time. Note: Also called from postProcessor
625941b76aa9bd52df036be7
def _get_priority_vars(objects, priority_arg, compat='equals'): <NEW_LINE> <INDENT> if priority_arg is None: <NEW_LINE> <INDENT> priority_vars = {} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> expanded = expand_variable_dicts([objects[priority_arg]]) <NEW_LINE> priority_vars = merge_variables(expanded, compat=compat) <NEW_LINE> <DEDENT> return priority_vars
Extract the priority variable from a list of mappings. We need this method because in some cases the priority argument itself might have conflicting values (e.g., if it is a dict with two DataArray values with conflicting coordinate values). Parameters ---------- objects : list of dictionaries of variables Dictionaries in which to find the priority variables. priority_arg : int or None Integer object whose variable should take priority. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional Compatibility checks to use when merging variables. Returns ------- None, if priority_arg is None, or an OrderedDict with Variable objects as values indicating priority variables.
625941b7091ae35668666daa
def test_persistent_close(self): <NEW_LINE> <INDENT> request = webhttp.message.Request() <NEW_LINE> request.method = "GET" <NEW_LINE> request.uri = "/test/index.html" <NEW_LINE> request.set_header("Host", "localhost:{}".format(portnr)) <NEW_LINE> request.set_header("Connection", "keep-alive") <NEW_LINE> self.client_socket.settimeout(20) <NEW_LINE> for x in range(0, 5): <NEW_LINE> <INDENT> self.client_socket.send(str(request)) <NEW_LINE> message = self.client_socket.recv(1024) <NEW_LINE> response = self.parser.parse_response(message) <NEW_LINE> self.assertNotEqual(response.get_header("Connection"), "close") <NEW_LINE> <DEDENT> request.set_header("Connection", "close") <NEW_LINE> self.client_socket.send(str(request)) <NEW_LINE> message = self.client_socket.recv(1024) <NEW_LINE> response = self.parser.parse_response(message) <NEW_LINE> self.assertEqual(response.get_header("Connection"), "close") <NEW_LINE> try: <NEW_LINE> <INDENT> self.client_socket.settimeout(10) <NEW_LINE> self.assertTrue(len(self.client_socket.recv(1024)) == 0) <NEW_LINE> <DEDENT> except socket.timeout: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> except socket.error: <NEW_LINE> <INDENT> pass
Multiple GETs over the same (persistent) connection, with the last GET requesting that the connection be closed; the connection should then be closed.
625941b744b2445a33931ee4
def process_log_files(source_name, log_file_list): <NEW_LINE> <INDENT> result_list = [] <NEW_LINE> out_fname = create_out_fname(source_name, suffix='_sum', ext=".csv") <NEW_LINE> for log_file in log_file_list: <NEW_LINE> <INDENT> result_list += process_log(log_file) <NEW_LINE> <DEDENT> if len(result_list) == 0: <NEW_LINE> <INDENT> warning("Found no lammps log data to process from: {}".format(source_name)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> write_csv(result_list, out_fname, LOG_FIELDNAMES, extrasaction="ignore")
Loops through all files and prints output @param source_name: the source name to use as the base for creating an outfile name @param log_file_list: list of file names to read and process
625941b77cff6e4e811177cb
def test_user_info_logged_in(self): <NEW_LINE> <INDENT> result = self.client.get('/user_info') <NEW_LINE> self.assertIn("User information for", result.data)
tests user_info route
625941b707d97122c41786d0
def islocked(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> lock = self._readlock() <NEW_LINE> os.kill(int(lock['pid']), 0) <NEW_LINE> return (lock['host'] == self.host) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return False
Check if we already have a lock
625941b73c8af77a43ae35e3
def _pair_verify_one(self, tlv_objects): <NEW_LINE> <INDENT> logger.debug("Pair verify [1/2].") <NEW_LINE> client_public = tlv_objects[HAP_TLV_TAGS.PUBLIC_KEY] <NEW_LINE> private_key = curve25519.Private() <NEW_LINE> public_key = private_key.get_public() <NEW_LINE> shared_key = private_key.get_shared_key( curve25519.Public(client_public), lambda x: x) <NEW_LINE> mac = self.state.mac.encode() <NEW_LINE> material = public_key.serialize() + mac + client_public <NEW_LINE> server_proof = self.state.private_key.sign(material) <NEW_LINE> output_key = hap_hkdf(shared_key, self.PVERIFY_1_SALT, self.PVERIFY_1_INFO) <NEW_LINE> self._set_encryption_ctx(client_public, private_key, public_key, shared_key, output_key) <NEW_LINE> message = tlv.encode(HAP_TLV_TAGS.USERNAME, mac, HAP_TLV_TAGS.PROOF, server_proof) <NEW_LINE> cipher = CHACHA20_POLY1305(output_key, "python") <NEW_LINE> aead_message = bytes( cipher.seal(self.PVERIFY_1_NONCE, bytearray(message), b"")) <NEW_LINE> data = tlv.encode(HAP_TLV_TAGS.SEQUENCE_NUM, b'\x02', HAP_TLV_TAGS.ENCRYPTED_DATA, aead_message, HAP_TLV_TAGS.PUBLIC_KEY, public_key.serialize()) <NEW_LINE> self.send_response(200) <NEW_LINE> self.send_header("Content-Type", self.PAIRING_RESPONSE_TYPE) <NEW_LINE> self.end_response(data)
Generate a new session key pair and send a proof to the client. @param tlv_objects: The TLV data received from the client. @type tlv_objects: dict
625941b7f8510a7c17cf9549
def markio_src(args): <NEW_LINE> <INDENT> _, source, lang = markio_extract_source(args) <NEW_LINE> if args.output: <NEW_LINE> <INDENT> with open(args.output, 'w', encoding='utf8') as F: <NEW_LINE> <INDENT> F.write(source) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print(source)
`markio src <file>` command.
625941b74527f215b584c2a0
def removeself(self): <NEW_LINE> <INDENT> if self.container: <NEW_LINE> <INDENT> self.container.remove(self)
Remove self from any container
625941b71f037a2d8b946044
def _render_wrapper(self, prefix: str = "./"): <NEW_LINE> <INDENT> template = self._env.get_template("Grid_NaunetWrapper.C.j2") <NEW_LINE> specnum = [ "DeNum" if s.iselectron else f"{s.alias}Num" for s in self.netinfo.species ] <NEW_LINE> declare = ", ".join(specnum) <NEW_LINE> initial = " = ".join([*specnum, "0"]) <NEW_LINE> abund = [] <NEW_LINE> invabund = [] <NEW_LINE> for s, n in zip(self.netinfo.species, specnum): <NEW_LINE> <INDENT> massnum = 1.0 if s.iselectron else s.massnumber <NEW_LINE> if self.device == "cpu": <NEW_LINE> <INDENT> abund.append( f"y[IDX_{s.alias}] = max(BaryonField[{n}][igrid], 1e-40) * NumberDensityUnits / {massnum}" ) <NEW_LINE> invabund.append( f"BaryonField[{n}][igrid] = max(y[IDX_{s.alias}] * {massnum} / NumberDensityUnits, 1e-40)" ) <NEW_LINE> <DEDENT> elif self.device == "gpu": <NEW_LINE> <INDENT> abund.append( f"y[sidx + IDX_{s.alias}] = max(BaryonField[{n}][igrid], 1e-40) * NumberDensityUnits / {massnum}" ) <NEW_LINE> invabund.append( f"BaryonField[{n}][igrid] = max(y[sidx + IDX_{s.alias}] * {massnum} / NumberDensityUnits, 1e-40)" ) <NEW_LINE> <DEDENT> <DEDENT> result = template.render( device=self.device, declare=declare, initial=initial, abund=abund, invabund=invabund, ) <NEW_LINE> with open(os.path.join(prefix, "Grid_NaunetWrapper.C"), "w") as out: <NEW_LINE> <INDENT> out.write(result)
Render Grid_NaunetWrapper.C for Enzo Args: prefix (str, optional): Path to save output file. Defaults to "./".
625941b738b623060ff0ac34
def test_simple_stats(self): <NEW_LINE> <INDENT> stats = Stats() <NEW_LINE> stats.test = 1 <NEW_LINE> assert stats.test == 1 <NEW_LINE> assert stats['test'] == 1 <NEW_LINE> stats['test2'] = 2 <NEW_LINE> assert stats.test2 == 2 <NEW_LINE> assert stats['test2'] == 2 <NEW_LINE> stats['test'] = 2 <NEW_LINE> assert stats.test == 2 <NEW_LINE> assert stats['test'] == 2 <NEW_LINE> stats.test2 = 1 <NEW_LINE> assert stats.test2 == 1 <NEW_LINE> assert stats['test2'] == 1
Various setter and getter tests.
625941b7099cdd3c635f0aa1
def __init__(self, encoder_h_dim=64, input_dim=2, embedding_dim=16, dropout=0.0): <NEW_LINE> <INDENT> super(MotionEncoder, self).__init__() <NEW_LINE> self.encoder_h_dim = encoder_h_dim <NEW_LINE> self.embedding_dim = embedding_dim <NEW_LINE> self.input_dim = input_dim <NEW_LINE> if embedding_dim: <NEW_LINE> <INDENT> self.spatial_embedding = nn.Linear(input_dim, embedding_dim) <NEW_LINE> self.encoder = nn.LSTM(embedding_dim, encoder_h_dim) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.encoder = nn.LSTM(input_dim, encoder_h_dim)
Initialize MotionEncoder. Parameters: encoder_h_dim (int) -- dimensionality of hidden state; input_dim (int) -- input dimensionality of spatial coordinates; embedding_dim (int) -- dimensionality of spatial embedding; dropout (float) -- dropout in LSTM layer
625941b79b70327d1c4e0c19
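A minimal construction sketch, assuming PyTorch and the class defined in the record (only __init__ is shown there, so no forward pass is illustrated):

    encoder = MotionEncoder(encoder_h_dim=64, input_dim=2,
                            embedding_dim=16, dropout=0.0)
    print(encoder.spatial_embedding)  # Linear(in_features=2, out_features=16, bias=True)
    print(encoder.encoder)            # LSTM(16, 64)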
def test_md_matrix_init_marix(self): <NEW_LINE> <INDENT> assert self.md_mtx.mtx == [ [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], ]
.
625941b730dc7b76659017af
def test_1_5_aromatic_heteroatom_canonicalization10(): <NEW_LINE> <INDENT> assert canonicalize_tautomer_smiles('O=c1nc2[nH]ccn2cc1') == 'O=c1ccn2cc[nH]c2n1'
1,5 aromatic heteroatom H shift
625941b732920d7e50b28011
def jie(self, register, offset): <NEW_LINE> <INDENT> if self.registers[register] % 2 == 0: <NEW_LINE> <INDENT> return int(offset)
r, offset is like jmp, but only jumps if register r is even ("jump if even").
625941b76fece00bbac2d580
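A self-contained sketch of the "jump if even" instruction, assuming a machine object holding a registers dict (the Machine class below is hypothetical):

    class Machine:
        def __init__(self, registers):
            self.registers = registers

        def jie(self, register, offset):
            # Jump by `offset` only when the register holds an even value.
            if self.registers[register] % 2 == 0:
                return int(offset)

    m = Machine({'a': 4, 'b': 3})
    print(m.jie('a', '+2'))  # 2    -> 'a' is even, so jump two instructions
    print(m.jie('b', '+2'))  # None -> 'b' is odd, no jump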
def to_dict(self): <NEW_LINE> <INDENT> dict_repr = {} <NEW_LINE> for key, value in self.__dict__.items(): <NEW_LINE> <INDENT> dict_repr[key] = value <NEW_LINE> if isinstance(value, datetime): <NEW_LINE> <INDENT> dict_repr[key] = value.strftime('%Y-%m-%dT%H:%M:%S.%f') <NEW_LINE> <DEDENT> <DEDENT> dict_repr["__class__"] = type(self).__name__ <NEW_LINE> return dict_repr
returns a dictionary containing all keys/values of __dict__
625941b7fff4ab517eb2f27e
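A self-contained sketch of the serialization behaviour; the BaseModel class below is illustrative, only to_dict comes from the record:

    from datetime import datetime

    class BaseModel:
        def __init__(self):
            self.id = "1234"
            self.created_at = datetime(2024, 1, 1, 12, 0, 0)

        def to_dict(self):
            dict_repr = {}
            for key, value in self.__dict__.items():
                dict_repr[key] = value
                if isinstance(value, datetime):
                    dict_repr[key] = value.strftime('%Y-%m-%dT%H:%M:%S.%f')
            dict_repr["__class__"] = type(self).__name__
            return dict_repr

    print(BaseModel().to_dict())
    # {'id': '1234', 'created_at': '2024-01-01T12:00:00.000000', '__class__': 'BaseModel'}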
def renew_close_to_expiration(self, margin_in_seconds=A_DAY): <NEW_LINE> <INDENT> subscriptions = self.storage.close_to_expiration(margin_in_seconds) <NEW_LINE> for subscription in subscriptions: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.subscribe_impl(**subscription) <NEW_LINE> <DEDENT> except SubscriberError as e: <NEW_LINE> <INDENT> warn(RENEW_FAILURE % (subscription['topic_url'], subscription['callback_id']), e)
Automatically renew subscriptions that are close to expiring, or have already expired. margin_in_seconds determines if a subscription is in fact close to expiring. By default, said margin is set to be a single day (24 hours). This is a long-running method for any non-trivial usage of the subscriber module, as renewal requires several http requests, and subscriptions are processed serially. Because of that, it is recommended to run this method in a celery task.
625941b7a05bb46b383ec672
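A hedged usage sketch; subscriber is the object exposing the method above and A_DAY is a day in seconds, as the docstring suggests:

    A_DAY = 24 * 60 * 60  # seconds

    # Renew anything expiring within the next two days; meant to run from a
    # periodic background job (e.g. a celery task), not from a request handler.
    subscriber.renew_close_to_expiration(margin_in_seconds=2 * A_DAY)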
def flist_meflistrenametarget_get(self, flist, target, headers=None, query_params=None, content_type="application/json"): <NEW_LINE> <INDENT> uri = self.client.base_url + "/flist/me/"+flist+"/rename/"+target <NEW_LINE> return self.client.get(uri, None, headers, query_params, content_type)
Rename one of your flists. This is the method for GET /flist/me/{flist}/rename/{target}
625941b7d268445f265b4cba
def remove_low_amp(popt_list, amp_cutoff): <NEW_LINE> <INDENT> values_to_remove = [] <NEW_LINE> for index, value in enumerate(popt_list): <NEW_LINE> <INDENT> if index % 3 == 0: <NEW_LINE> <INDENT> current_amplitude = value <NEW_LINE> if current_amplitude < amp_cutoff: <NEW_LINE> <INDENT> values_to_remove.extend([popt_list[index], popt_list[index + 1], popt_list[index + 2]]) <NEW_LINE> <DEDENT> <DEDENT> if index % 3 == 2: <NEW_LINE> <INDENT> current_width = value <NEW_LINE> if current_width < 1e-2: <NEW_LINE> <INDENT> values_to_remove.extend([popt_list[index - 2], popt_list[index - 1], popt_list[index]]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for value in values_to_remove: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> popt_list.remove(value) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> return popt_list
Helper method to remove low amplitude peaks for both protein and non-protein parameter lists Also remove peaks of miniscule width, as these can result in similar behavior. NOTE: the width cutoff is calculated against centroid to allow for different magnitude drift axes sigma must be > 0.01 * centroid, which corresponds to a resolution of 100 * 2sqrt(2), above any typical IM system today. :param popt_list: list of Gaussian parameters [amp1, centroid1, sigma1, amp2, centroid2, sigma2, ... ] :param amp_cutoff: minimum amplitude to allow :return: updated popt_list with low amplitude peaks removed
625941b70c0af96317bb802e
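A worked numeric example for the function above; parameters come in [amp, centroid, sigma] triplets:

    # Two peaks: the second has amplitude 0.01, below the cutoff, so its whole
    # triplet is removed; the list is edited in place and also returned.
    popt = [1.0, 5.0, 0.5,     # peak 1: amp=1.0, centroid=5.0, sigma=0.5
            0.01, 6.0, 0.4]    # peak 2: amp=0.01 -> dropped
    print(remove_low_amp(popt, amp_cutoff=0.1))  # [1.0, 5.0, 0.5]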
def lookups(self, request, model_admin): <NEW_LINE> <INDENT> return ( ('8--19', '8–18 years'), ('19--26', '19–25 years'), ('26--36', '26–35 years'), ('36--56', '36–55 years'), ('56--', '56+ years'), ('0--', 'Any'), )
Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.
625941b763b5f9789fde6f2a
def handler500(request): <NEW_LINE> <INDENT> response = render_to_response('netcutterform/500.html', {}, context_instance=RequestContext(request)) <NEW_LINE> response.status_code = 500 <NEW_LINE> return response
Handler for error 500 (internal server error), doesn't work.
625941b726068e7796caeb1d
def close_session(self): <NEW_LINE> <INDENT> self.s.close() <NEW_LINE> return
Close the session. For page redirects, a new session may be opened, so the current session should also be closed accordingly. Closes all adapters, such as the session.
625941b715baa723493c3db7
def _smallest_size_at_least(height, width, smallest_side): <NEW_LINE> <INDENT> smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) <NEW_LINE> height = tf.to_float(height) <NEW_LINE> width = tf.to_float(width) <NEW_LINE> smallest_side = tf.to_float(smallest_side) <NEW_LINE> scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width, lambda: smallest_side / height) <NEW_LINE> new_height = tf.to_int32(height * scale) <NEW_LINE> new_width = tf.to_int32(width * scale) <NEW_LINE> return new_height, new_width
Computes new shape with the smallest side equal to `smallest_side`. Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: and int32 scalar tensor indicating the new width.
625941b77047854f462a1252
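A worked example of the scaling arithmetic in plain Python (the record uses the TF1 graph ops; the numbers below just trace the same math):

    height, width, smallest_side = 400.0, 600.0, 256.0
    scale = smallest_side / width if height > width else smallest_side / height
    new_height, new_width = int(height * scale), int(width * scale)
    print(new_height, new_width)  # 256 384 -- the shorter side becomes 256, aspect ratio kept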
def __init__(self, smartctl_path, options: List[str] = []): <NEW_LINE> <INDENT> self.smartctl_path = smartctl_path <NEW_LINE> self.options: List[str] = options
Instantiates and initializes the Smartctl wrapper.
625941b7fff4ab517eb2f27f
def mapf(x): <NEW_LINE> <INDENT> if x == None: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return x
Internal function to set a zero error for fixed parameters.
625941b799fddb7c1c9de1d8