Dataset columns: code (string, lengths 4–4.48k), docstring (string, lengths 1–6.45k), _id (string, length 24)
def close_file(self): <NEW_LINE> <INDENT> self._flush() <NEW_LINE> self.file.close()
Closes file handle
625941b4a8ecb033257d2ea9
def calzone(year): <NEW_LINE> <INDENT> return calcium(year, weekends=False)
Prints YYYYMMDD calendar, like calcium without weekends
625941b476d4e153a657e903
def check_json_precision(): <NEW_LINE> <INDENT> n = Decimal("20000000.00000003") <NEW_LINE> satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) <NEW_LINE> if satoshis != 2000000000000003: <NEW_LINE> <INDENT> raise RuntimeError("JSON encode/decode loses precision")
Make sure the json library being used does not lose precision converting BTC values
625941b42eb69b55b151c67d
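As written, this entry's snippet assumes two standard-library imports; a minimal runnable setup (the call at the end is purely illustrative) would be:

import json
from decimal import Decimal

# a float must survive a JSON round-trip with 8 decimal places intact
check_json_precision()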
@auth.route('/register', methods=['GET', 'POST']) <NEW_LINE> def register(): <NEW_LINE> <INDENT> if user_is_registered(): <NEW_LINE> <INDENT> flash('A user is already registered. Log in.') <NEW_LINE> return redirect(url_for('auth.login')) <NEW_LINE> <DEDENT> form = RegistrationForm() <NEW_LINE> if form.validate_on_submit(): <NEW_LINE> <INDENT> password_hash = pwd_context.hash(form.password.data) <NEW_LINE> creator_id = create_user(form.email.data, password_hash) <NEW_LINE> Budget(None, None).add() <NEW_LINE> flash('Registration successful. You can login now.') <NEW_LINE> return redirect(url_for('auth.login')) <NEW_LINE> <DEDENT> return render_template('register.html', form=form)
Register user and create pagination table with one page and no entries.
625941b43539df3088e2e11f
def test_get_attacking_queen_sqs(self): <NEW_LINE> <INDENT> board = Board('7k/8/7q/8/8/7K/8/8 w - - 0 1') <NEW_LINE> king = Piece.WK if board.turn == Color.WHITE else Piece.BK <NEW_LINE> king_sq = board.piece_sq[king][0] <NEW_LINE> queen_sqs = board._get_attacking_queen_sqs(king_sq) <NEW_LINE> self.assertEqual(len(queen_sqs), 1) <NEW_LINE> self.assertListEqual(sorted(queen_sqs), sorted([Sq.H6])) <NEW_LINE> board = Board('5q1Q/8/8/8/8/7K/8/3k4 w - - 0 1') <NEW_LINE> king = Piece.WK if board.turn == Color.WHITE else Piece.BK <NEW_LINE> king_sq = board.piece_sq[king][0] <NEW_LINE> queen_sqs = board._get_attacking_queen_sqs(king_sq) <NEW_LINE> self.assertEqual(len(queen_sqs), 0) <NEW_LINE> board = Board('8/8/8/5b2/q2N3K/8/8/3k4 w - - 0 1') <NEW_LINE> king = Piece.WK if board.turn == Color.WHITE else Piece.BK <NEW_LINE> king_sq = board.piece_sq[king][0] <NEW_LINE> queen_sqs = board._get_attacking_queen_sqs(king_sq) <NEW_LINE> self.assertEqual(len(queen_sqs), 0) <NEW_LINE> board = Board('7K/8/6Q1/5n2/8/3Q2k1/7Q/6b1 b - - 0 1') <NEW_LINE> king = Piece.WK if board.turn == Color.WHITE else Piece.BK <NEW_LINE> king_sq = board.piece_sq[king][0] <NEW_LINE> queen_sqs = board._get_attacking_queen_sqs(king_sq) <NEW_LINE> self.assertEqual(len(queen_sqs), 3) <NEW_LINE> self.assertListEqual(sorted(queen_sqs), sorted([Sq.H2, Sq.D3, Sq.G6]))
Tests the _get_attacking_queen_sqs() function of the Board class
625941b44d74a7450ccd3f97
def test_materializations_events(self): <NEW_LINE> <INDENT> devices = self.get_fixtures_computers() <NEW_LINE> vaio, _ = self.get(self.DEVICES, '', devices[0]) <NEW_LINE> account_id = str(self.account['_id']) <NEW_LINE> materialized_events = [ {'@type': Snapshot.type_name, 'secured': False, 'byUser': account_id, 'incidence': False}, {'@type': Register.type_name, 'secured': False, 'byUser': account_id, 'incidence': False}, ] <NEW_LINE> fields = {'@type', '_id', 'byUser', 'incidence', 'secured', '_updated'} <NEW_LINE> fields_snapshot = fields | {'snapshotSoftware'} <NEW_LINE> self.assertIn('events', vaio) <NEW_LINE> for event, materialized_event in zip(vaio['events'], materialized_events): <NEW_LINE> <INDENT> assert_that(materialized_event).is_subset_of(event) <NEW_LINE> fields_to_check = fields if materialized_event['@type'] != 'devices:Snapshot' else fields_snapshot <NEW_LINE> assert_that(set(event.keys())).is_equal_to(fields_to_check)
Tests materializations related to events. :return:
625941b4293b9510aa2c306d
def parse_datetime_range(time_filter): <NEW_LINE> <INDENT> if not time_filter: <NEW_LINE> <INDENT> time_filter = "[* TO *]" <NEW_LINE> <DEDENT> start, end = parse_solr_time_range_as_pair(time_filter) <NEW_LINE> start, end = parse_datetime(start), parse_datetime(end) <NEW_LINE> if None not in [start, end] and start > end: <NEW_LINE> <INDENT> raise Exception("Start must come before End: {0}".format(time_filter)) <NEW_LINE> <DEDENT> return start, end
Parse the url param to python objects. From what time range to divide by a.time.gap into intervals. Defaults to q.time and otherwise 90 days. Validate in API: re.search("\[(.*) TO (.*)\]", value) :param time_filter: [2013-03-01 TO 2013-05-01T00:00:00] :return: datetime.datetime(2013, 3, 1, 0, 0), datetime.datetime(2013, 5, 1, 0, 0)
625941b497e22403b379cd6c
def _formatFilters(self, filters, validFields): <NEW_LINE> <INDENT> formatted_filters = [] <NEW_LINE> inequality_field = None <NEW_LINE> for f in filters: <NEW_LINE> <INDENT> filtr = {field.name: getattr(f, field.name) for field in f.all_fields()} <NEW_LINE> try: <NEW_LINE> <INDENT> filtr["field"] = validFields[filtr["field"]] <NEW_LINE> filtr["operator"] = OPERATORS[filtr["operator"]] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise endpoints.BadRequestException("Filter contains invalid field (%s) or operator."%filtr["field"]) <NEW_LINE> <DEDENT> if filtr["operator"] != "=": <NEW_LINE> <INDENT> if inequality_field and inequality_field != filtr["field"]: <NEW_LINE> <INDENT> raise endpoints.BadRequestException("Inequality filter is allowed on only one field.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> inequality_field = filtr["field"] <NEW_LINE> <DEDENT> <DEDENT> formatted_filters.append(filtr) <NEW_LINE> <DEDENT> return (inequality_field, formatted_filters)
Parse, check validity and format user supplied filters.
625941b421bff66bcd684729
def _process_notification_for_handler(self, handler, notification): <NEW_LINE> <INDENT> event_type = notification['event_type'] <NEW_LINE> payload = notification['payload'] <NEW_LINE> if event_type in handler.get_event_types(): <NEW_LINE> <INDENT> LOG.debug('Found handler for: %s' % event_type) <NEW_LINE> handler.process_notification(event_type, payload)
Processes an incoming notification for a specific handler, checking to see if the handler is interested in the notification before handing it over.
625941b4cdde0d52a9e52e02
def change_to_tag(self, tag): <NEW_LINE> <INDENT> self.repo.git.checkout(tag)
Switch to the given tag. :param tag: :return:
625941b4be8e80087fb20a23
def update_tabs_text(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> for index, fname in enumerate(self.filenames): <NEW_LINE> <INDENT> client = self.clients[index] <NEW_LINE> if fname: <NEW_LINE> <INDENT> self.rename_client_tab(client, self.disambiguate_fname(fname)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.rename_client_tab(client, None) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except IndexError: <NEW_LINE> <INDENT> pass
Update the text from the tabs.
625941b4aad79263cf39080e
def draw_ellipse(ax): <NEW_LINE> <INDENT> aux_tr_box = AuxTransformBox(ax.transData) <NEW_LINE> aux_tr_box.add_artist(Ellipse((0, 0), width=0.1, height=0.15)) <NEW_LINE> box = AnchoredOffsetbox(child=aux_tr_box, loc="lower left", frameon=True) <NEW_LINE> ax.add_artist(box)
Draw an ellipse of width=0.1, height=0.15 in data coordinates.
625941b4167d2b6e31218972
def averaging_models_by_two(trainX, validX, trainy, validy, testX): <NEW_LINE> <INDENT> averaged_2models(trainX, validX, trainy, validy, testX, RF_model, XGB_model, "RF", "XGB") <NEW_LINE> averaged_2models(trainX, validX, trainy, validy, testX, RF_model, MLP_model, "RF", "MLP") <NEW_LINE> averaged_2models(trainX, validX, trainy, validy, testX, RF_model, ET_model, "RF", "Extra_tree") <NEW_LINE> averaged_2models(trainX, validX, trainy, validy, testX, XGB_model, ET_model, "XGB", "ET") <NEW_LINE> averaged_2models(trainX, validX, trainy, validy, testX, XGB_model, MLP_model, "XGB", "MLP") <NEW_LINE> averaged_2models(trainX, validX, trainy, validy, testX, MLP_model, ET_model, "MLP", "ET")
Run all combinations of 2 models for ensemble averaging Best to use averaging_probas to avoid retraining existing models
625941b4dd821e528d63af80
def render_image1(self, data, order, win_coord): <NEW_LINE> <INDENT> self.logger.debug("redraw surface") <NEW_LINE> if self.figure is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> dst_x = dst_y = 0 <NEW_LINE> if self.mpimage is None: <NEW_LINE> <INDENT> self.mpimage = self.figure.figimage(data, xo=dst_x, yo=dst_y, origin='upper') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.mpimage.ox = dst_x <NEW_LINE> self.mpimage.oy = dst_y <NEW_LINE> self.mpimage.set_data(data)
Render the image represented by (rgbobj) at dst_x, dst_y in the pixel space. NOTE: this version uses a Figure.FigImage to render the image.
625941b4a8370b7717052675
def search_dashboard_deleted_for_facet_with_http_info(self, facet, **kwargs): <NEW_LINE> <INDENT> all_params = ['facet', 'body'] <NEW_LINE> all_params.append('async_req') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> all_params.append('_preload_content') <NEW_LINE> all_params.append('_request_timeout') <NEW_LINE> params = locals() <NEW_LINE> for key, val in six.iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method search_dashboard_deleted_for_facet" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if self.api_client.client_side_validation and ('facet' not in params or params['facet'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `facet` when calling `search_dashboard_deleted_for_facet`") <NEW_LINE> <DEDENT> collection_formats = {} <NEW_LINE> path_params = {} <NEW_LINE> if 'facet' in params: <NEW_LINE> <INDENT> path_params['facet'] = params['facet'] <NEW_LINE> <DEDENT> query_params = [] <NEW_LINE> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> if 'body' in params: <NEW_LINE> <INDENT> body_params = params['body'] <NEW_LINE> <DEDENT> header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) <NEW_LINE> header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) <NEW_LINE> auth_settings = ['api_key'] <NEW_LINE> return self.api_client.call_api( '/api/v2/search/dashboard/deleted/{facet}', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseContainerFacetResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
Lists the values of a specific facet over the customer's deleted dashboards # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_dashboard_deleted_for_facet_with_http_info(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread.
625941b47c178a314d6ef22b
def addSliceCallback(self, callback): <NEW_LINE> <INDENT> self.SliceCallbacks.append(callback)
Add a function to call when a Slice of data is completed.
625941b4187af65679ca4ef8
def get_all_tables(self) -> list: <NEW_LINE> <INDENT> tables = self.c.execute( "SELECT name FROM sqlite_master WHERE type='table';") <NEW_LINE> return [table[0] for table in tables]
Get the name of all the tables in the database.
625941b463f4b57ef0000ef7
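A hedged usage sketch: the method only touches a cursor stored on self.c, so a SimpleNamespace stand-in (an assumption, not part of the entry) is enough to exercise it as a plain function:

import sqlite3
from types import SimpleNamespace

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (id INTEGER)")
db = SimpleNamespace(c=conn.cursor())  # stand-in for self
print(get_all_tables(db))  # ['users']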
def _get_ref_network(self): <NEW_LINE> <INDENT> pass
_get_ref_network is a helper function which returns the name of the network which references this NIC-like resource. NOTE: It is stubbed and must be implemented by all inheriting classes.
625941b4cc40096d61595728
def __init__(self, *args): <NEW_LINE> <INDENT> pass
Initialize a URI resolver plugin
625941b48e05c05ec3eea145
def _load_devicelist_with_diff(output_diff: bool, old_prds: dict = {}) -> dict: <NEW_LINE> <INDENT> with open(DEVICELIST_FILE, "rt") as dlfile: <NEW_LINE> <INDENT> prds = json.load(dlfile) <NEW_LINE> <DEDENT> if old_prds and output_diff: <NEW_LINE> <INDENT> print_prd_diff(old_prds, prds) <NEW_LINE> <DEDENT> return prds
Load local devicelist and output diff if requested.
625941b4d6c5a10208143e1a
def M_prime(self, A): <NEW_LINE> <INDENT> res = set() <NEW_LINE> for g in self.G: <NEW_LINE> <INDENT> if all(self.ctx[(g, a)] for a in A): <NEW_LINE> <INDENT> res.add(g) <NEW_LINE> <DEDENT> <DEDENT> return res
Computes A'. :param A: subset of attributes A <= M :returns: subset of objects A' <= G
625941b43eb6a72ae02ec2ae
def datetime_as_str(the_datetime=None, format='%Y-%m-%d %H:%M:%S'): <NEW_LINE> <INDENT> if the_datetime is None: <NEW_LINE> <INDENT> the_datetime = datetime.datetime.now() <NEW_LINE> <DEDENT> return the_datetime.strftime(format)
Generate a string representation of a datetime object. @param the_datetime an object of type datetime.datetime. @param format the desired format of the generated string.
625941b4d6c5a10208143e1b
def validate(val_loader, model, criterion, print_freq, filename, is_cuda=False): <NEW_LINE> <INDENT> batch_time = AverageMeter() <NEW_LINE> losses = AverageMeter() <NEW_LINE> top1 = AverageMeter() <NEW_LINE> top5 = AverageMeter() <NEW_LINE> model.eval() <NEW_LINE> end = time.time() <NEW_LINE> for i, (input, target) in enumerate(val_loader): <NEW_LINE> <INDENT> if (is_cuda): <NEW_LINE> <INDENT> target = target.cuda() <NEW_LINE> input = input.cuda() <NEW_LINE> model = model.cuda() <NEW_LINE> <DEDENT> input_var = input <NEW_LINE> target_var = target <NEW_LINE> with torch.no_grad(): <NEW_LINE> <INDENT> output = model(input_var) <NEW_LINE> loss = criterion(output, target_var) <NEW_LINE> prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) <NEW_LINE> losses.update(loss.item(), input.size(0)) <NEW_LINE> top1.update(prec1[0], input.size(0)) <NEW_LINE> top5.update(prec5[0], input.size(0)) <NEW_LINE> batch_time.update(time.time() - end) <NEW_LINE> end = time.time() <NEW_LINE> if i % print_freq == 0: <NEW_LINE> <INDENT> print('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> with open("/home/hongwu/python/Image/" + experience_id + "/" + filename + ".txt", "a+") as f: <NEW_LINE> <INDENT> f.write('save 1, percent: * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} \n'.format( top1=top1, top5=top5)) <NEW_LINE> <DEDENT> print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5)) <NEW_LINE> return top1.avg, top5.avg
Evaluate the current model on the validation set. :param val_loader: dataloader built from the validation set :param model: the model :param criterion: the evaluation criterion :param print_freq: printing frequency :return: the final accuracy
625941b48a43f66fc4b53e3e
def getMotorCount(self): <NEW_LINE> <INDENT> motorCount = c_int() <NEW_LINE> result = PhidgetLibrary.getDll().CPhidgetMotorControl_getMotorCount(self.handle, byref(motorCount)) <NEW_LINE> if result > 0: <NEW_LINE> <INDENT> raise PhidgetException(result) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return motorCount.value
Returns the number of motors supported by this Phidget. This does not necessarily correspond to the number of motors actually attached to the board. Returns: The number of supported motors <int>. Exceptions: PhidgetException: If this Phidget is not opened and attached.
625941b482261d6c526ab277
def draw_tilted_uniform_grid(img, thickness, step, bgr_color): <NEW_LINE> <INDENT> h, w, _ = img.shape <NEW_LINE> x, y = uniform(low=-w, high=-w + step, size=2) <NEW_LINE> while x < w: <NEW_LINE> <INDENT> draw_line(img, (x, h - 1), (x + w, 0), bgr_color, thickness) <NEW_LINE> x += step <NEW_LINE> <DEDENT> while y < h: <NEW_LINE> <INDENT> draw_line(img, (0, y), (w, y + h - 1), bgr_color, thickness) <NEW_LINE> y += step
Draws a square-cell uniform grid with a random shift, tilted at 45 degrees
625941b4377c676e91271f7f
def _setPolicySetId(self, value): <NEW_LINE> <INDENT> if not isinstance(value, str): <NEW_LINE> <INDENT> raise TypeError('Expecting string type for "policySetId" ' 'attribute; got %r' % type(value)) <NEW_LINE> <DEDENT> self.__policySetId = value
@param value: policy set id @type value: basestring @raise TypeError: incorrect input type
625941b41b99ca400220a885
def addTwoNumbers(self, l1, l2): <NEW_LINE> <INDENT> if l1 == None and l2 == None: <NEW_LINE> <INDENT> return l1 <NEW_LINE> <DEDENT> if l1 == None: <NEW_LINE> <INDENT> return l2 <NEW_LINE> <DEDENT> if l2 == None: <NEW_LINE> <INDENT> return l1 <NEW_LINE> <DEDENT> carry = 0 <NEW_LINE> head = None <NEW_LINE> curHead = None <NEW_LINE> while (l1 != None or l2 != None): <NEW_LINE> <INDENT> temp = carry <NEW_LINE> if l1 != None: <NEW_LINE> <INDENT> temp = temp + l1.val <NEW_LINE> l1 = l1.next <NEW_LINE> <DEDENT> if l2 != None: <NEW_LINE> <INDENT> temp = temp + l2.val <NEW_LINE> l2 = l2.next <NEW_LINE> <DEDENT> newNode = ListNode(temp % 10) <NEW_LINE> if head != None: <NEW_LINE> <INDENT> curHead.next = newNode <NEW_LINE> curHead = newNode <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> head = newNode <NEW_LINE> curHead = newNode <NEW_LINE> <DEDENT> carry = temp // 10 <NEW_LINE> <DEDENT> if carry == 1: <NEW_LINE> <INDENT> newNode = ListNode(1) <NEW_LINE> curHead.next = newNode <NEW_LINE> <DEDENT> return head
:type l1: ListNode :type l2: ListNode :rtype: ListNode
625941b499fddb7c1c9de168
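The entry assumes a LeetCode-style ListNode class and a Solution wrapper; both are sketched here as assumptions, along with the classic 342 + 465 = 807 usage (digits stored in reverse order):

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def to_list(node):
    # collect node values into a plain list for easy checking
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

l1 = ListNode(2, ListNode(4, ListNode(3)))  # 342
l2 = ListNode(5, ListNode(6, ListNode(4)))  # 465
print(to_list(Solution().addTwoNumbers(l1, l2)))  # [7, 0, 8]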
def fillListWithVideoResults(l, result): <NEW_LINE> <INDENT> l[RECALL].append(result.Recall()) <NEW_LINE> l[SPECIFICITY].append(result.Specificity()) <NEW_LINE> l[FPR].append(result.FPR()) <NEW_LINE> l[FNR].append(result.FNR()) <NEW_LINE> l[PBC].append(result.PBC()) <NEW_LINE> l[FMEASURE].append(result.FMeasure()) <NEW_LINE> l[PRECISION].append(result.Precision())
Append a stats list with a CategoryResult.
625941b4ec188e330fd5a57c
def get_adjacency_matrix(self): <NEW_LINE> <INDENT> resultList = [] <NEW_LINE> for x in range(len(self.nodes) + 1): <NEW_LINE> <INDENT> rowList = [] <NEW_LINE> for y in range(len(self.nodes) + 1): <NEW_LINE> <INDENT> insertValue = 0 <NEW_LINE> for edge in self.edges: <NEW_LINE> <INDENT> if(x == edge.node_from.value and y == edge.node_to.value): <NEW_LINE> <INDENT> insertValue = edge.value <NEW_LINE> <DEDENT> <DEDENT> rowList.append(insertValue) <NEW_LINE> <DEDENT> resultList.append(rowList) <NEW_LINE> <DEDENT> return resultList
Return a matrix, or 2D list. Row numbers represent from nodes, column numbers represent to nodes. Store the edge values in each spot, and a 0 if no edge exists.
625941b4fff4ab517eb2f20d
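The method relies on Node/Edge objects and a graph holding self.nodes and self.edges; a minimal hypothetical scaffold covering only the attributes it reads:

from types import SimpleNamespace

class Node:
    def __init__(self, value):
        self.value = value

class Edge:
    def __init__(self, value, node_from, node_to):
        self.value = value
        self.node_from = node_from
        self.node_to = node_to

n1, n2 = Node(1), Node(2)
graph = SimpleNamespace(nodes=[n1, n2], edges=[Edge(100, n1, n2)])  # stand-in for self
print(get_adjacency_matrix(graph))  # [[0, 0, 0], [0, 0, 100], [0, 0, 0]]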
@login_required(login_url='admin/') <NEW_LINE> def myadmin_object_list(request, model_name): <NEW_LINE> <INDENT> view = ListView <NEW_LINE> view.model = get_model(model_name) <NEW_LINE> view.template_name = "myadmin/object_list.html" <NEW_LINE> def get_context_data(self, **kwargs): <NEW_LINE> <INDENT> context = super(view, self).get_context_data(**kwargs) <NEW_LINE> context['model_name'] = model_name <NEW_LINE> return context <NEW_LINE> <DEDENT> view.get_context_data = get_context_data <NEW_LINE> return view.as_view()(request)
Lists objects of selected model
625941b473bcbd0ca4b2be51
def set_forced_first_scattering(self, forced_first_scattering): <NEW_LINE> <INDENT> self.forced_first_scattering = forced_first_scattering
Set whether to ensure that photons scatter at least once before escaping the grid. Parameters ---------- forced_first_scattering : bool Whether to force at least one scattering before escaping the grid References ---------- Wood & Reynolds, 1999, The Astrophysical Journal, 525, 799
625941b4e1aae11d1e749a88
def getDebug(): <NEW_LINE> <INDENT> global _DEBUG <NEW_LINE> return _DEBUG
Returns the currently active DEBUG string.
625941b4de87d2750b85fb62
def print_all(self, filename=None): <NEW_LINE> <INDENT> from pprint import pprint <NEW_LINE> for k in self.data_list: <NEW_LINE> <INDENT> print("============== {} ==================".format(k)) <NEW_LINE> if filename is None: <NEW_LINE> <INDENT> pprint(self.defs[k]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pprint(self.file_defs[filename][k])
Print everything parsed from files. Useful for debugging. Parameters ---------- filename : unicode, optional Name of the file whose definition should be printed.
625941b416aa5153ce36224d
def test_delivery_reports_confirmed_post(self): <NEW_LINE> <INDENT> pass
Test case for delivery_reports_confirmed_post Confirm the receipt of delivery reports.
625941b44f6381625f11481b
def train(self, sess, encoder_inputs, encoder_inputs_length, decoder_inputs, decoder_inputs_length): <NEW_LINE> <INDENT> if self.mode != 'train': <NEW_LINE> <INDENT> raise ValueError('Train step can only be operated in train mode') <NEW_LINE> <DEDENT> input_feed = self.check_feeds(encoder_inputs, encoder_inputs_length, decoder_inputs, decoder_inputs_length, False) <NEW_LINE> input_feed[self.keep_prob_placeholder.name] = self.keep_prob <NEW_LINE> output_feed = [ self.train_op, self.loss, self.summary_op ] <NEW_LINE> outputs = sess.run(output_feed, input_feed) <NEW_LINE> return outputs[1], outputs[2]
Run a train step of the model feeding the given inputs. Args: sess: tensorflow session to use. encoder_inputs: a numpy int matrix of [batch_size, max_source_time_steps] to feed as encoder inputs encoder_inputs_length: a numpy int vector of [batch_size] to feed as sequence lengths for each element in the given batch decoder_inputs: a numpy int matrix of [batch_size, max_target_time_steps] to feed as decoder inputs decoder_inputs_length: a numpy int vector of [batch_size] to feed as sequence lengths for each element in the given batch Returns: A pair consisting of the step loss and the summary output.
625941b497e22403b379cd6d
def to_variable(x,requires_grad = False,cuda = False): <NEW_LINE> <INDENT> x = torch.from_numpy(x) <NEW_LINE> if(cuda): <NEW_LINE> <INDENT> x = x.cuda() <NEW_LINE> <DEDENT> x = Variable(x, requires_grad = requires_grad) <NEW_LINE> return x
Convert a numpy array into a PyTorch tensor wrapped in an autograd Variable, optionally on the GPU and with gradients enabled
625941b4046cf37aa974cb20
def get_my_spout(self): <NEW_LINE> <INDENT> if self.is_spout: <NEW_LINE> <INDENT> return self._my_spbl <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None
Returns spout instance, or ``None`` if bolt is assigned
625941b43346ee7daa2b2b3d
def oddEvenList(self, head): <NEW_LINE> <INDENT> if not head: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> o = ListNode(-2) <NEW_LINE> odd = ListNode(-1) <NEW_LINE> even = o <NEW_LINE> cur = head <NEW_LINE> while cur: <NEW_LINE> <INDENT> odd.next = cur <NEW_LINE> even.next = cur.next <NEW_LINE> odd = cur <NEW_LINE> even = cur.next <NEW_LINE> if cur.next: <NEW_LINE> <INDENT> cur = cur.next.next <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> odd.next = o.next <NEW_LINE> return head
:type head: ListNode :rtype: ListNode
625941b40a366e3fb873e5eb
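Reusing the hypothetical ListNode/to_list sketch from the addTwoNumbers entry above, a quick check of the expected odd/even regrouping:

head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
print(to_list(Solution().oddEvenList(head)))  # [1, 3, 5, 2, 4]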
def gaussquad(n): <NEW_LINE> <INDENT> i = np.arange(n-1) <NEW_LINE> b = (i+1.) / np.sqrt(4.*(i+1)**2 - 1.) <NEW_LINE> J = np.diag(b, -1) + np.diag(b, 1) <NEW_LINE> x, ev = np.linalg.eigh(J) <NEW_LINE> w = 2 * ev[0,:]**2 <NEW_LINE> return x, w
Computes the nodes and weights for Gauss-Legendre quadrature.
625941b491af0d3eaac9b7e8
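A usage sketch (assuming only NumPy): with n = 5 nodes the rule integrates polynomials up to degree 9 exactly, so x**4 over [-1, 1] comes out at 2/5:

import numpy as np

x, w = gaussquad(5)
print(np.sum(w * x**4))  # ~0.4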
def put_Description(self, desc): <NEW_LINE> <INDENT> return super(IServerObjectExtensionType2, self).put_Description(desc)
Method IServerObjectExtensionType.put_Description (from IServerObjectExtensionType) INPUT desc : BSTR
625941b48c0ade5d55d3e794
def instantiated(self): <NEW_LINE> <INDENT> return bool(super(Panel, self).widget())
Return True if the tool already has been loaded.
625941b4cdde0d52a9e52e03
def __get_ew_pos_fft(self, times, evec_stream, f, dec, pad_fac=pad_fac_EW): <NEW_LINE> <INDENT> n = len(times) <NEW_LINE> apod = np.hanning(n) <NEW_LINE> dt = np.radians(ephemeris.lsa(times[1]) - ephemeris.lsa(times[0])) <NEW_LINE> spec = np.fft.fft(apod * evec_stream, n=n * pad_fac) <NEW_LINE> freq = np.fft.fftfreq(n * pad_fac, dt) <NEW_LINE> x_loc = freq[np.argmax(np.abs(spec))] <NEW_LINE> conv_fac = -2.99792e2 / f / np.cos(dec) <NEW_LINE> position = x_loc * conv_fac <NEW_LINE> position_resolution = np.abs(freq[1] - freq[0]) * np.abs(conv_fac) <NEW_LINE> return position, position_resolution
Routine that gets feed positions from the eigenvector data via an FFT. The eigenvector is first apodized with a Hanning window function and then Fourier transformed along the time axis. Parameters ---------- times : np.ndarray Unix time of the data evec_stream : np.ndarray The eigenvector data for a single frequency as a function of time. f : float The selected frequency dec : float The declination of the source in radians. pad_fac: integer The multiplicative factor by which we want to pad the data. Returns ------- positions: np.ndarray(ninput) The East-West positions referenced to 2 feeds on the first cylinder. position_resolution: float Position resolution, determined by the number of time samples times the padding factor.
625941b47b25080760e39231
@main.before_request <NEW_LINE> @rate_limit(42, 10) <NEW_LINE> def before_request(): <NEW_LINE> <INDENT> pass
All routes in this blueprint require rate limiting.
625941b4796e427e537b0397
def parse_fastq(f): <NEW_LINE> <INDENT> name = f.readline().strip() <NEW_LINE> read = f.readline().strip() <NEW_LINE> plus = f.readline().strip() <NEW_LINE> qual = f.readline().strip() <NEW_LINE> return [name,read,plus,qual]
Parse one record (4 lines) of a FASTQ file: name, sequence, separator and quality lines
625941b4283ffb24f3c556e3
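Since the function only calls f.readline(), an in-memory io.StringIO stands in for a file handle in this sketch:

import io

record = io.StringIO("@read1\nACGTACGT\n+\nIIIIIIII\n")
name, read, plus, qual = parse_fastq(record)
print(name, read)  # @read1 ACGTACGT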
def symmetric5(input_function, x, h): <NEW_LINE> <INDENT> f_minus_2 = input_function(x - (2.0 * h)) <NEW_LINE> f_minus_1 = input_function(x - h) <NEW_LINE> f_0 = input_function(x) <NEW_LINE> f_1 = input_function(x + h) <NEW_LINE> f_2 = input_function(x + (2.0 * h)) <NEW_LINE> return (f_minus_2 - (8.0 * f_minus_1) + (8.0 * f_1) - f_2) / (12.0 * h)
Performs the symmetric 5-point method on the input_function. Input: input_function -- the function to differentiate x -- independent variable h -- step-size Output: approximation of the derivative of input_function at x
625941b45f7d997b87174870
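A quick check of the five-point central difference (math is the only import assumed): the derivative of sin at 0 is cos(0) = 1:

import math

print(symmetric5(math.sin, 0.0, 1e-3))  # ~1.0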
@render_to('googlead6c3c617c310b08.html') <NEW_LINE> def google_verify(request): <NEW_LINE> <INDENT> return {}
Page used to verify site ownership for the Chrome Web Store
625941b4097d151d1a222c39
def previous(self): <NEW_LINE> <INDENT> return _BLISS_swig.SwigPyIterator_previous(self)
previous(self) -> PyObject
625941b4377c676e91271f80
def __init__( self, *, id: Optional[str] = None, timestamp: Optional[datetime.datetime] = None, action: Optional[str] = None, target: Optional["Target"] = None, request: Optional["Request"] = None, actor: Optional["Actor"] = None, source: Optional["Source"] = None, **kwargs ): <NEW_LINE> <INDENT> super(EventContent, self).__init__(**kwargs) <NEW_LINE> self.id = id <NEW_LINE> self.timestamp = timestamp <NEW_LINE> self.action = action <NEW_LINE> self.target = target <NEW_LINE> self.request = request <NEW_LINE> self.actor = actor <NEW_LINE> self.source = source
:keyword id: The event ID. :paramtype id: str :keyword timestamp: The time at which the event occurred. :paramtype timestamp: ~datetime.datetime :keyword action: The action that encompasses the provided event. :paramtype action: str :keyword target: The target of the event. :paramtype target: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Target :keyword request: The request that generated the event. :paramtype request: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Request :keyword actor: The agent that initiated the event. For most situations, this could be from the authorization context of the request. :paramtype actor: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Actor :keyword source: The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it. :paramtype source: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Source
625941b430bbd722463cbb98
def show_topic_words(self, id_words): <NEW_LINE> <INDENT> for i, t in enumerate(self.phi.value): <NEW_LINE> <INDENT> print("Topic %i: " % i, ", ".join(id_words[w_] for w_ in np.argsort(t[0])[-10:] if w_ < (self.vocabulary - 1 - 1)))
For each topic show a list of representative words. :param id_words: the mapping from ids to its words :return: topic number and its representative words
625941b499fddb7c1c9de169
def discover_filtered_tests(self, filter_name, top_level_directory=None, pattern='test*.py'): <NEW_LINE> <INDENT> if top_level_directory is None: <NEW_LINE> <INDENT> top_level_directory = find_top_level_directory( os.getcwd()) <NEW_LINE> <DEDENT> logger.debug('Discovering filtered tests: filter_name=%r, ' 'top_level_directory=%r, pattern=%r', filter_name, top_level_directory, pattern) <NEW_LINE> suite = self.discover_by_directory( top_level_directory, top_level_directory=top_level_directory, pattern=pattern) <NEW_LINE> return self._loader.create_suite( filter_test_suite(suite, filter_name=filter_name))
Find all tests whose package, module, class or method names match the ``filter_name`` string. Parameters ---------- filter_name : str A subsection of the full dotted test name. This can be simply a test method name (e.g. ``test_some_method``), the TestCase class name (e.g. ``TestMyClass``), a module name (e.g. ``test_module``), a subpackage (e.g. ``tests``). It may also be a dotted combination of the above (e.g. ``TestMyClass.test_some_method``). top_level_directory : str The path to the top-level directory of the project. This is the parent directory of the project's top-level Python package. pattern : str The glob pattern to match the filenames of modules to search for tests.
625941b42eb69b55b151c67f
def _createIntegratedWorkspace(self, InputWorkspace=None, OutputWorkspace=None, proton_charge=None, from_pixel=0, to_pixel=303): <NEW_LINE> <INDENT> x_axis = InputWorkspace.readX(0)[:] <NEW_LINE> x_size = to_pixel - from_pixel + 1 <NEW_LINE> y_axis = zeros((self.alpha_pixel_nbr, len(x_axis) - 1)) <NEW_LINE> y_error_axis = zeros((self.alpha_pixel_nbr, len(x_axis) - 1)) <NEW_LINE> y_range = arange(x_size) + from_pixel <NEW_LINE> for x in range(self.beta_pixel_nbr): <NEW_LINE> <INDENT> for y in y_range: <NEW_LINE> <INDENT> index = int(self.alpha_pixel_nbr * x + y) <NEW_LINE> y_axis[y, :] += InputWorkspace.readY(index)[:] <NEW_LINE> y_error_axis[y, :] += ((InputWorkspace.readE(index)[:]) * (InputWorkspace.readE(index)[:])) <NEW_LINE> <DEDENT> <DEDENT> y_axis = y_axis.flatten() <NEW_LINE> y_error_axis = sqrt(y_error_axis) <NEW_LINE> y_error_axis = y_error_axis.flatten() <NEW_LINE> y_axis /= (proton_charge * 1e-12) <NEW_LINE> CreateWorkspace(OutputWorkspace=OutputWorkspace, DataX=x_axis, DataY=y_axis, DataE=y_error_axis, Nspec=self.alpha_pixel_nbr)
This creates the integrated workspace over the second pixel range (beta_pixel_nbr here) and returns the new workspace handle
625941b4be383301e01b526b
def _extend_height(glyph, carry_last): <NEW_LINE> <INDENT> if carry_last: <NEW_LINE> <INDENT> return bytematrix.vstack((glyph, glyph[-1, :])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return glyph.vextend(1)
Extend the character height by a row.
625941b40a50d4780f666c65
def _lookup(obj, prop): <NEW_LINE> <INDENT> if isinstance(prop, ast.Attribute): <NEW_LINE> <INDENT> return obj.get(prop.attr) <NEW_LINE> <DEDENT> elif isinstance(prop, ast.Name): <NEW_LINE> <INDENT> return obj.get(prop.id) <NEW_LINE> <DEDENT> elif isinstance(prop, ast.Subscript): <NEW_LINE> <INDENT> return obj[_lookup_subscript(prop.slice.value)] <NEW_LINE> <DEDENT> raise NotImplementedError(f"Node is not supported: {prop}")
Lookup a given property on the object. Parameters obj (Dict): A dict to lookup for properties or index prop (ast Node): An ast.Attribute, ast.Name, or ast.Subscript node to lookup Returns An object result of the lookup
625941b421a7993f00bc7abe
def test_automatic_startup_params(self): <NEW_LINE> <INDENT> self.assert_initialize_driver() <NEW_LINE> self.assert_get(SBE37Parameter.INTERVAL, 1)
Verify that startup params are applied automatically when the driver is started.
625941b485dfad0860c3ac2e
def cbind(*dfseq): <NEW_LINE> <INDENT> df_res = DataFrame({}, []) <NEW_LINE> for df in dfseq: <NEW_LINE> <INDENT> for cn in df.colnameseq: <NEW_LINE> <INDENT> df_res.add_column(cn, df[cn]) <NEW_LINE> <DEDENT> <DEDENT> return df_res
Takes multiple data frames with the same number of rows, joins them column-wise in the order given, and returns the result as a single data frame.
625941b4a17c0f6771cbde2a
def email(args): <NEW_LINE> <INDENT> if args.name: <NEW_LINE> <INDENT> add_user(name=args.name, email_address=args.email) <NEW_LINE> <DEDENT> if args.add_term: <NEW_LINE> <INDENT> Feed(Config.database).add_search_term(email_address=args.email, search_term=args.add_term.upper()) <NEW_LINE> <DEDENT> if args.terms_from_file: <NEW_LINE> <INDENT> with open(args.terms_from_file) as file: <NEW_LINE> <INDENT> for line in file: <NEW_LINE> <INDENT> Feed(Config.database).add_search_term(email_address=args.email, search_term=line.strip().upper()) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if args.remove_term: <NEW_LINE> <INDENT> Feed(Config.database).remove_search_term(email_address=args.email, term=args.remove_term)
--email flag is used with several different options. This is split off to make it easier to read :param args: parser.parse_args() namespace :return: None
625941b48e71fb1e9831d586
def get_latitude_longitude(self): <NEW_LINE> <INDENT> send_url = 'http://freegeoip.net/json' <NEW_LINE> r = requests.get(send_url) <NEW_LINE> j = json.loads(r.text) <NEW_LINE> lat = j['latitude'] <NEW_LINE> lon = j['longitude'] <NEW_LINE> return lat,lon
Returns the current latitude and longitude of the user. Do Not Use!
625941b4596a8972360898a1
def dos_introspect(resPreCalc,graph_metric='median_rankpt',graph=True): <NEW_LINE> <INDENT> specFrm =pd.io.parsers.read_csv(resPreCalc,sep='\t') <NEW_LINE> specFrm.index = specFrm['pert_id'] <NEW_LINE> goldSet = countSerGold.index.values <NEW_LINE> iSpecSet = set(specFrm['pert_id']) <NEW_LINE> spectGold = iSpecSet.intersection(goldSet) <NEW_LINE> dosFrm = specFrm[specFrm.pert_id.isin(spectGold)] <NEW_LINE> if graph: <NEW_LINE> <INDENT> iRnkpt = dosFrm[graph_metric] <NEW_LINE> h1 = plt.hist(specFrm[graph_metric],30,color='b',range=[-80,100],label=['all_introspect_results'],alpha=.4,normed=True) <NEW_LINE> h2 = plt.hist(iRnkpt,30,color='r',range=[-80,100],label='DOS_results',alpha=.3,normed=True) <NEW_LINE> plt.legend() <NEW_LINE> plt.ylabel('freq',fontweight='bold') <NEW_LINE> plt.xlabel(graph_metric,fontweight='bold') <NEW_LINE> outF = os.path.join(wkdir, 'DOS_sig_introspect_'+ graph_metric+ '.png') <NEW_LINE> plt.savefig(outF, bbox_inches='tight',dpi=200) <NEW_LINE> plt.close() <NEW_LINE> <DEDENT> return specFrm, dosFrm
1) load pre-calculated sig_introspect results
625941b494891a1f4081b87d
def update_speed(self, space, next_car): <NEW_LINE> <INDENT> if space <= 2: <NEW_LINE> <INDENT> self.speed = 0 <NEW_LINE> <DEDENT> elif space >= self.speed and self.speed < self.max_speed: <NEW_LINE> <INDENT> self.speed += self.acceleration <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.speed = next_car.speed
Change the car's speed based on space ahead of car.
625941b41d351010ab8558fb
def test_back_navigation(self): <NEW_LINE> <INDENT> self.nested_views.press_back_navigation() <NEW_LINE> for __ in range(self.NEXT_LEVEL_CLICKS): <NEW_LINE> <INDENT> self.nested_views.press_next_level() <NEW_LINE> <DEDENT> self.assertEquals(self.nested_views.get_counter(), self.BEFORE_COUNTER) <NEW_LINE> for __ in range(self.BACK_BUTTON_CLICKS): <NEW_LINE> <INDENT> self.nested_views.press_back_button() <NEW_LINE> <DEDENT> self.assertEquals(self.nested_views.get_counter(), self.AFTER_COUNTER)
Get the initial page counter, press next level NEXT_LEVEL_CLICKS times, and verify the counter. Press the back button BACK_BUTTON_CLICKS times and verify the counter.
625941b46fece00bbac2d510
def load_gs_to_json_schema( key: str, schema_path: str, cell_range: str = None, sheet_name: str = "schema", sheet_index: int = 0, ): <NEW_LINE> <INDENT> if cell_range: <NEW_LINE> <INDENT> df = load_gs_range_to_dataframe( key=key, cell_range=cell_range, sheet_name=sheet_name, sheet_index=sheet_index, evaluate_formulas=True ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> df = load_gs_to_dataframe( key=key, sheet_name=sheet_name, sheet_index=sheet_index, evaluate_formulas=True ) <NEW_LINE> <DEDENT> df = df.dropna(how="all", axis=1).dropna(how="all", axis=0) <NEW_LINE> if any("Unnamed" in x for x in df.columns): <NEW_LINE> <INDENT> df = df.rename(columns=df.iloc[0]).drop(df.index[0]) <NEW_LINE> <DEDENT> assert set(df.columns).issubset(["name", "type", "mode", "description"]), ( "columns should be 'name', 'type', 'mode', 'description'" ) <NEW_LINE> assert set(["name", "type"]).issubset(list(df.columns)), ( "'name' and 'type' are mandatory columns" ) <NEW_LINE> if "mode" not in df.columns: <NEW_LINE> <INDENT> df["mode"] = "NULLABLE" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> df["mode"] = df["mode"].fillna("NULLABLE") <NEW_LINE> <DEDENT> if "description" not in df.columns: <NEW_LINE> <INDENT> df["description"] = "" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> df["description"] = df["description"].fillna("") <NEW_LINE> <DEDENT> df = df[[ "name", "type", "mode", "description" ]] <NEW_LINE> f_path, f_name = split_full_path(schema_path) <NEW_LINE> if f_path: <NEW_LINE> <INDENT> create_folder(f_path) <NEW_LINE> <DEDENT> json_data = [] <NEW_LINE> for _, row in df.iterrows(): <NEW_LINE> <INDENT> json_data.append( { "name": row["name"], "type": row["type"], "mode": row["mode"], "description": row["description"] } ) <NEW_LINE> <DEDENT> with open(schema_path, "w") as f: <NEW_LINE> <INDENT> json.dump(json_data, f, indent=4) <NEW_LINE> f.write("\n") <NEW_LINE> <DEDENT> print(f"Table schema file created at '{schema_path}.'")
Load table schema from google spreadsheet and write as json. Args: key: 44-digit spreadsheet id from the url e.g. '1DR1...k-xDU' schema_path: path to the schema file (the output to be written) e.g. 'path/schema.json' cell_range: data range including column names e.g. 'B1:C10' sheet_name: name of the sheet within the spreadsheet e.g. 'Sheet1' sheet_index: zero-based index where the sheet is within the spreadsheet column_order: the order of columns given for the schema, can only contain any of 'name', 'type', 'mode', 'description' Notes: - if no 'cell_range' is defined, the sheet has to be otherwise empty - only one of sheet_name/sheet_index is required, defaults to the first sheet but prioritizing sheet_name if both are given - the gs load requires the python data types, otherwise it makes incorrect transformations - column 'mode' will default to 'NULLABLE' Requirements: - share gs with the service account - gs api enabled for the project
625941b4be8e80087fb20a25
def CloneVM_Task(self, folder, name, spec): <NEW_LINE> <INDENT> return self.delegate("CloneVM_Task")(folder, name, spec)
Creates a clone of this virtual machine. If the virtual machine is used as a template, this method corresponds to the deploy command. :param folder: The location of the new virtual machine. :param name: The name of the new virtual machine. :param spec: Specifies how to clone the virtual machine.
625941b4dd821e528d63af82
def __init__(self): <NEW_LINE> <INDENT> self.TotalCount = None <NEW_LINE> self.MaterialInfoSet = None <NEW_LINE> self.RequestId = None
:param TotalCount: Total number of matching records. :type TotalCount: int :param MaterialInfoSet: Material information; only basic information is returned. :type MaterialInfoSet: list of MaterialInfo :param RequestId: Unique request ID, returned with every request. Provide the RequestId of the request when locating a problem. :type RequestId: str
625941b4a8370b7717052677
def canCompleteCircuit(self, gas, cost): <NEW_LINE> <INDENT> length, cache, index = len(gas), {}, 0 <NEW_LINE> while index < length: <NEW_LINE> <INDENT> current, distance = 0, 0 <NEW_LINE> while current >= 0: <NEW_LINE> <INDENT> offset = (index + distance) % length <NEW_LINE> if offset in cache and cache[offset][1] + current >= 0: <NEW_LINE> <INDENT> current = cache[offset][1] + current <NEW_LINE> distance += cache[offset][0] <NEW_LINE> offset = (index + distance) % length <NEW_LINE> <DEDENT> if distance >= length: <NEW_LINE> <INDENT> return index <NEW_LINE> <DEDENT> current += gas[offset] <NEW_LINE> current -= cost[offset] <NEW_LINE> distance += 1 <NEW_LINE> <DEDENT> cache[index] = (distance, current) <NEW_LINE> index += distance <NEW_LINE> <DEDENT> return -1
:type gas: List[int] :type cost: List[int] :rtype: int
625941b4f548e778e58cd351
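On the classic gas-station example (the Solution wrapper is assumed), the tank stays non-negative only when starting at station 3:

gas = [1, 2, 3, 4, 5]
cost = [3, 4, 5, 1, 2]
print(Solution().canCompleteCircuit(gas, cost))  # 3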
def bulk_add_data( orgs: List[dict], org_courseid_pairs: List[Tuple[dict, str]], dry_run: bool, activate: bool, ): <NEW_LINE> <INDENT> adding_phrase = "Dry-run of bulk-adding" if dry_run else "Bulk-adding" <NEW_LINE> created_phrase = "Will create" if dry_run else "Created" <NEW_LINE> reactivated_phrase = "Will reactivate" if dry_run else "Reactivated" <NEW_LINE> print("------------------------------------------------------") <NEW_LINE> print(f"{adding_phrase} organizations...") <NEW_LINE> orgs_created, orgs_reactivated = organizations_api.bulk_add_organizations( orgs, dry_run=dry_run, activate=activate ) <NEW_LINE> print(f"{created_phrase} {len(orgs_created)} organizations:") <NEW_LINE> for org_short_name in sorted(orgs_created): <NEW_LINE> <INDENT> print(f" {org_short_name}") <NEW_LINE> <DEDENT> print(f"{reactivated_phrase} {len(orgs_reactivated)} organizations:") <NEW_LINE> for org_short_name in sorted(orgs_reactivated): <NEW_LINE> <INDENT> print(f" {org_short_name}") <NEW_LINE> <DEDENT> print("------------------------------------------------------") <NEW_LINE> print(f"{adding_phrase} organization-course linkages...") <NEW_LINE> linkages_created, linkages_reactivated = organizations_api.bulk_add_organization_courses( org_courseid_pairs, dry_run=dry_run, activate=activate ) <NEW_LINE> print(f"{created_phrase} {len(linkages_created)} organization-course linkages:") <NEW_LINE> for org_short_name, course_id in sorted(linkages_created): <NEW_LINE> <INDENT> print(f" {org_short_name},{course_id}") <NEW_LINE> <DEDENT> print(f"{reactivated_phrase} {len(linkages_reactivated)} organization-course linkages:") <NEW_LINE> for org_short_name, course_id in sorted(linkages_reactivated): <NEW_LINE> <INDENT> print(f" {org_short_name},{course_id}") <NEW_LINE> <DEDENT> print("------------------------------------------------------")
Bulk-add the organizations and organization-course linkages. Print out list of organizations and organization-course linkages, one per line. We distinguish between records that are added by being created vs. those that are being added by just reactivating an existing record. Arguments: orgs: org data dictionaries to bulk-add. should each have a "short_name" and "name" key. org_courseid_pairs list of (org data dictionary, course key string) links to bulk-add. each org data dictionary should have a "short_name" key. dry_run: Whether or not this run should be "dry" (ie, don't apply changes). activate: Whether newly-added organizations and organization-course linkages should be activated, and whether existing-but-inactive organizations/linkages should be reactivated.
625941b42eb69b55b151c680
def _tmp_access_rule(method, ip=None, ttl=None, port=None, direction='in', port_origin='d', ip_origin='d', comment=''): <NEW_LINE> <INDENT> if _status_csf(): <NEW_LINE> <INDENT> if ip is None: <NEW_LINE> <INDENT> return {'error': 'You must supply an ip address or CIDR.'} <NEW_LINE> <DEDENT> if ttl is None: <NEW_LINE> <INDENT> return {'error': 'You must supply a ttl.'} <NEW_LINE> <DEDENT> args = _build_tmp_access_args(method, ip, ttl, port, direction, comment) <NEW_LINE> return __csf_cmd(args)
Handles the cmd execution for tempdeny and tempallow commands.
625941b48c3a873295158194
def find_qemu_pid(vm_name): <NEW_LINE> <INDENT> logging.info('Finding QEMU pid for domain %s', vm_name) <NEW_LINE> libvirt_vm_pid_file = '/var/run/libvirt/qemu/{}.pid'.format(vm_name) <NEW_LINE> try: <NEW_LINE> <INDENT> with open(libvirt_vm_pid_file, 'r') as f: <NEW_LINE> <INDENT> content = f.read() <NEW_LINE> pid = int(content) <NEW_LINE> return pid <NEW_LINE> <DEDENT> <DEDENT> except IOError: <NEW_LINE> <INDENT> for proc in psutil.process_iter(): <NEW_LINE> <INDENT> cmdline = proc.cmdline()[1:] <NEW_LINE> if proc.name() == "qemu-system-x86_64" and next((True for k, v in zip(cmdline, cmdline[1:]) if k == "-name" and vm_name in v), False): <NEW_LINE> <INDENT> return proc.pid <NEW_LINE> <DEDENT> <DEDENT> logging.critical('Cannot find QEMU') <NEW_LINE> raise QEMUNotFoundError('Cannot find QEMU')
Find QEMU's PID that is associated with a given virtual machine :param str vm_name: libvirt domain name :rtype: int
625941b4dc8b845886cb530a
def __get_gamma_corrected_value(original_value): <NEW_LINE> <INDENT> return _gamma_correction_arr[original_value]
INTERNAL. Converts a brightness value from 0-255 to the value that produces an approximately linear scaling to the human eye.
625941b4d10714528d5ffab4
def to_natural(self, *args, **kwargs): <NEW_LINE> <INDENT> return self._arrow.humanize(*args, **kwargs)
:see::meth:arrow.Arrow.humanize
625941b463f4b57ef0000efa
def get_offers_from_category(url): <NEW_LINE> <INDENT> markup = BeautifulSoup(domporta.utils.get_content_from_source(url), 'html.parser') <NEW_LINE> offers_urls = [] <NEW_LINE> offers = markup.find_all('article', class_='sneakpeak') <NEW_LINE> for offer in offers: <NEW_LINE> <INDENT> offers_urls.append(offer.find('a').get('href')) <NEW_LINE> <DEDENT> return offers_urls, markup
Parses available offer URLs from the given category page :param url: Defined url for Domiporta page with offers :type url: str :return: Tuple of (list of offer urls from the given page, parsed page markup) :rtype: tuple
625941b438b623060ff0abc8
def test_choice(self): <NEW_LINE> <INDENT> contact = Contact(speaker=self.speaker, kind='A', value='B') <NEW_LINE> self.assertRaises(ValidationError, contact.full_clean)
Contact kind should be limited to E or P
625941b47047854f462a11e4
def test_no_common(self): <NEW_LINE> <INDENT> self.assertIs( None, _latest_common_snapshot( [Snapshot(name=b"a")], [Snapshot(name=b"b")]))
If there are no common ``Snapshot`` instances in the two ``list``\ s, ``_latest_common_snapshot`` returns ``None``.
625941b457b8e32f52483276
def p_expression_list1(p): <NEW_LINE> <INDENT> p[0] = ASTNode('expression_list', p[1])
expression_list : expression
625941b4498bea3a759b9888
def get_state(self): <NEW_LINE> <INDENT> if self.is_interactive is False: <NEW_LINE> <INDENT> raise Exception('Managed engine is not active') <NEW_LINE> <DEDENT> return (self.busy, self.debug, self.profile)
Returns a tuple of engine state flags=(busy, debug, profile)
625941b4b7558d58953c4cf2
def _process_data(self, data): <NEW_LINE> <INDENT> word_index=dict((w,i) for i,w in enumerate(self.vocab)) <NEW_LINE> x=[[word_index.get(w[0], 1) for w in s] for s in data] <NEW_LINE> y_chunk=[[self.tags.index(w[1]) for w in s] for s in data] <NEW_LINE> x= pad_sequences(x,self.max_len) <NEW_LINE> y_chunk=pad_sequences(y_chunk,self.max_len,value=-1) <NEW_LINE> y_chunk=np.expand_dims(y_chunk,2) <NEW_LINE> return x,y_chunk
Process the data: convert words and tags to index sequences, pad to max_len, and expand the label array's last dimension
625941b40a366e3fb873e5ec
def listar_using_get51(self, **kwargs): <NEW_LINE> <INDENT> all_params = ['sort', 'page', 'limit', 'id', 'id_produto', 'data_entrada', 'vencimento'] <NEW_LINE> all_params.append('callback') <NEW_LINE> params = locals() <NEW_LINE> for key, val in iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method listar_using_get51" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> resource_path = '/api/taxas-refinanciamento'.replace('{format}', 'json') <NEW_LINE> path_params = {} <NEW_LINE> query_params = {} <NEW_LINE> if 'sort' in params: <NEW_LINE> <INDENT> query_params['sort'] = params['sort'] <NEW_LINE> <DEDENT> if 'page' in params: <NEW_LINE> <INDENT> query_params['page'] = params['page'] <NEW_LINE> <DEDENT> if 'limit' in params: <NEW_LINE> <INDENT> query_params['limit'] = params['limit'] <NEW_LINE> <DEDENT> if 'id' in params: <NEW_LINE> <INDENT> query_params['id'] = params['id'] <NEW_LINE> <DEDENT> if 'id_produto' in params: <NEW_LINE> <INDENT> query_params['idProduto'] = params['id_produto'] <NEW_LINE> <DEDENT> if 'data_entrada' in params: <NEW_LINE> <INDENT> query_params['dataEntrada'] = params['data_entrada'] <NEW_LINE> <DEDENT> if 'vencimento' in params: <NEW_LINE> <INDENT> query_params['vencimento'] = params['vencimento'] <NEW_LINE> <DEDENT> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client. select_header_accept(['application/json']) <NEW_LINE> if not header_params['Accept']: <NEW_LINE> <INDENT> del header_params['Accept'] <NEW_LINE> <DEDENT> header_params['Content-Type'] = self.api_client. select_header_content_type(['application/json']) <NEW_LINE> auth_settings = [] <NEW_LINE> response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageTaxasRefinanciamentoResponse', auth_settings=auth_settings, callback=params.get('callback')) <NEW_LINE> return response
{{{taxas_refinanciamento_listar}}} {{{taxas_refinanciamento_listar_notes}}} This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.listar_using_get51(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] sort: {{{global_menssagem_sort_sort}}} :param int page: {{{global_menssagem_sort_page_value}}} :param int limit: {{{global_menssagem_sort_limit}}} :param int id: {{{taxas_refinanciamento_request_id_value}}} :param int id_produto: {{{taxas_refinanciamento_request_id_produto_value}}} :param str data_entrada: {{{taxas_refinanciamento_request_data_entrada_value}}} :param str vencimento: {{{taxas_refinanciamento_request_vencimento_value}}} :return: PageTaxasRefinanciamentoResponse If the method is called asynchronously, returns the request thread.
625941b40383005118ecf3bb
def split_train_test(train_start, train_end, test_start, test_end, data): <NEW_LINE> <INDENT> train_start = dealWith_business_day(train_start).strftime('%Y-%m-%d') <NEW_LINE> train_end = dealWith_business_day(train_end).strftime('%Y-%m-%d') <NEW_LINE> test_start = dealWith_business_day(test_start).strftime('%Y-%m-%d') <NEW_LINE> test_end = dealWith_business_day(test_end).strftime('%Y-%m-%d') <NEW_LINE> train = data.loc[train_start:train_end] <NEW_LINE> test = data.loc[test_start:test_end] <NEW_LINE> return train, test
Split the data in train set and test set
625941b455399d3f05588489
def testCreateLine(self): <NEW_LINE> <INDENT> a = e1 + e2* 3.451 <NEW_LINE> b = 2 * e3 - e2 * 0.4 + e1 * 0.1 <NEW_LINE> actual = createLine(up(a), up(b)) <NEW_LINE> expected = ((a ^ b ^ ninf) + ((a - b) ^ ninf ^ no)).normal() <NEW_LINE> expected2 = (up(a) ^up(b)^ninf).normal() <NEW_LINE> assert(actual == expected == expected2) <NEW_LINE> a, b = createRandomVectors(2) <NEW_LINE> actual = createLine(up(a), up(b)) <NEW_LINE> expected = ((a ^ b ^ ninf) + ((a - b) ^ ninf ^ no)).normal() <NEW_LINE> expected2 = (up(a) ^up(b)^ninf).normal() <NEW_LINE> assert(actual == expected == expected2)
Test that the create line function indeed produces the desired line, and that it equals the representation using GA(3) vectors $ L \propto (a \wedge b \wedge n_{\infty}) + ((a - b) \wedge n_{\infty} \wedge n_0) $
625941b482261d6c526ab279
def test_inc_season_out_of_bounds(self): <NEW_LINE> <INDENT> tracked_show = tracker.TrackedShow( title='Game of Thrones', _next_episode='S10E01' ) <NEW_LINE> with self.assertRaises(SeasonOutOfBoundsError): <NEW_LINE> <INDENT> tracked_show._set_next_prev(self.database)
Test that we detect an invalid season
625941b45510c4643540f1d0
def moveleft(self, scene): <NEW_LINE> <INDENT> if self.status == 0: <NEW_LINE> <INDENT> chk = clashcheck(scene, self, self.x, self.y - self.step) <NEW_LINE> if chk == 0: <NEW_LINE> <INDENT> self.setPos(scene, self.x, self.y - self.step) <NEW_LINE> <DEDENT> elif chk == 2: <NEW_LINE> <INDENT> killenemy(scene, self.y-self.step, Enemy1.enemies) <NEW_LINE> self.setPos(scene, self.x, self.y - self.step) <NEW_LINE> <DEDENT> elif chk == 3: <NEW_LINE> <INDENT> killenemy(scene, self.y-self.step, Enemy1.enemies) <NEW_LINE> Lives.lives -= 1 <NEW_LINE> self.setPos(scene, self.x, self.y - self.step) <NEW_LINE> <DEDENT> elif chk == 4: <NEW_LINE> <INDENT> collect_coin(scene, self.y, Coins.coins) <NEW_LINE> self.setPos(scene, self.x, self.y - self.step) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> chk = clashcheck(scene, self, self.x + self.gravity, self.y - self.step) <NEW_LINE> if chk == 0: <NEW_LINE> <INDENT> self.setPos(scene, self.x + self.gravity, self.y - self.step) <NEW_LINE> <DEDENT> elif chk == 2: <NEW_LINE> <INDENT> killenemy(scene, self.y-self.step, Enemy1.enemies) <NEW_LINE> self.setPos(scene, self.x + self.gravity, self.y - self.step) <NEW_LINE> <DEDENT> elif chk == 3: <NEW_LINE> <INDENT> killenemy(scene, self.y-self.step, Enemy1.enemies) <NEW_LINE> Lives.lives -= 1 <NEW_LINE> self.setPos(scene, self.x + self.gravity, self.y - self.step) <NEW_LINE> <DEDENT> elif chk == 4: <NEW_LINE> <INDENT> collect_coin(scene, self.y, Coins.coins) <NEW_LINE> self.setPos(scene, self.x + self.gravity, self.y - self.step)
Make mario move left after making necessary checks
625941b467a9b606de4a7c93
def _dynamodb_types_to_py(self, d): <NEW_LINE> <INDENT> if not isinstance(d, (dict, list)): <NEW_LINE> <INDENT> if isinstance(d, Decimal): <NEW_LINE> <INDENT> if abs(d % 1) > 0: <NEW_LINE> <INDENT> return float(d) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return int(d) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return d <NEW_LINE> <DEDENT> <DEDENT> if isinstance(d, list): <NEW_LINE> <INDENT> return [v for v in (self._dynamodb_types_to_py(v) for v in d)] <NEW_LINE> <DEDENT> return {k: v for k, v in ((k, self._dynamodb_types_to_py(v)) for k, v in d.items())}
Convert DynamoDB types to python - recurse
625941b407f4c71912b1125c
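A hedged sketch of the Decimal-to-native conversion; the SimpleNamespace stand-in exists only so the recursive self._dynamodb_types_to_py calls resolve:

from decimal import Decimal
from types import SimpleNamespace

obj = SimpleNamespace()
obj._dynamodb_types_to_py = lambda d: _dynamodb_types_to_py(obj, d)

item = {"count": Decimal("3"), "price": Decimal("9.99"), "tags": [Decimal("1"), "x"]}
print(obj._dynamodb_types_to_py(item))  # {'count': 3, 'price': 9.99, 'tags': [1, 'x']}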
def generate(env): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> bld = env['BUILDERS']['Gch'] <NEW_LINE> bldsh = env['BUILDERS']['GchSh'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> bld = GchBuilder <NEW_LINE> bldsh = GchShBuilder <NEW_LINE> env['BUILDERS']['Gch'] = bld <NEW_LINE> env['BUILDERS']['GchSh'] = bldsh <NEW_LINE> <DEDENT> env['GCHCOM'] = GCHCOMString <NEW_LINE> env['GCHSHCOM'] = GCHSHCOMString <NEW_LINE> for suffix in SCons.Util.Split('.c .C .cc .cxx .cpp .c++'): <NEW_LINE> <INDENT> env['BUILDERS']['StaticObject'].add_emitter( suffix, static_pch_emitter ) <NEW_LINE> env['BUILDERS']['SharedObject'].add_emitter( suffix, shared_pch_emitter )
Add builders and construction variables for gcc PCH to an Environment.
625941b45fdd1c0f98dc0008
def get_section(config, section): <NEW_LINE> <INDENT> configtree = config_from_list(config) <NEW_LINE> return get_section_recursive(configtree, section)
return config starting from dict section with the desired matches
625941b45e10d32532c5ed06
def opengl(self, dev_id=0): <NEW_LINE> <INDENT> return self.context(11, dev_id)
Construct remote OpenGL device.
625941b421bff66bcd68472c
def print_tree(self, traversal_type): <NEW_LINE> <INDENT> answer = None <NEW_LINE> if traversal_type.lower() == "preorder": <NEW_LINE> <INDENT> answer = self.preorder_print(self.root,[]) <NEW_LINE> <DEDENT> elif traversal_type.lower() == "inorder": <NEW_LINE> <INDENT> answer = self.inorder_print(self.root,[]) <NEW_LINE> <DEDENT> elif traversal_type.lower() == "postorder": <NEW_LINE> <INDENT> answer = self.postorder_print(self.root,[]) <NEW_LINE> <DEDENT> return "-".join(list(map(str, answer)))
Print all tree nodes as they are visited in a pre-order, in-order, or post-order traversal
625941b4d7e4931a7ee9dcf2
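To make the traversal dispatch concrete, here is a minimal sketch of the pieces print_tree relies on; the Node class and the preorder_print helper are assumptions standing in for the surrounding tree class:

class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

def preorder_print(node, traversal):
    # Visit the root, then the left subtree, then the right subtree.
    if node:
        traversal.append(node.value)
        preorder_print(node.left, traversal)
        preorder_print(node.right, traversal)
    return traversal

root = Node(1)
root.left, root.right = Node(2), Node(3)
print("-".join(map(str, preorder_print(root, []))))  # prints 1-2-3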
def __init__(self, username: str, password: str, apiKey: str, clientCertificatePath: str, certificateKeyPath: str): <NEW_LINE> <INDENT> self.username = username <NEW_LINE> self.password = password <NEW_LINE> self.apiKey = apiKey <NEW_LINE> self.clientCertificatePath = clientCertificatePath <NEW_LINE> self.certificateKeyPath = certificateKeyPath <NEW_LINE> self.sessionToken = None <NEW_LINE> self.login()
Client for non-interactive connections to the Betfair API. Non-interactive (bot) logins require self-signed certificates: https://docs.developer.betfair.com/display/1smk3cen4v3lu3yomq5qye0ni/Non-Interactive+%28bot%29+login :param username: (str) :param password: (str) :param apiKey: (str) :param clientCertificatePath: (str) Path to self-signed client certificate. :param certificateKeyPath: (str) Path to self-signed client certificate key.
625941b4ec188e330fd5a57e
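Note that constructing the client immediately calls login(), so instantiation has a network side effect. In the sketch below, the class name BetfairClient and every credential value are hypothetical:

client = BetfairClient(
    username="my-user",                       # hypothetical credentials
    password="my-pass",
    apiKey="my-app-key",
    clientCertificatePath="client-2048.crt",  # self-signed certificate pair
    certificateKeyPath="client-2048.key",
)
# After __init__ returns, login() has populated client.sessionToken.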
def find_who_shared_post(self, html): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> aria_labelled_by = html.find('div', {"aria-labelledby": True}) <NEW_LINE> h4_label = aria_labelled_by.find('h4', id=aria_labelled_by['aria-labelledby']) <NEW_LINE> who_shared_post = h4_label.find('span', {"class": None}).string <NEW_LINE> post_id = self.find_post_id(h4_label.parent.parent.parent) <NEW_LINE> return {"who_shared_post": who_shared_post, "post_id": post_id} <NEW_LINE> <DEDENT> except (AttributeError, TypeError) as e: <NEW_LINE> <INDENT> print(e) <NEW_LINE> return None
Return the post id and the name of the user who shared the post
625941b416aa5153ce36224e
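The lookup chain is clearer against toy markup. The snippet below mimics, as an assumption, the structure the scraper expects: a div whose aria-labelledby attribute points at an h4 containing the sharer's name.

from bs4 import BeautifulSoup

markup = '<div aria-labelledby="x1"><h4 id="x1"><span>Jane Doe</span></h4></div>'
soup = BeautifulSoup(markup, "html.parser")
container = soup.find("div", {"aria-labelledby": True})     # any div carrying the attribute
h4 = container.find("h4", id=container["aria-labelledby"])  # the h4 that labels it
print(h4.find("span", {"class": None}).string)              # Jane Doe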
def writePySet(self, out, names=None): <NEW_LINE> <INDENT> raise AssertionError('Lists do not have a set method.')
Write code for setter. Raise an exception in order to ensure that our inherited "PySet" code generation is used.
625941b4462c4b4f79d1d4a6
def render_properties(self, mode, context, props): <NEW_LINE> <INDENT> return filter(None, [self.render_property(name, mode, context, props) for name in sorted(props)])
Prepare rendering of a collection of properties.
625941b401c39578d7e74c1b
def _initialize(self, *args, **kwargs): <NEW_LINE> <INDENT> return _VISHNU.FileStat__initialize(self, *args, **kwargs)
_initialize(self)
625941b4b57a9660fec33655
def can_blind (self): <NEW_LINE> <INDENT> return 0
can_blind() : bool Return a Boolean value recording whether this algorithm can blind data. (This does not imply that this particular key object has the private information required to blind a message.)
625941b42ae34c7f2600cf08
def reset_regs(self): <NEW_LINE> <INDENT> for reg in self.lifter.arch.regs.all_regs_ids_no_alias: <NEW_LINE> <INDENT> self.symbols.symbols_id[reg] = m2_expr.ExprInt(0, size=reg.size)
Set all register values to 0. Register aliases are ignored
625941b4460517430c393f67
def StreamingRecognize(self, request_iterator, context): <NEW_LINE> <INDENT> context.set_code(grpc.StatusCode.UNIMPLEMENTED) <NEW_LINE> context.set_details('Method not implemented!') <NEW_LINE> raise NotImplementedError('Method not implemented!')
Performs bidirectional streaming speech recognition: receive results while sending audio. This method is only available via the gRPC API (not REST).
625941b4ab23a570cc24ff56
def run_asp_program(program): <NEW_LINE> <INDENT> my_env = os.environ.copy() <NEW_LINE> process = subprocess.Popen(["java", "-jar", cur_dir + "solvers/sparc.jar", "-solver", "clingo", "-A"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, stdin = subprocess.PIPE,env = my_env) <NEW_LINE> stdout, stderr = process.communicate(input = program.encode('ascii')) <NEW_LINE> return stdout
Run the given program through the SPARC solver (clingo backend) and return the set of its answer sets
625941b45166f23b2e1a4f2f
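Usage is a plain call with the program text. The file name below is hypothetical, and the program must already be valid SPARC input, since the function simply pipes it to sparc.jar:

# Hypothetical: blocks.sp is assumed to hold a valid SPARC program.
with open("blocks.sp") as f:
    program = f.read()
answer_sets = run_asp_program(program)  # raw bytes from the solver's stdout
print(answer_sets.decode("ascii", errors="replace"))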
def class_seven(self): <NEW_LINE> <INDENT> __f = open(os.path.join(self.corpus_path, 'class_seven'), 'w+', encoding='utf-8') <NEW_LINE> first_word = ['个鼓', '个鼓的'] <NEW_LINE> second_word = ['分析', '市场分析', '价值', '最新推荐', '推荐', '评级', '市场评级', '评分', '分数', '研究报告', '研报', '推荐次数', '行业报告'] <NEW_LINE> for i in first_word: <NEW_LINE> <INDENT> for j in second_word: <NEW_LINE> <INDENT> word = i + j + '\n' <NEW_LINE> __f.write(word) <NEW_LINE> <DEDENT> <DEDENT> one_word = ['板快', '板快行业的'] <NEW_LINE> two_word = ['股票推荐', '研究', '涨跌幅', '研报', '开盘价', '收盘价', '振幅', '交易额'] <NEW_LINE> for i in one_word: <NEW_LINE> <INDENT> for j in two_word: <NEW_LINE> <INDENT> word = i + j + '\n' <NEW_LINE> __f.write(word) <NEW_LINE> <DEDENT> <DEDENT> __f.write('热门的股票推荐\n') <NEW_LINE> __f.write('热门的股票\n') <NEW_LINE> __f.write('热门的股票有哪些\n') <NEW_LINE> __f.write('很火的股票\n') <NEW_LINE> __f.write('好的股票\n') <NEW_LINE> __f.write('哪些股票能赚钱\n') <NEW_LINE> __f.write('大家关注哪些股票\n') <NEW_LINE> __f.write('大家关心哪些股票\n') <NEW_LINE> __f.write('主流的股票\n') <NEW_LINE> __f.write('人们都在看的股票\n') <NEW_LINE> __f.write('看好的股票\n') <NEW_LINE> __f.write('推荐次数最多的股票\n') <NEW_LINE> __f.write('推荐次数很多的股票\n') <NEW_LINE> __f.write('推荐次数多的股票\n') <NEW_LINE> __f.write('热捧的股票\n') <NEW_LINE> __f.write('建议次数最多的股票\n') <NEW_LINE> __f.write('最热门的行业推荐\n') <NEW_LINE> __f.write('最热门的行业\n') <NEW_LINE> __f.write('好的的行业推荐\n') <NEW_LINE> __f.write('很火的行业\n') <NEW_LINE> __f.write('哪些行业能赚钱\n') <NEW_LINE> __f.write('最被关注的行业\n') <NEW_LINE> __f.write('人们关心哪些行业\n') <NEW_LINE> __f.write('主流的行业\n') <NEW_LINE> __f.write('推荐次数最多的行业\n') <NEW_LINE> __f.write('推荐次数很多的行业\n') <NEW_LINE> __f.write('推荐次数多的行业\n') <NEW_LINE> __f.write('热捧的行业\n') <NEW_LINE> __f.write('建议次数最多的行业\n') <NEW_LINE> __f.write('看好的行业\n') <NEW_LINE> __f.write('最新的研究报告\n') <NEW_LINE> __f.write('最新的研报\n') <NEW_LINE> __f.write('股票最新的情况\n') <NEW_LINE> __f.write('股票最新的消息\n') <NEW_LINE> __f.write('建议次数最多的行业\n') <NEW_LINE> __f.write('看好的行业\n') <NEW_LINE> __f.write('最准的分析师\n') <NEW_LINE> __f.write('最好分析师推荐\n') <NEW_LINE> __f.write('分析师的排名\n') <NEW_LINE> __f.write('分析师有哪些\n') <NEW_LINE> __f.write('研报最多的分析师\n') <NEW_LINE> __f.write('评分最高的研究员\n') <NEW_LINE> __f.write('分析师怎么样') <NEW_LINE> __f.close()
Generate the corpus for the research report module. :return: None
625941b48a43f66fc4b53e41
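The bulk of class_seven is a Cartesian product of prefixes and topics; itertools.product expresses the same expansion more compactly. The corrected spelling below (个股 for '个鼓') is an assumption about the intended tokens:

from itertools import product

first_word = ["个股", "个股的"]
second_word = ["分析", "评级"]
for phrase in (i + j for i, j in product(first_word, second_word)):
    print(phrase)
# 个股分析, 个股评级, 个股的分析, 个股的评级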
def listify(value): <NEW_LINE> <INDENT> if isinstance(value, str): <NEW_LINE> <INDENT> return value.replace(',', ' ').split() <NEW_LINE> <DEDENT> elif isinstance(value, list): <NEW_LINE> <INDENT> out_list = [] <NEW_LINE> for val in value: <NEW_LINE> <INDENT> if isinstance(val, list): <NEW_LINE> <INDENT> out_list.extend(listify(val)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> out_list.append(val) <NEW_LINE> <DEDENT> <DEDENT> return out_list
Convert an option specified as a string to a list. Allow both comma and space as delimiters. Lists pass through, with any nested lists flattened.
625941b4cdde0d52a9e52e05
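A quick check of both input shapes, assuming the listify above is in scope:

print(listify("a,b c"))                 # ['a', 'b', 'c']  (comma and space both split)
print(listify(["a", ["b", "c"], "d"]))  # ['a', 'b', 'c', 'd']  (nested lists flattened)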
def append_genre_film_work(self, film_id: str, genre_id_list: List[str]): <NEW_LINE> <INDENT> for genre_id in genre_id_list: <NEW_LINE> <INDENT> id_ = str(uuid4()) <NEW_LINE> row = { "id": id_, "film_work_id": film_id, "genre_id": genre_id, "created": datetime.now().astimezone() } <NEW_LINE> self.genre_film_work.append(row)
Append several rows to the local genre_film_work table.
625941b44e4d5625662d41b5
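A minimal self-contained sketch of the method in context; the Loader class is a hypothetical stand-in for whatever object owns the genre_film_work list:

from datetime import datetime
from typing import List
from uuid import uuid4

class Loader:
    def __init__(self):
        self.genre_film_work = []

    def append_genre_film_work(self, film_id: str, genre_id_list: List[str]):
        # One junction row per (film, genre) pair, keyed by a fresh UUID.
        for genre_id in genre_id_list:
            self.genre_film_work.append({
                "id": str(uuid4()),
                "film_work_id": film_id,
                "genre_id": genre_id,
                "created": datetime.now().astimezone(),
            })

loader = Loader()
loader.append_genre_film_work(str(uuid4()), [str(uuid4()), str(uuid4())])
print(len(loader.genre_film_work))  # 2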
def merge(self, intervals): <NEW_LINE> <INDENT> res = [] <NEW_LINE> for i in sorted(intervals, key=lambda x: x.start): <NEW_LINE> <INDENT> if res and i.start <= res[-1].end: <NEW_LINE> <INDENT> res[-1].end = max(i.end, res[-1].end) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> res.append(i) <NEW_LINE> <DEDENT> <DEDENT> return res
:type intervals: List[Interval] :rtype: List[Interval]
625941b45f7d997b87174872
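A self-contained run of the interval merge above; the Interval class here is a minimal stand-in for the type the :type annotation refers to:

class Interval:
    def __init__(self, start, end):
        self.start, self.end = start, end
    def __repr__(self):
        return "[%d, %d]" % (self.start, self.end)

def merge(intervals):
    res = []
    # After sorting by start, a new interval can only overlap the last merged one.
    for i in sorted(intervals, key=lambda x: x.start):
        if res and i.start <= res[-1].end:
            res[-1].end = max(i.end, res[-1].end)
        else:
            res.append(i)
    return res

print(merge([Interval(1, 3), Interval(2, 6), Interval(8, 10)]))  # [[1, 6], [8, 10]]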
def names(as_object=True, p5_connection=None): <NEW_LINE> <INDENT> method_name = "names" <NEW_LINE> result = exec_nsdchat([module_name, method_name], p5_connection) <NEW_LINE> if not as_object: <NEW_LINE> <INDENT> return result <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return resourcelist(result, Device, p5_connection)
Syntax: Device names Description: Returns a list of single tape device resources. Return Values: -On Success: the list of device names, or the string "<empty>" if no devices are configured
625941b4711fe17d82542154
def test_analyze_document_cluster_get(self): <NEW_LINE> <INDENT> pass
Test case for analyze_document_cluster_get
625941b410dbd63aa1bd2988