code (stringlengths 4–4.48k) | docstring (stringlengths 1–6.45k) | _id (stringlengths 24) |
---|---|---|
def test_user_change_page(self): <NEW_LINE> <INDENT> url = reverse('admin:core_user_change', args=[self.user.id]) <NEW_LINE> res = self.client.get(url) <NEW_LINE> self.assertEqual(res.status_code, 200) | Test that user change page works | 625941b985dfad0860c3accc |
def process_event(self, event, event_type): <NEW_LINE> <INDENT> if event.get('user', '') == self.uid: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.logger.debug("Received {} event".format(event_type)) <NEW_LINE> for integration in self.integrations.get(event_type, []): <NEW_LINE> <INDENT> integration.update(event) | Process each message type event
Pass the event on to any registered integration for that event type. | 625941b99b70327d1c4e0c47 |
def fetch(self, chrom, start=None, end=None): <NEW_LINE> <INDENT> if not pysam: <NEW_LINE> <INDENT> raise Exception('pysam not available, try "pip install pysam"?') <NEW_LINE> <DEDENT> if not self.filename: <NEW_LINE> <INDENT> raise Exception('Please provide a filename (or a "normal" fsock)') <NEW_LINE> <DEDENT> if not self._tabix: <NEW_LINE> <INDENT> self._tabix = pysam.Tabixfile(self.filename, encoding=self.encoding) <NEW_LINE> <DEDENT> if self._prepend_chr and chrom[:3] == 'chr': <NEW_LINE> <INDENT> chrom = chrom[3:] <NEW_LINE> <DEDENT> self.reader = self._tabix.fetch(chrom, start, end) <NEW_LINE> return self | Fetches records from a tabix-indexed VCF file and returns an
iterable of ``_Record`` instances
chrom must be specified.
The start and end coordinates are in the zero-based,
half-open coordinate system, similar to ``_Record.start`` and
``_Record.end``. The very first base of a chromosome is
index 0, and the region includes bases up to, but not
including the base at the end coordinate. For example
``fetch('4', 10, 20)`` would include all variants
overlapping a 10 base pair region from the 11th base
through the 20th base (which is at index 19) of chromosome
4. It would not include the 21st base (at index 20). See
http://genomewiki.ucsc.edu/index.php/Coordinate_Transforms
for more information on the zero-based, half-open coordinate
system.
If end is omitted, all variants from start until the end of
the chromosome chrom will be included.
If start and end are omitted, all variants on chrom will be
returned.
requires pysam | 625941b921bff66bcd6847c9 |
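The half-open convention this docstring describes is the same one Python slicing uses, which makes it easy to sanity-check. A minimal sketch, independent of pysam, showing why ``fetch('4', 10, 20)`` covers the 11th through 20th bases:

```python
# Zero-based, half-open intervals behave exactly like Python slices:
# the start index is included, the end index is excluded.
bases = list(range(1, 31))      # 1-based base numbers 1..30 of a toy chromosome
start, end = 10, 20             # the coordinates passed to fetch('4', 10, 20)

selected = bases[start:end]     # indices 10..19 -> base numbers 11..20
assert selected == list(range(11, 21))
assert 21 not in selected       # the base at index 20 (the 21st base) is excluded
```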
def commitPost(post): <NEW_LINE> <INDENT> values = (post.inits, post.recipient, post.topic, post.comment) <NEW_LINE> db = connect.connect() <NEW_LINE> cursor = db.cursor() <NEW_LINE> cursor.execute(INSERT_QUERY, values) <NEW_LINE> ix = db.insert_id() <NEW_LINE> if post.parent_ix: <NEW_LINE> <INDENT> cursor.execute(INSERT_PARENT_QUERY, (ix, post.parent_ix)) <NEW_LINE> cursor.execute(ANCESTORS_QUERY, (ix,)) <NEW_LINE> <DEDENT> cursor.execute(READPOSTS_UPDATE_QUERY, (post.inits, ix)) <NEW_LINE> db.commit() <NEW_LINE> return ix | commit a post to the database, and mark it as read by the person posting it | 625941b90383005118ecf458 |
def genListRand(L): <NEW_LINE> <INDENT> import random <NEW_LINE> p = dummy = ListNode(0) <NEW_LINE> for i in range(L): <NEW_LINE> <INDENT> p.next = ListNode(random.randint(1, 100)) <NEW_LINE> p = p.next <NEW_LINE> <DEDENT> return dummy.next | Generate a singly linked list of L random numbers in the range 1-100.
:param L: length of linked list to generate
:return: head | 625941b90a50d4780f666d03 |
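A short usage sketch for the row above; the ``ListNode`` class is not shown in the row, so a minimal assumed definition is included:

```python
import random

class ListNode:
    # Assumed minimal singly linked node; the original presumably has its own.
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def genListRand(L):
    p = dummy = ListNode(0)
    for _ in range(L):
        p.next = ListNode(random.randint(1, 100))
        p = p.next
    return dummy.next

# Walk the generated list and check its length and value range.
node, vals = genListRand(5), []
while node:
    vals.append(node.val)
    node = node.next
assert len(vals) == 5 and all(1 <= v <= 100 for v in vals)
```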
def exec_for_segment(self, child_info, segment, addtl_env = None): <NEW_LINE> <INDENT> template_args, environ = {}, {} <NEW_LINE> self.update_args_env_iter(template_args, environ, segment.n_iter) <NEW_LINE> self.update_args_env_segment(template_args, environ, segment) <NEW_LINE> environ.update(addtl_env or {}) <NEW_LINE> return self.exec_child_from_child_info(child_info, template_args, environ) | Execute a child process with environment and template expansion from the given
segment. | 625941b9442bda511e8be299 |
def partition( self, wsp_darr, wdir_darr, dep_darr, swells=3, agefac=1.7, wscut=0.3333, ): <NEW_LINE> <INDENT> if not {attrs.FREQNAME, attrs.DIRNAME}.issubset(self._obj.dims): <NEW_LINE> <INDENT> raise ValueError(f"(freq, dir) dims required, only found {self._obj.dims}") <NEW_LINE> <DEDENT> for darr in (wsp_darr, wdir_darr, dep_darr): <NEW_LINE> <INDENT> if set(darr.dims) != self._non_spec_dims: <NEW_LINE> <INDENT> raise ValueError( f"{darr.name} dims {list(darr.dims)} need matching " f"non-spectral dims in SpecArray {self._non_spec_dims}" ) <NEW_LINE> <DEDENT> <DEDENT> return partition( dset=self._obj, wspd=wsp_darr, wdir=wdir_darr, dpt=dep_darr, swells=swells, agefac=agefac, wscut=wscut, ) | Partition wave spectra using Hanson's watershed algorithm.
This method is not lazy; make sure the array will fit into memory.
Args:
- wsp_darr (DataArray): wind speed (m/s).
- wdir_darr (DataArray): Wind direction (degree).
- dep_darr (DataArray): Water depth (m).
- swells (int): Number of swell partitions to compute.
- agefac (float): Age factor.
- wscut (float): Wind speed cutoff.
Returns:
- part_spec (SpecArray): partitioned spectra with one extra dimension
representing partition number.
Note:
- Input DataArrays must have same non-spectral dims as SpecArray.
References:
- Hanson, Jeffrey L., et al. "Pacific hindcast performance of three
numerical wave models." JTECH 26.8 (2009): 1614-1633. | 625941b9a219f33f346287e8 |
def sign_won(self, sign): <NEW_LINE> <INDENT> pos_all = np.transpose(np.where(self.state==self.sign2num.get(sign))) <NEW_LINE> for pos in pos_all: <NEW_LINE> <INDENT> dir_all = np.array([ [1,-1], [1,0], [1,1], [0,1] ]) <NEW_LINE> for dir in dir_all: <NEW_LINE> <INDENT> pos_cur = pos.copy() <NEW_LINE> i = 1 <NEW_LINE> while next_is_sign(pos_cur, pos_all, dir): <NEW_LINE> <INDENT> i += 1 <NEW_LINE> pos_cur += dir <NEW_LINE> if i == self.len2win: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return False | check whether the player that plays 'sign' has won
sign (string): sign of a player on the game board;
should be a sign in self.sign_play | 625941b9d18da76e23532346 |
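The row above scans four direction vectors from every occupied cell; the opposite directions are covered implicitly, since any run is also reached from its other end. Its ``next_is_sign`` helper is not shown, so here is a self-contained sketch of the same line-scan idea:

```python
import numpy as np

def has_k_in_a_row(board, num, k):
    # Set of occupied cells for O(1) membership tests during the scan.
    positions = {tuple(p) for p in np.argwhere(board == num)}
    for pos in positions:
        for dy, dx in ((1, -1), (1, 0), (1, 1), (0, 1)):
            run, (y, x) = 1, pos
            while (y + dy, x + dx) in positions:
                y, x = y + dy, x + dx
                run += 1
                if run == k:
                    return True
    return False

board = np.zeros((3, 3), dtype=int)
board[0, 0] = board[1, 1] = board[2, 2] = 1   # player 1 fills a diagonal
assert has_k_in_a_row(board, num=1, k=3)
```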
@cli.command() <NEW_LINE> @click.option("-h", "--hostname", type=str, default="localhost", help="localhost") <NEW_LINE> @click.option("-p", "--port", type=int, default=5000, help="5000") <NEW_LINE> @click.option("--no-reloader", is_flag=True, default=False) <NEW_LINE> @click.option("--debugger", is_flag=True) <NEW_LINE> @click.option("--no-evalex", is_flag=True, default=False) <NEW_LINE> @click.option("--threaded", is_flag=True) <NEW_LINE> @click.option("--processes", type=int, default=1, help="1") <NEW_LINE> def runserver(hostname, port, no_reloader, debugger, no_evalex, threaded, processes): <NEW_LINE> <INDENT> reloader = not no_reloader <NEW_LINE> evalex = not no_evalex <NEW_LINE> run_simple( hostname, port, app, use_reloader=reloader, use_debugger=debugger, use_evalex=evalex, threaded=threaded, processes=processes, ) | Start a new development server. | 625941b9baa26c4b54cb0f97 |
def test_used_tip_offset(self): <NEW_LINE> <INDENT> self.rack.set_tips_used(1) <NEW_LINE> self.assertEqual( self.rack.tip('A2').position, self.rack.get_next_tip().position ) <NEW_LINE> self.rack.set_tips_used(12) <NEW_LINE> self.assertEqual( self.rack.tip('B1').position, self.rack.get_next_tip().position ) | Account for used tip offset. | 625941b95fdd1c0f98dc00a5 |
def _update_state(self): <NEW_LINE> <INDENT> self.state_changed(self.state) | Emit the current state of the turnout | 625941b926238365f5f0ecdd |
def test_notify_with_http_info(self, id, **kwargs): <NEW_LINE> <INDENT> all_params = ['id'] <NEW_LINE> all_params.append('callback') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> all_params.append('_preload_content') <NEW_LINE> all_params.append('_request_timeout') <NEW_LINE> params = locals() <NEW_LINE> for key, val in iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method test_notify" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('id' not in params) or (params['id'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `id` when calling `test_notify`") <NEW_LINE> <DEDENT> collection_formats = {} <NEW_LINE> resource_path = '/api/v2/integrations/{id}/test_notify.json_api'.replace('{format}', 'json_api') <NEW_LINE> path_params = {} <NEW_LINE> if 'id' in params: <NEW_LINE> <INDENT> path_params['id'] = params['id'] <NEW_LINE> <DEDENT> query_params = {} <NEW_LINE> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client. select_header_accept(['application/vnd.api+json']) <NEW_LINE> header_params['Content-Type'] = self.api_client. select_header_content_type(['application/vnd.api+json']) <NEW_LINE> auth_settings = [] <NEW_LINE> return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Meta', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) | Test an Integration
This will test any type of Integration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.test_notify_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Integration ID (required)
:return: Meta
If the method is called asynchronously,
returns the request thread. | 625941b996565a6dacc8f549 |
def _gui_notify(self, _, signal=None): <NEW_LINE> <INDENT> if signal: <NEW_LINE> <INDENT> signal.emit({self.PASSED_KEY: True, self.ERROR_KEY: ""}) | Callback used to notify the GUI of a success. Will emit signal
if specified
NOTE: This method is final; it should not be redefined.
:param _: IGNORED. Returned from the previous callback
:type _: IGNORED
:param signal: Signal to emit if it fails here first
:type signal: QtCore.SignalInstance | 625941b955399d3f05588527 |
def get_income_data(file): <NEW_LINE> <INDENT> data = pd.read_csv(file) <NEW_LINE> data = data.iloc[5:57, 0:2] <NEW_LINE> data.columns = ['state', 'Median HouseHold Income'] <NEW_LINE> data = data.reset_index() <NEW_LINE> data = data.drop('index', axis=1) <NEW_LINE> for row in data.index: <NEW_LINE> <INDENT> s = data['Median HouseHold Income'][row] <NEW_LINE> s = float(s.translate(str.maketrans('', '', string.punctuation))) <NEW_LINE> data['Median HouseHold Income'][row] = s <NEW_LINE> <DEDENT> data = data.drop(0) <NEW_LINE> data = data.drop(9) <NEW_LINE> return data | Get income DataFrame from income data.
:param str file: Filename of income data
:return: Income DataFrame
:rtype: pd.DataFrame | 625941b956b00c62f0f144d2 |
def _initialize_memory_block(self, memory_size=None, memory_dim=None): <NEW_LINE> <INDENT> memory_size = self.memory_size if memory_size is None else memory_size <NEW_LINE> memory_dim = self.memory_dim if memory_dim is None else memory_dim <NEW_LINE> return torch.ones([memory_size, memory_dim], dtype=torch.float32) * 1e-6 | Initialize a memory block
# Arguments
memory_size [int]: number of memory slots
memory_dim [int]: the dimension of each memory slot
# Returns
[Tensor]: the memory block of shape [memory_size x memory_dim] | 625941b963f4b57ef0000f96 |
def p_limit_offset(p): <NEW_LINE> <INDENT> handler = _handler(p) <NEW_LINE> handler.startPagination() <NEW_LINE> if len(p) == 3: <NEW_LINE> <INDENT> getattr(handler, p[1].lower())(int(p[2])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> handler.limit(int(p[2])) <NEW_LINE> handler.offset(int(p[4])) <NEW_LINE> <DEDENT> handler.endPagination() | limit_offset : KW_OFFSET INTEGER
| KW_LIMIT INTEGER
| KW_LIMIT INTEGER KW_OFFSET INTEGER | 625941b98a43f66fc4b53edd |
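The production above accepts three input shapes, distinguished by ``len(p)`` (yacc puts the left-hand side in ``p[0]``, so a two-token match yields ``len(p) == 3``). A stand-alone sketch of the same dispatch, with a hypothetical handler standing in for the parser's real one:

```python
class PaginationHandler:
    # Hypothetical stand-in for the handler the grammar rule drives.
    def startPagination(self): print("start")
    def endPagination(self): print("end")
    def limit(self, n): print("limit", n)
    def offset(self, n): print("offset", n)

def apply_rule(handler, p):
    # p mimics yacc's production object: p[0] is the (unused) LHS slot.
    handler.startPagination()
    if len(p) == 3:                    # "LIMIT n" or "OFFSET n"
        getattr(handler, p[1].lower())(int(p[2]))
    else:                              # "LIMIT n OFFSET m"
        handler.limit(int(p[2]))
        handler.offset(int(p[4]))
    handler.endPagination()

apply_rule(PaginationHandler(), [None, 'OFFSET', '10'])
apply_rule(PaginationHandler(), [None, 'LIMIT', '5', 'OFFSET', '10'])
```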
def autoajuda_short_form(**kwargs): <NEW_LINE> <INDENT> return AutoajudaShortForm(**kwargs) | Function to get Autoajuda's short form: just a subset of Autoajuda's properties | 625941b9377c676e9127201e |
:param kwargs: form properties
:return: Form | 625941b9377c676e9127201e |
def create(self): <NEW_LINE> <INDENT> check_dir = Directory(self.dir_name) <NEW_LINE> check_dir.present() <NEW_LINE> create_book = input("Please enter a name for new book: ") <NEW_LINE> while not create_book: <NEW_LINE> <INDENT> create_book = input("Name can't be blank. Please enter a valid name: ") <NEW_LINE> <DEDENT> while os.path.exists(f"./{self.dir_name}/{create_book}{extension}"): <NEW_LINE> <INDENT> create_book = input("Book already exist. Please enter different name: ") <NEW_LINE> while not create_book: <NEW_LINE> <INDENT> create_book = input("Name can't be blank. Please enter a valid name: ") <NEW_LINE> <DEDENT> <DEDENT> open(f"./{self.dir_name}/{create_book}{extension}", 'a').close() <NEW_LINE> print(f"Book with name \"{create_book}\" was created.") | Create a book in directory. | 625941b9507cdc57c6306b48 |
def build_vol_rows(self, vol_list): <NEW_LINE> <INDENT> volume_rows = [] <NEW_LINE> for vol in vol_list: <NEW_LINE> <INDENT> entries = '' <NEW_LINE> entries += "<entry><para>%s</para></entry>" % vol.parent.name <NEW_LINE> for attr in [ 'name', 'type' ]: <NEW_LINE> <INDENT> entries += "<entry><para>%s</para></entry>" % getattr(vol, attr) <NEW_LINE> pass <NEW_LINE> <DEDENT> for attr in [ 'snapreserve', 'raw', 'usable']: <NEW_LINE> <INDENT> entries += "<entry><para>%.1f</para></entry>" % getattr(vol, attr) <NEW_LINE> <DEDENT> volume_rows.append("<row>%s</row>" % entries) <NEW_LINE> pass <NEW_LINE> <DEDENT> return '\n'.join(volume_rows) | Take a list of Volumes and build a list of <row/>s to be inserted
into a table body. | 625941b9be383301e01b5301 |
def _make_table_cell(self, cell) -> Tuple[str, Any]: <NEW_LINE> <INDENT> from odf.table import TableCell <NEW_LINE> attributes = self._make_table_cell_attributes(cell) <NEW_LINE> val, fmt = self._value_with_fmt(cell.val) <NEW_LINE> pvalue = value = val <NEW_LINE> if isinstance(val, bool): <NEW_LINE> <INDENT> value = str(val).lower() <NEW_LINE> pvalue = str(val).upper() <NEW_LINE> <DEDENT> if isinstance(val, datetime.datetime): <NEW_LINE> <INDENT> value = val.isoformat() <NEW_LINE> pvalue = val.strftime("%c") <NEW_LINE> return ( pvalue, TableCell(valuetype="date", datevalue=value, attributes=attributes), ) <NEW_LINE> <DEDENT> elif isinstance(val, datetime.date): <NEW_LINE> <INDENT> value = val.strftime("%Y-%m-%d") <NEW_LINE> pvalue = val.strftime("%x") <NEW_LINE> return ( pvalue, TableCell(valuetype="date", datevalue=value, attributes=attributes), ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> class_to_cell_type = { str: "string", int: "float", float: "float", bool: "boolean", } <NEW_LINE> return ( pvalue, TableCell( valuetype=class_to_cell_type[type(val)], value=value, attributes=attributes, ), ) | Convert cell data to an OpenDocument spreadsheet cell
Parameters
----------
cell : ExcelCell
Spreadsheet cell data
Returns
-------
pvalue, cell : Tuple[str, TableCell]
Display value, Cell value | 625941b94527f215b584c2cf |
def __init__(self, num_type, mu_dict, alpha_dict, kernel_dict, activation, loss_type, cost_type, use_cuda): <NEW_LINE> <INDENT> super(HawkesProcessModel_OT, self).__init__(num_type, mu_dict, loss_type, use_cuda) <NEW_LINE> self.model_name = 'A Hawkes Process' <NEW_LINE> self.activation = activation <NEW_LINE> exogenousIntensity = getattr(model.ExogenousIntensityFamily, mu_dict['model_name']) <NEW_LINE> endogenousImpacts = getattr(model.EndogenousImpactFamily, alpha_dict['model_name']) <NEW_LINE> decayKernel = getattr(model.DecayKernelFamily, kernel_dict['model_name']) <NEW_LINE> mu_model = exogenousIntensity(num_type, mu_dict['parameter_set']) <NEW_LINE> kernel_para = kernel_dict['parameter_set'].to(self.device) <NEW_LINE> kernel_model = decayKernel(kernel_para) <NEW_LINE> alpha_model = endogenousImpacts(num_type, kernel_model, alpha_dict['parameter_set']) <NEW_LINE> self.lambda_model = HawkesProcessIntensity(mu_model, alpha_model, self.activation) <NEW_LINE> self.print_info() <NEW_LINE> self.dgw = GromovWassersteinDiscrepancy(loss_type=cost_type) <NEW_LINE> self.dw = WassersteinDiscrepancy(loss_type=cost_type) | Initialize generalized Hawkes process
:param num_type: int, the number of event types.
:param mu_dict: the dictionary of exogenous intensity's setting
mu_dict = {'model_name': the name of specific subclass of exogenous intensity,
'parameter_set': a dictionary containing necessary parameters}
:param alpha_dict: the dictionary of endogenous intensity's setting
alpha_dict = {'model_name': the name of specific subclass of endogenous impact,
'parameter_set': a dictionary containing necessary parameters}
:param kernel_dict: the dictionary of decay kernel's setting
kernel_dict = {'model_name': the name of specific subclass of decay kernel,
'parameter_set': an ndarray containing necessary parameters}
:param activation: str, the type of activation function
:param loss_type: str, the type of loss functions
:param cost_type: str, the type of cost matrix for calculating optimal transport
The length of the list is the number of modalities of the model
Each element of the list is the number of event categories for each modality | 625941b945492302aab5e134 |
def update_edition( *, edition: Edition, build: Optional[Build] = None, title: Optional[str] = None, slug: Optional[str] = None, tracking_mode: Optional[str] = None, tracked_ref: Optional[str] = None, pending_rebuild: Optional[bool] = None, ) -> Edition: <NEW_LINE> <INDENT> logger = get_logger(__name__) <NEW_LINE> logger.info( "Updating edition", edition=edition.slug, new_build=build.slug if build else None, ) <NEW_LINE> if tracked_ref is not None: <NEW_LINE> <INDENT> edition.tracked_refs = [tracked_ref] <NEW_LINE> edition.tracked_ref = tracked_ref <NEW_LINE> <DEDENT> if tracking_mode is not None: <NEW_LINE> <INDENT> edition.set_mode(tracking_mode) <NEW_LINE> <DEDENT> if title is not None: <NEW_LINE> <INDENT> edition.title = title <NEW_LINE> <DEDENT> if slug is not None: <NEW_LINE> <INDENT> request_edition_rename(edition=edition, slug=slug) <NEW_LINE> <DEDENT> product = edition.product <NEW_LINE> if pending_rebuild is not None: <NEW_LINE> <INDENT> logger.warning( "Manual reset of Edition.pending_rebuild", edition_slug=edition.slug, project_slug=product.slug, prev_pending_rebuild=edition.pending_rebuild, new_pending_rebuild=pending_rebuild, ) <NEW_LINE> edition.pending_rebuild = pending_rebuild <NEW_LINE> <DEDENT> db.session.add(edition) <NEW_LINE> db.session.commit() <NEW_LINE> if build is not None: <NEW_LINE> <INDENT> request_edition_rebuild(edition=edition, build=build) <NEW_LINE> <DEDENT> request_dashboard_build(product) <NEW_LINE> return edition | Update the metadata of an existing edition or point it at a new
build. | 625941b9c432627299f04ab8 |
def __rmul__(self, count): <NEW_LINE> <INDENT> return LazyConcatenation([self] * count) | Return a list concatenating self with itself ``count`` times. | 625941b94f88993c3716bee8 |
def test_object_post_product(self): <NEW_LINE> <INDENT> pass | Test case for object_post_product
CRUD: Create Product # noqa: E501 | 625941b96e29344779a62489 |
def get_fremont_data(filename='freemont-data.csv', url=FREMONT_URL, force_download=False): <NEW_LINE> <INDENT> if force_download or not os.path.exists(filename): <NEW_LINE> <INDENT> urlretrieve(url, filename) <NEW_LINE> <DEDENT> data = pd.read_csv(filename, index_col='Date') <NEW_LINE> try: <NEW_LINE> <INDENT> data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p') <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> data.index = pd.to_datetime(data.index) <NEW_LINE> <DEDENT> data.columns = ['West', 'East'] <NEW_LINE> data['Total'] = data['West'] + data['East'] <NEW_LINE> return data | Download and cache the Fremont data
Parameters
----------
filename : string(optional)
location to save the data in csv format
url : string (optional)
web location of the data
force_download : bool (optional)
if True, force download of data
Returns
-------
data : pandas.DataFrame
The Fremont bike bridge data | 625941b9d10714528d5ffb54 |
def get_tree(self): <NEW_LINE> <INDENT> return self.tree_classifier | After calling the explain method, you can get the trained tree classifier.
Returns
Trained sklearn.tree.DecisionTreeClassifier | 625941b9ec188e330fd5a61a |
def SVD(prot_sub): <NEW_LINE> <INDENT> S = prot_sub['S'] <NEW_LINE> X = prot_sub['X'] <NEW_LINE> N = X.shape[1] <NEW_LINE> M = S.shape[0] <NEW_LINE> K = S.shape[1] <NEW_LINE> A1 = np.kron(S, np.eye(N)) <NEW_LINE> A2 = block_diag(*X).T <NEW_LINE> A = np.concatenate((A1, -A2), axis=1) <NEW_LINE> U, s, V = np.linalg.svd(A, full_matrices=True) <NEW_LINE> w = V[-1,:] <NEW_LINE> eigen_spacing = (s[-2]-s[-1])/(s[-2]+s[-1]) <NEW_LINE> if np.sum(w < 0) > np.sum(w > 0): <NEW_LINE> <INDENT> w = -1 * w <NEW_LINE> <DEDENT> p_vect = w[0: K * N] <NEW_LINE> U_hat = p_vect.reshape(K, N) <NEW_LINE> L_hat = w[K * N::] <NEW_LINE> Z_hat = 1/L_hat <NEW_LINE> protein = U_hat <NEW_LINE> X_rec = np.diag(Z_hat).dot(S).dot(U_hat) <NEW_LINE> Model = X_rec * np.median(X_rec[:]/X[:]) <NEW_LINE> X_norm = hf.norm_mean(X,0) <NEW_LINE> Rsq = 1- (np.sum(X[:] - Model[:])**2)/np.sum(X_norm**2) <NEW_LINE> opt = dict() <NEW_LINE> opt['model'] = Model <NEW_LINE> opt['rsq'] = Rsq <NEW_LINE> opt['Z'] = np.diag(Z_hat) <NEW_LINE> opt['V'] = w <NEW_LINE> opt['eigen_spacing'] = eigen_spacing <NEW_LINE> return protein, opt | SVD solver:
Return the inferred protein level, Z, r^2 | 625941b992d797404e303ffe |
def confirm_page(self): <NEW_LINE> <INDENT> self._webd_wrap.wait.until(EC.text_to_be_present_in_element((By.CLASS_NAME, 'subheader-1'), 'Booksellers'), 'Bookseller list not present') <NEW_LINE> _actual_url = self._webd_wrap._driver.current_url <NEW_LINE> _actual_title = self._webd_wrap._driver.title <NEW_LINE> _url = self._webd_wrap._baseURL + '/people/booksellers' <NEW_LINE> _title = 'Zola Books | ebook |' <NEW_LINE> if _url != _actual_url or _title != _actual_title: <NEW_LINE> <INDENT> raise AssertionError("Not on the Booksellers list page.") | raises AssertionError if page is incorrect | 625941b9be8e80087fb20ac4 |
def search(self, nums, target): <NEW_LINE> <INDENT> ans=None <NEW_LINE> low=0 <NEW_LINE> high=len(nums)-1 <NEW_LINE> while (low<=high): <NEW_LINE> <INDENT> mid =low+(high-low)/2 <NEW_LINE> if nums[mid]==target: <NEW_LINE> <INDENT> return mid <NEW_LINE> <DEDENT> elif nums[mid]<target: <NEW_LINE> <INDENT> low=mid+1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> high=mid-1 <NEW_LINE> <DEDENT> <DEDENT> return -1 | :type nums: List[int]
:type target: int
:rtype: int | 625941b9460517430c394002 |
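Note that the row's ``mid =low+(high-low)/2`` is Python 2 integer division; under Python 3 it would produce a float index. A Python 3 sketch with a usage check (the overflow-safe midpoint form is kept, although plain ``(low + high) // 2`` is fine for Python ints):

```python
def search(nums, target):
    low, high = 0, len(nums) - 1
    while low <= high:
        mid = low + (high - low) // 2   # floor division keeps mid an int
        if nums[mid] == target:
            return mid
        elif nums[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return -1

assert search([-1, 0, 3, 5, 9, 12], 9) == 4
assert search([-1, 0, 3, 5, 9, 12], 2) == -1
```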
def position(self, q=None, ee_only=False, rotate=0.0): <NEW_LINE> <INDENT> if q is None: <NEW_LINE> <INDENT> q0 = self.q[0] <NEW_LINE> q1 = self.q[1] <NEW_LINE> q2 = self.q[2] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> q0 = q[0] <NEW_LINE> q1 = q[1] <NEW_LINE> q2 = q[2] <NEW_LINE> <DEDENT> q0 += rotate <NEW_LINE> x = np.cumsum([0, self.L[0] * np.cos(q0), self.L[1] * np.cos(q0 + q1), self.L[2] * np.cos(q0 + q1 + q2)]) <NEW_LINE> y = np.cumsum([0, self.L[0] * np.sin(q0), self.L[1] * np.sin(q0 + q1), self.L[2] * np.sin(q0 + q1 + q2)]) <NEW_LINE> if ee_only: <NEW_LINE> <INDENT> return np.array([x[-1], y[-1]]) <NEW_LINE> <DEDENT> return (x, y) | Compute x,y position of the hand
q np.array: a set of angles to return positions for
ee_only boolean: only return the (x,y) of the end-effector
rotate float: how much to rotate the first joint by | 625941b9d8ef3951e32433b2 |
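The ``position`` method is planar forward kinematics: link ``i`` points along the cumulative sum of joint angles up to ``i``. A self-contained check of that cumulative-angle form, with made-up link lengths and angles:

```python
import numpy as np

def fk_planar(L, q):
    # End-effector (x, y) of a planar arm via cumulative joint angles.
    angles = np.cumsum(q)
    return np.sum(L * np.cos(angles)), np.sum(L * np.sin(angles))

L = np.array([1.0, 1.0, 1.0])                 # hypothetical link lengths
x, y = fk_planar(L, np.array([np.pi / 2, 0.0, 0.0]))
assert abs(x) < 1e-9 and abs(y - 3.0) < 1e-9  # all links point straight up
```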
def serialize(expr): <NEW_LINE> <INDENT> result = None <NEW_LINE> if isinstance(expr, string_types): <NEW_LINE> <INDENT> result = expr <NEW_LINE> <DEDENT> elif expr is not None: <NEW_LINE> <INDENT> result = '=py:{0}'.format(expr) <NEW_LINE> <DEDENT> return result | Serialize input expr into a parsable value.
:rtype: str | 625941b97b25080760e392cf |
def __init__(self, *args, file_name, path=None, mask='circular', crop=True, fill_value=0): <NEW_LINE> <INDENT> if len(args) == 2 and (isinstance(args[0], tuple) or isinstance(args[0], list)): <NEW_LINE> <INDENT> y0 = args[0][0] <NEW_LINE> x0 = args[0][1] <NEW_LINE> radius = args[1] <NEW_LINE> <DEDENT> elif len(args) == 3: <NEW_LINE> <INDENT> y0 = args[0] <NEW_LINE> x0 = args[1] <NEW_LINE> radius = args[2] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(f"Aperture expects either 2 or 3 positional args, but {len(args)} have been provided!") <NEW_LINE> <DEDENT> if isinstance(y0, int) and isinstance(x0, int): <NEW_LINE> <INDENT> self.y0 = y0 <NEW_LINE> self.x0 = x0 <NEW_LINE> self.y_offset = 0 <NEW_LINE> self.x_offset = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> y0 = float(y0) <NEW_LINE> x0 = float(x0) <NEW_LINE> self.y0 = np.rint(y0).astype(int) <NEW_LINE> self.x0 = np.rint(x0).astype(int) <NEW_LINE> self.y_offset = y0 - self.y0 <NEW_LINE> self.x_offset = x0 - self.x0 <NEW_LINE> <DEDENT> if isinstance(radius, int): <NEW_LINE> <INDENT> self.radius = radius <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(f"Aperture radius must be integer type, but is {radius})") <NEW_LINE> <DEDENT> self.fill_value = fill_value <NEW_LINE> if isinstance(file_name, str): <NEW_LINE> <INDENT> logger.debug(f"Aperture argument data '{file_name}' is interpreted as file name.") <NEW_LINE> self.data = fits.get_data(file_name) <NEW_LINE> self.var = fits.get_data(file_name=file_name, path=path, extension=self.variance_extension, ignore_missing_extension=True) <NEW_LINE> data_mask = fits.get_data(file_name=file_name , path=path, extension=self.mask_extension, dtype=bool, ignore_missing_extension=True) <NEW_LINE> if data_mask is not None: <NEW_LINE> <INDENT> self.data[data_mask] = self.fill_value <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError <NEW_LINE> <DEDENT> self.cropped = False <NEW_LINE> if crop: <NEW_LINE> <INDENT> self.crop() <NEW_LINE> <DEDENT> self.mask_mode = mask <NEW_LINE> mask = self.make_mask(mode=self.mask_mode) <NEW_LINE> self.data = np.ma.masked_array(self.data, mask=mask) <NEW_LINE> self.power_spectrum_cube = None | Instantiate Aperture object.
Long description...
Args:
*args (int, float):
Can be either two or three arguments, where the last is always interpreted as the aperture radius,
which must be int type. The other arguments can be either a coordinate tuple or two individual
coordinates.
file_name (np.ndarray, str):
2D or 3D np.ndarray that the aperture shall be extracted of. If provided as str type, this is assumed
to be a path and the object tries to read the FITS file.
mask (str, optional):
Mode that is describing, how the aperture is masked. Can be 'circular' or 'rectangular'. If 'circular',
then it creates a circular mask and the data become a np.ma.masked_array. Default is 'circular'.
crop (bool, optional):
If set to True, then the object only stores a copy of the data with radius around the center. Otherwise
all the data beyond the limits of the aperture are masked. Default is True.
fill_value (int or float, optional):
Value for filling masked pixels. Default is `0`. | 625941b97d847024c06be135 |
def test_lux_settings(self): <NEW_LINE> <INDENT> self.assertEqual(DPTLux().value_min, 0) <NEW_LINE> self.assertEqual(DPTLux().value_max, 670760) <NEW_LINE> self.assertEqual(DPTLux().unit, "lx") <NEW_LINE> self.assertEqual(DPTLux().resolution, 1) | Test attributes of DPTLux. | 625941b90a50d4780f666d04 |
def _checkResp(self, resp): <NEW_LINE> <INDENT> parsed = json.loads(resp.text) <NEW_LINE> if resp.status_code == 200 or resp.status_code == 201: <NEW_LINE> <INDENT> if self.verbose: <NEW_LINE> <INDENT> print(json.dumps(parsed, indent=4, sort_keys=True)) <NEW_LINE> <DEDENT> return parsed <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('\nOOps, somethings went Wrong!\n') <NEW_LINE> try: <NEW_LINE> <INDENT> print(parsed['error']['name']) <NEW_LINE> print(parsed['error']['message']) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> print(parsed) | Check whenever an response return an error | 625941b9aad79263cf3908b0 |
def delete_received_message(self,recipient_key,message_key): <NEW_LINE> <INDENT> response = self.received_message_resource.delete(params={'recipient_key':recipient_key,'message_key':message_key}) <NEW_LINE> return response['message-deleted'] | Deletes the received message. | 625941b930bbd722463cbc37 |
def t_BOLD(t): <NEW_LINE> <INDENT> return t | \*\* | 625941b945492302aab5e135 |
def save_as_temp_file(f): <NEW_LINE> <INDENT> data = f.read() <NEW_LINE> name = f.name <NEW_LINE> from tempfile import mkstemp <NEW_LINE> from django.core.files import File <NEW_LINE> fid, temp_filename = mkstemp() <NEW_LINE> new_f = os.fdopen(fid,'wb') <NEW_LINE> new_f.write(data) <NEW_LINE> new_f.close() <NEW_LINE> f = File(open(temp_filename)) <NEW_LINE> f.name = name <NEW_LINE> return (f, temp_filename) | takes django memory uploaded file and saves to a temp file. | 625941b97cff6e4e811177fa |
def private_shed(request): <NEW_LINE> <INDENT> user = request.user <NEW_LINE> user_privatesheds = Sheds.objects.filter(user=user, shed_type=2) <NEW_LINE> return render(request, 'sheds/private_shed.html', {'privatesheds': user_privatesheds}) | renders the view of all the private sheds a user knows about | 625941b9d53ae8145f87a0eb |
def decomposeFilename(fn): <NEW_LINE> <INDENT> r = re.match("lte(\d+)([+-])(\d+\.\d+)([-+]\d+\.\d+)(.*)", fn) <NEW_LINE> if r is None: <NEW_LINE> <INDENT> PE.warn(PE.PyAValError("Cannot decompose PHOENIX filename: " + str(fn))) <NEW_LINE> return None <NEW_LINE> <DEDENT> result = {} <NEW_LINE> result["fn"] = fn <NEW_LINE> result["teff"] = int(r.group(1)) * 100 <NEW_LINE> result["logg"] = float(r.group(3)) <NEW_LINE> if r.group(2) == "+": <NEW_LINE> <INDENT> result["logg"] *= -1.0 <NEW_LINE> <DEDENT> result["met"] = float(r.group(4)) <NEW_LINE> result["notParsed"] = r.group(5) <NEW_LINE> return result | Decompose PHOENIX filename.
Parameters
----------
fn : string
The filename.
Returns
-------
Parameters : dictionary
A dictionary with the following keys:
- teff: The effective temperature in K
- logg: Log(g [cm/s**2])
- met: Metallicity (M/H)
- fn: Complete filename
- notParsed: Part of the filename not parsed for teff, logg, and metallicity.
Note that `None` is returned if the filename could not be
parsed. | 625941b9d164cc6175782bc3 |
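The regex in the row encodes teff/100, a sign flag for log(g), and the metallicity in fixed positions. A quick demonstration on a made-up filename that follows the pattern (the real PHOENIX naming may differ in its suffix):

```python
import re

fn = "lte057-4.50-0.0.spec.gz"   # hypothetical name following the pattern
r = re.match(r"lte(\d+)([+-])(\d+\.\d+)([-+]\d+\.\d+)(.*)", fn)

teff = int(r.group(1)) * 100     # 5700 K
logg = float(r.group(3))         # 4.5; a '+' separator would flip the sign
if r.group(2) == "+":
    logg *= -1.0
met = float(r.group(4))          # -0.0
print(teff, logg, met, r.group(5))   # 5700 4.5 -0.0 .spec.gz
```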
def get_sentences(pathnames, group_on, keep_init=False, keep_inv=False): <NEW_LINE> <INDENT> assert group_on in ['C', 'E', 'M', 'P'] <NEW_LINE> sen_dict = { } <NEW_LINE> for path_name in pathnames: <NEW_LINE> <INDENT> mac_list = process_path(path_name) <NEW_LINE> if group_on == 'E': <NEW_LINE> <INDENT> mdict = get_event_sentences(mac_list, keep_init, keep_inv) <NEW_LINE> <DEDENT> elif group_on == 'C': <NEW_LINE> <INDENT> mdict = get_context_sentences(mac_list) <NEW_LINE> <DEDENT> elif group_on == 'M': <NEW_LINE> <INDENT> mdict = get_machine_sentences(mac_list, keep_init, keep_inv) <NEW_LINE> <DEDENT> elif group_on == 'P': <NEW_LINE> <INDENT> mdict = get_ps_sentences(mac_list) <NEW_LINE> <DEDENT> sen_dict.update(mdict) <NEW_LINE> <DEDENT> return sen_dict | Returns a dictionary of sentence-lists; key is machine/event name | 625941b9fff4ab517eb2f2ae |
def grad_eta(Hij, ek, fn, T, kw, mo): <NEW_LINE> <INDENT> kT = kb * T <NEW_LINE> g_eta_1 = -1/kT * diag(diag(Hij) - kw*ek) * fn * (mo-fn) <NEW_LINE> dFdmu = np.sum(np.real( 1/kT * einsum('i,i', (diag(Hij) - kw*ek).asarray().flatten(), fn * (mo-fn)))) <NEW_LINE> sumfn = np.sum(kw*fn*(mo-fn)) <NEW_LINE> if np.abs(sumfn) < 1e-10: <NEW_LINE> <INDENT> g_eta_2 = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> g_eta_2 = diag(kw * fn * (mo-fn) / sumfn * dFdmu) <NEW_LINE> <DEDENT> II = diag(ca.ones_like(fn)) <NEW_LINE> Eij = ek-ek.T + II <NEW_LINE> Fij = fn-fn.T <NEW_LINE> for k in Eij.keys(): <NEW_LINE> <INDENT> EEc = np.abs(Eij[k]) < 1e-10 <NEW_LINE> Eij[k] = np.where(EEc, 1, Eij[k]) <NEW_LINE> Fij[k] = np.where(EEc, 0, Fij[k]) <NEW_LINE> <DEDENT> g_eta_3 = Fij / Eij * Hij * (1-II) <NEW_LINE> g_eta = (g_eta_1 + g_eta_2 + g_eta_3) <NEW_LINE> return g_eta | Computes ∂L/∂η
Arguments:
Hij -- subspace Hamiltonian
ek -- Fermi parameters ϵ_n
fn -- occupation numbers
T -- temperature
kw -- kpoint weights
mo -- max occupancy, mo=1 in magnetic case
Returns:
g_eta -- gradient wrt η of the free-energy Lagrangian | 625941b9a4f1c619b28afeb6 |
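Read off the code, the gradient is assembled from a diagonal entropy-like term, a chemical-potential coupling, and an off-diagonal subspace-rotation term; degenerate pairs (ε_n ≈ ε_m) are zeroed before the division. In LaTeX (notation mine, following the variable names above):

```latex
g^{(1)}_{nn} = -\frac{1}{k_B T}\,(H_{nn} - k_w\,\epsilon_n)\, f_n\,(m_o - f_n)

g^{(2)}_{nn} = \frac{k_w\, f_n\,(m_o - f_n)}{\sum_m k_w\, f_m\,(m_o - f_m)}
               \,\frac{\partial F}{\partial\mu},
\qquad
\frac{\partial F}{\partial\mu}
  = \frac{1}{k_B T}\sum_n (H_{nn} - k_w\,\epsilon_n)\, f_n\,(m_o - f_n)

g^{(3)}_{nm} = \frac{f_n - f_m}{\epsilon_n - \epsilon_m}\, H_{nm}
  \quad (n \ne m),
\qquad
g_\eta = g^{(1)} + g^{(2)} + g^{(3)}
```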
def getattr(self, attrname): <NEW_LINE> <INDENT> return self.attrib[attrname] | gets an attribute of the tag | 625941b97047854f462a1282 |
def save_inventory(self, inventory): <NEW_LINE> <INDENT> with SimpleFlock( self.lockfile, timeout=3 ): <NEW_LINE> <INDENT> with open( self.json_path, 'w' ) as inv_file: <NEW_LINE> <INDENT> if self.pretty: <NEW_LINE> <INDENT> inv_file.write( json.dumps( inventory, sort_keys=True, indent=4 ) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> inv_file.write( json.dumps( inventory ) ) | Saves the inventory from a dictionary with the inventory contents from the Inventory class | 625941b99f2886367277a706 |
def step(self, t, u): <NEW_LINE> <INDENT> return self._solver(self.rhs, t, u, usereverse=True) | Computes the new step for whole system
Args:
t := current time
u := current system vec
Returns:
new system vec, storage_z, storage_w | 625941b95fcc89381b1e1539 |
def getQValue(self, state, action): <NEW_LINE> <INDENT> qValue = 0 <NEW_LINE> for nextstate in self.mdp.getTransitionStatesAndProbs(state,action): <NEW_LINE> <INDENT> qValue += nextstate[1]*(self.mdp.getReward(state,action,nextstate[0]) + self.discount*self.values[nextstate[0]]) <NEW_LINE> <DEDENT> return qValue | The q-value of the state action pair
(after the indicated number of value iteration
passes). Note that value iteration does not
necessarily create this quantity and you may have
to derive it on the fly. | 625941b99c8ee82313fbb5ea |
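The loop above is the standard Q-value backup of value iteration, evaluated with the current value estimates:

```latex
Q(s, a) = \sum_{s'} T(s, a, s') \,\bigl[\, R(s, a, s') + \gamma\, V(s') \,\bigr]
```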
def __or__(self, other): <NEW_LINE> <INDENT> if isinstance(other, self.__class__): <NEW_LINE> <INDENT> return _Buttons(self.union(other)) <NEW_LINE> <DEDENT> elif isinstance(other, Button): <NEW_LINE> <INDENT> return _Buttons(self.union({other})) <NEW_LINE> <DEDENT> return NotImplemented | Add another Button or _Buttons set to this set
:param other: Button or _Buttons to add to this set of Buttons
:return: The superset of this set of buttons and the other set of buttons | 625941b9fb3f5b602dac3504 |
def __init__(self, *args): <NEW_LINE> <INDENT> this = _blocks_swig5.new_unpacked_to_packed_bb_sptr(*args) <NEW_LINE> try: self.this.append(this) <NEW_LINE> except: self.this = this | __init__(boost::shared_ptr<(gr::blocks::unpacked_to_packed_bb)> self) -> unpacked_to_packed_bb_sptr
__init__(boost::shared_ptr<(gr::blocks::unpacked_to_packed_bb)> self, unpacked_to_packed_bb p) -> unpacked_to_packed_bb_sptr | 625941b921a7993f00bc7b5f |
def convolve_rebin_and_convert(self, unit=None, **kwargs): <NEW_LINE> <INDENT> new_frames = convolve_rebin_and_convert(*self.values, unit=unit, names=self.filter_names, **kwargs) <NEW_LINE> self.remove_all() <NEW_LINE> for frame in new_frames: self.append(frame) | This function ...
:param unit:
:param kwargs:
:return: | 625941b94527f215b584c2d0 |
def mkdir_override_symlink(pkg_dir): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> pkg_dir.mkdir() <NEW_LINE> <DEDENT> except FileExistsError: <NEW_LINE> <INDENT> if pkg_dir.is_symlink(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> pkg_dir.unlink() <NEW_LINE> continue <NEW_LINE> <DEDENT> except IsADirectoryError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> break | Make *pkg_dir*, replacing any existing symlink in its place. See the
notes in :meth:`TheScribe.write_package_index` for more information. | 625941b92c8b7c6e89b35638 |
def test_change_title_case(self): <NEW_LINE> <INDENT> data = new_document_data() <NEW_LINE> new_title = 'TeST DoCuMent' <NEW_LINE> data.update(title=new_title) <NEW_LINE> data.update(form='doc') <NEW_LINE> response = post(self.client, 'wiki.edit_document', data, args=[self.d.full_path]) <NEW_LINE> eq_(200, response.status_code) <NEW_LINE> doc = Document.uncached.get(pk=self.d.pk) <NEW_LINE> eq_(new_title, doc.title) | Changing the case of some letters in the title should work. | 625941b923e79379d52ee3dd |
def get_at_index(self, index): <NEW_LINE> <INDENT> if not (0 <= index < self.size): <NEW_LINE> <INDENT> raise ValueError('List index out of range: {}'.format(index)) <NEW_LINE> <DEDENT> node = self.head <NEW_LINE> while index > 0: <NEW_LINE> <INDENT> node = node.next <NEW_LINE> index -= 1 <NEW_LINE> <DEDENT> return node.data | Return the item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list size.
Best case running time: O(1) if the given index is 0, since the node is found at the head.
Worst case running time: O(n) if the given index is near the tail, since the loop walks index nodes from the head. | 625941b921bff66bcd6847ca |
def community_topic_delete(self, id, **kwargs): <NEW_LINE> <INDENT> api_path = "/api/v2/community/topics/{id}.json" <NEW_LINE> api_path = api_path.format(id=id) <NEW_LINE> return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/help_center/topics#delete-topic | 625941b95e10d32532c5eda4 |
def start_read(self, max_size=1024, timeout=None): <NEW_LINE> <INDENT> if self.__reader: <NEW_LINE> <INDENT> self.__reader = None <NEW_LINE> <DEDENT> self.__reader = self.__Reader(self.read, max_size, timeout) <NEW_LINE> self.__reader.start() | Generate a reader instance and start it to begin reading data
from the serial port. | 625941b95166f23b2e1a4fce |
def __init__(self, host, port): <NEW_LINE> <INDENT> self.__clients = {} <NEW_LINE> self.__host = host <NEW_LINE> self.__port = port <NEW_LINE> self.__limit = 10 | initializes the server with a dictionary of clients | 625941b931939e2706e4cce5 |
def download(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> filename = request.args[0] <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> session.error("Need to specify the file to download!") <NEW_LINE> redirect(URL(f="index")) <NEW_LINE> <DEDENT> tablename = filename.split(".", 1)[0] <NEW_LINE> if "_" in tablename: <NEW_LINE> <INDENT> table = s3db.table(tablename) <NEW_LINE> <DEDENT> return response.download(request, db) | Download a file | 625941b92eb69b55b151c720 |
def test_world_to_pixels_vlass(): <NEW_LINE> <INDENT> header_filename = os.path.join(TESTDATA_DIR, VLASS_4D_CUBE_HEADER) <NEW_LINE> header = fits.Header.fromtextfile(header_filename) <NEW_LINE> world = ['circle 168.34719985367971 76.18699791158396 0.01', 'BAND 0.04456576 0.11662493', 'POL I'] <NEW_LINE> test_subject = Transform() <NEW_LINE> pixel_cutout_hdu = test_subject.world_to_pixels(world, header) <NEW_LINE> assert pixel_cutout_hdu is not None <NEW_LINE> ranges = pixel_cutout_hdu.get_ranges() <NEW_LINE> assert len(ranges) == 4 <NEW_LINE> assert ranges[1] == (4193, 4314) <NEW_LINE> assert ranges[2] == (1, 3) <NEW_LINE> assert ranges[3] == (1, 1) | CIRCLE 168.34719985367971 76.18699791158396 0.01
BAND 0.04456576 0.11662493 POL I
cutout=[0][2938:3062,4191:4316,1:2,1:1] | 625941b976d4e153a657e9a6 |
def deposit_into_liquidity_pool(self, pool, amount_a, amount_b, account=None, **kwargs): <NEW_LINE> <INDENT> if not account: <NEW_LINE> <INDENT> if "default_account" in self.config: <NEW_LINE> <INDENT> account = self.config["default_account"] <NEW_LINE> <DEDENT> <DEDENT> if not account: <NEW_LINE> <INDENT> raise ValueError("You need to provide an account") <NEW_LINE> <DEDENT> account = Account(account, blockchain_instance=self) <NEW_LINE> pool_id = self._find_liquidity_pool(pool) <NEW_LINE> num_id_a = int(amount_a.asset["id"].split(".")[-1]) <NEW_LINE> num_id_b = int(amount_b.asset["id"].split(".")[-1]) <NEW_LINE> if(num_id_b < num_id_a): <NEW_LINE> <INDENT> amount_a, amount_b = amount_b, amount_a <NEW_LINE> <DEDENT> op = operations.Liquidity_pool_deposit( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "account": account["id"], "pool": pool_id, "amount_a": amount_a.json(), "amount_b": amount_b.json(), "extensions": [], } ) <NEW_LINE> return self.finalizeOp(op, account, "active", **kwargs) | Deposit assets into a liquidity pool
:param str,Asset pool: The liquidity pool to use. Can be the pool id
as a string, or can be an Asset, asset_id, or symbol of the
share asset for the pool.
:param Amount amount_a:
:param Amount amount_b: | 625941b91f5feb6acb0c49ca |
def tap_ad(self): <NEW_LINE> <INDENT> log.logger.info("开始点击广告") <NEW_LINE> self.ad_item.tap() <NEW_LINE> log.logger.info("完成点击") <NEW_LINE> if self.wait_activity(activities.ActivityNames.WEBVIEW, 10): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False | Summary:
Tap the advertisement.
Returns: | 625941b963b5f9789fde6f5b |
def inc_dist(): <NEW_LINE> <INDENT> global distance <NEW_LINE> t.pencolor(random.random(), random.random(), random.random()) <NEW_LINE> distance += 5 <NEW_LINE> t.forward(distance) | Increase the distance turtle moves | 625941b9d6c5a10208143ebd |
def display_pokemon_by_type(pokemon_by_type): <NEW_LINE> <INDENT> pokemon_types = list(pokemon_by_type.keys()) <NEW_LINE> prompt = ('enter a type from one of the following: {0}\n'.format( ', '.join(pokemon_types))) <NEW_LINE> warning = 'Unrecognized type' <NEW_LINE> pokemon_type = input(prompt) <NEW_LINE> while pokemon_type.lower() not in pokemon_types: <NEW_LINE> <INDENT> print(warning) <NEW_LINE> pokemon_type = input(prompt) <NEW_LINE> <DEDENT> pokemons = pokemon_by_type[pokemon_type] <NEW_LINE> print('\n'.join(pokemons)) | Asks the user for a type of pokemon and displays the names of all
pokemon with that type.
If the user enters an invalid type, warns the user and repeats until they
enter a valid one.
Args:
pokemon_by_type: a dict where:
-each key is a pokemon type (str)
-each value is a list of all pokemon (strs) with the given type
Returns:
None | 625941b9293b9510aa2c310e |
def get_owner_object(self): <NEW_LINE> <INDENT> return None | While the model has a user foreign key, this is editable by all
managers in the gym. | 625941b97b25080760e392d0 |
def stop_traffic(self, extra_kwargs: Optional[dict] = None) -> str: <NEW_LINE> <INDENT> return self.execute_command("stop_traffic", extra_kwargs or {}) | Execute a stop-traffic command for the service. | 625941b926238365f5f0ecdf |
def update(self, **kwargs: Any) -> "UpdateQuery": <NEW_LINE> <INDENT> return UpdateQuery( db=self._db, model=self.model, update_kwargs=kwargs, q_objects=self._q_objects, annotations=self._annotations, custom_filters=self._custom_filters, limit=self._limit, orderings=self._orderings, ) | Update all objects in QuerySet with given kwargs.
.. admonition: Example:
.. code-block:: py3
await Employee.filter(occupation='developer').update(salary=5000)
Instead of returning a resultset, this updates the data in the DB itself. | 625941b9baa26c4b54cb0f99 |
def fit(self, matrix = None, dataframe:pd.DataFrame() = None, corpora:List[str] = None, features:List[str] = None): <NEW_LINE> <INDENT> self.__set_properties(matrix, dataframe, corpora, features) <NEW_LINE> self.__optimize_kmeans() <NEW_LINE> self.__fit_kmeans() <NEW_LINE> self.__cluster_top_terms() <NEW_LINE> self.__plot_kmeans() | user callable method | 625941b94d74a7450ccd4039 |
def main(): <NEW_LINE> <INDENT> convert(**vars(parse_args())) | main
Entry point. | 625941b9a4f1c619b28afeb7 |
def category_from_fixed_mappings(labels): <NEW_LINE> <INDENT> if not labels: <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> category_map = read_category_map_as_json() <NEW_LINE> category_scores = {} <NEW_LINE> top_category = None <NEW_LINE> for label in labels: <NEW_LINE> <INDENT> for k, v in category_map['categorymaps'].iteritems(): <NEW_LINE> <INDENT> if label['description'] in v: <NEW_LINE> <INDENT> if k in category_scores.keys(): <NEW_LINE> <INDENT> category_scores[k] += label['score'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> category_scores[k] = label['score'] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> num_category_scores = len(category_scores) <NEW_LINE> if num_category_scores > 0: <NEW_LINE> <INDENT> sorted_scores = sorted([(v, k) for (k, v) in category_scores.iteritems()], reverse=True) <NEW_LINE> category_tuple = sorted_scores[0] <NEW_LINE> if category_tuple[0] > 0: <NEW_LINE> <INDENT> top_category = category_tuple[1] <NEW_LINE> <DEDENT> <DEDENT> return top_category | Gets the top category from fixed label to category mappings. | 625941b9ad47b63b2c509dff |
def get_request(self, cls=None): <NEW_LINE> <INDENT> if cls is None: <NEW_LINE> <INDENT> cls = self.request_class <NEW_LINE> <DEDENT> return cls(self.get_environ()) | Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use. | 625941b997e22403b379ce0f |
def breadcrumbs(self, builder=None, ln=None): <NEW_LINE> <INDENT> ln = cfg.get('CFG_SITE_LANG') if ln is None else ln <NEW_LINE> breadcrumbs = [] <NEW_LINE> if self.most_specific_dad is not None: <NEW_LINE> <INDENT> breadcrumbs = self.most_specific_dad.breadcrumbs(builder=builder, ln=ln) <NEW_LINE> <DEDENT> if builder is not None: <NEW_LINE> <INDENT> crumb = builder(self) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> crumb = dict( text=self.name_ln, url=url_for('search.collection', name=self.name)) <NEW_LINE> <DEDENT> breadcrumbs.append(crumb) <NEW_LINE> return breadcrumbs | Return breadcrumbs for collection. | 625941b950812a4eaa59c19b |
def brick_bridge(bridge, brick_l, brick_s): <NEW_LINE> <INDENT> ans1 = bridge//5 <NEW_LINE> if ans1 > brick_l: <NEW_LINE> <INDENT> ans1 = brick_l <NEW_LINE> <DEDENT> if brick_s - ans1*5 > bridge: <NEW_LINE> <INDENT> print("-1") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print(brick_s-(ans1*5)) | a | 625941b9236d856c2ad44654 |
def doDataMap(self): <NEW_LINE> <INDENT> dtype = MrcMode2dtype(self.hdr.PixelType) <NEW_LINE> shape = shapeFromHdr(self.hdr) <NEW_LINE> self.data = self.d.view() <NEW_LINE> self.data.dtype = dtype <NEW_LINE> n0 = self.data.shape[0] <NEW_LINE> n1 = N.prod(shape) <NEW_LINE> if n0 < n1: <NEW_LINE> <INDENT> print('** WARNING **: file truncated - shape from header: %s ' 'expected to get %s but got %s' % (str(shape), str(N.prod(shape)), str(n0))) <NEW_LINE> n1 = n1 // shape[0] <NEW_LINE> s0 = n0 // n1 <NEW_LINE> shape = (s0,) + shape[1:] <NEW_LINE> self.data = self.data[:(s0*n1)] <NEW_LINE> <DEDENT> elif n0 > n1: <NEW_LINE> <INDENT> self.data = self.data[:n1] <NEW_LINE> <DEDENT> self.data.shape = shape <NEW_LINE> if self.isByteSwapped: <NEW_LINE> <INDENT> self.data = self.data.newbyteorder() | Map a NumPy array to the highest resolution data set in the file.
Creates self.data as the mapped array. | 625941b98a349b6b435e7fea |
def dfs_paths(graph: dict, start, goal): <NEW_LINE> <INDENT> stack = [(start, [start])] <NEW_LINE> while stack: <NEW_LINE> <INDENT> vertex, path = stack.pop() <NEW_LINE> if vertex in graph.keys(): <NEW_LINE> <INDENT> for next_ in graph[vertex] - set(path): <NEW_LINE> <INDENT> if next_ == goal: <NEW_LINE> <INDENT> yield path + [next_] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stack.append((next_, path + [next_])) | Function that finds all possible routes
from one vertex of the graph to another. Visiting
the same vertex more than once is not considered.
:param graph:
:param start:
:param goal:
:return: | 625941b9d10714528d5ffb55 |
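A usage sketch for the generator above, on a small directed graph of adjacency sets (vertices with no outgoing edges are simply absent from the dict):

```python
def dfs_paths(graph, start, goal):
    stack = [(start, [start])]
    while stack:
        vertex, path = stack.pop()
        if vertex in graph:
            for nxt in graph[vertex] - set(path):
                if nxt == goal:
                    yield path + [nxt]
                else:
                    stack.append((nxt, path + [nxt]))

graph = {'A': {'B', 'C'}, 'B': {'D'}, 'C': {'D'}}
assert sorted(dfs_paths(graph, 'A', 'D')) == [['A', 'B', 'D'], ['A', 'C', 'D']]
```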
def latest_view(self, request): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> data = serializers.serialize('json',(self.klass.objects.latest('pk'),)) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> data = None <NEW_LINE> <DEDENT> return HttpResponse(data, mimetype="application/json") | This view returns the latest entry in the table. | 625941b9507cdc57c6306b4a |
def log_model(self, path, opt, epoch, fitness_score, best_model=False): <NEW_LINE> <INDENT> model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), 'epochs_trained': epoch + 1, 'save period': opt.save_period, 'project': opt.project, 'total_epochs': opt.epochs, 'fitness_score': fitness_score }) <NEW_LINE> model_artifact.add_file(str(path / 'last.pt'), name='last.pt') <NEW_LINE> wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) <NEW_LINE> LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") | Log the model checkpoint as W&B artifact
arguments:
path (Path) -- Path of directory containing the checkpoints
opt (namespace) -- Command line arguments for this run
epoch (int) -- Current epoch number
fitness_score (float) -- fitness score for current epoch
best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. | 625941b9dd821e528d63b021 |
def main(): <NEW_LINE> <INDENT> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fduhole.settings') <NEW_LINE> try: <NEW_LINE> <INDENT> from django.core.management import execute_from_command_line <NEW_LINE> <DEDENT> except ImportError as exc: <NEW_LINE> <INDENT> raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) <NEW_LINE> <DEDENT> execute_from_command_line(sys.argv) | Run administrative tasks. | 625941b932920d7e50b28042 |
def file_import_info(filename: str, importer: Any) -> FileImportInfo: <NEW_LINE> <INDENT> file = cache.get_file(filename) <NEW_LINE> try: <NEW_LINE> <INDENT> account = importer.file_account(file) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> account = "" <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> date = importer.file_date(file) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> date = datetime.date.today() <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> name = importer.file_name(file) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> name = path.basename(filename) <NEW_LINE> <DEDENT> return FileImportInfo(importer.name(), account, date, name) | Generate info about a file with an importer. | 625941b9e8904600ed9f1d9f |
def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True): <NEW_LINE> <INDENT> if ie_key: <NEW_LINE> <INDENT> ies = [self.get_info_extractor(ie_key)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ies = self._ies <NEW_LINE> <DEDENT> for ie in ies: <NEW_LINE> <INDENT> if not ie.suitable(url): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if not ie.working(): <NEW_LINE> <INDENT> self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> ie_result = ie.extract(url) <NEW_LINE> if ie_result is None: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if isinstance(ie_result, list): <NEW_LINE> <INDENT> ie_result = { '_type': 'compat_list', 'entries': ie_result, } <NEW_LINE> <DEDENT> self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) <NEW_LINE> if process: <NEW_LINE> <INDENT> return self.process_ie_result(ie_result, download, extra_info) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return ie_result <NEW_LINE> <DEDENT> <DEDENT> except ExtractorError as de: <NEW_LINE> <INDENT> self.report_error(compat_str(de), de.format_traceback()) <NEW_LINE> break <NEW_LINE> <DEDENT> except MaxDownloadsReached: <NEW_LINE> <INDENT> raise <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> if self.params.get('ignoreerrors', False): <NEW_LINE> <INDENT> self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) <NEW_LINE> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.report_error('no suitable InfoExtractor: %s' % url) | Returns a list with a dictionary for each video we find.
If 'download', also downloads the videos.
extra_info is a dict containing the extra values to add to each result
| 625941b94e696a04525c92ca |
def plusOne(self, digits): <NEW_LINE> <INDENT> list_to_num = int("".join([str(x) for x in digits])) + 1 <NEW_LINE> num_to_list = [int(x) for x in str(list_to_num)] <NEW_LINE> print(num_to_list) <NEW_LINE> return num_to_list | :type digits: List[int]
:rtype: List[int] | 625941b9d8ef3951e32433b3 |
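The row above takes the string round-trip; as a point of comparison, the same result without string conversion propagates a carry from the least-significant digit:

```python
def plus_one_carry(digits):
    out = digits[:]
    for i in range(len(out) - 1, -1, -1):
        if out[i] < 9:          # no carry past this digit
            out[i] += 1
            return out
        out[i] = 0              # 9 rolls over, carry continues
    return [1] + out            # all nines: e.g. 999 + 1 -> 1000

assert plus_one_carry([1, 2, 3]) == [1, 2, 4]
assert plus_one_carry([9, 9]) == [1, 0, 0]
```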
def to_native(self, value): <NEW_LINE> <INDENT> if value is None: <NEW_LINE> <INDENT> return value <NEW_LINE> <DEDENT> if isinstance(value, GEOSGeometry): <NEW_LINE> <INDENT> value = { "latitude": str(value.y), "longitude": str(value.x) } <NEW_LINE> <DEDENT> return value | Transform POINT object to json. | 625941b9cdde0d52a9e52ea5 |
def get_expect_arrive_element(self): <NEW_LINE> <INDENT> return self.get_by_element.get_element(self.section, 'expected_arrive') | Get the element info for the expected-arrival field.
:return: | 625941b9cc40096d615957c9 |
def child_parent_relations(self, subj, obj, graph=None): <NEW_LINE> <INDENT> if graph is None: <NEW_LINE> <INDENT> graph = self.get_graph() <NEW_LINE> <DEDENT> preds = set() <NEW_LINE> for _,ea in graph[obj][subj].items(): <NEW_LINE> <INDENT> preds.add(ea['pred']) <NEW_LINE> <DEDENT> logging.debug('{}->{} = {}'.format(subj,obj,preds)) <NEW_LINE> return preds | Get all relationship type ids between a subject and a parent.
Typically only one relation ID is returned, but in some cases there may be more than one
Arguments
---------
subj: string
Child (subject) id
obj: string
Parent (object) id
Returns
-------
list | 625941b957b8e32f52483316 |
def require_multigpu(test_case): <NEW_LINE> <INDENT> if not _torch_available: <NEW_LINE> <INDENT> return unittest.skip("test requires PyTorch")(test_case) <NEW_LINE> <DEDENT> import torch <NEW_LINE> if torch.cuda.device_count() < 2: <NEW_LINE> <INDENT> return unittest.skip("test requires multiple GPUs")(test_case) <NEW_LINE> <DEDENT> return test_case | Decorator marking a test that requires a multi-GPU setup (in PyTorch).
These tests are skipped on a machine without multiple GPUs.
To run *only* the multigpu tests, assuming all test names contain multigpu:
$ pytest -sv ./tests -k "multigpu" | 625941b98e71fb1e9831d623 |
def int_func(*args): <NEW_LINE> <INDENT> word = input("Введите текст буквами нижнего регистра: ") <NEW_LINE> print(word.title()) <NEW_LINE> return | Function that converts the first letter of each word to upper case
:param args: words
:return: words with the first letter capitalized | 625941b94f88993c3716beea |
def encrypt_secret(secret): <NEW_LINE> <INDENT> encrypted_secret = hashlib.sha1(secret.encode()) <NEW_LINE> encrypted_hexa = encrypted_secret.hexdigest() <NEW_LINE> return encrypted_secret.digest() | This function hashes the secret with SHA-1 and returns the raw digest
Params: string secret
Returns: bytes digest | 625941b992d797404e304000 |
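Note that the row computes ``hexdigest()`` but returns the raw ``digest()``; the two forms encode the same 20 SHA-1 bytes:

```python
import hashlib

h = hashlib.sha1(b"secret")
raw = h.digest()        # 20 raw bytes
hexa = h.hexdigest()    # 40-character hex string of the same bytes
assert len(raw) == 20 and len(hexa) == 40
assert bytes.fromhex(hexa) == raw
```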
def extract_lines(self): <NEW_LINE> <INDENT> self.__lines = [] <NEW_LINE> script = self.script.replace('</p>', '').replace('<p>', '<br>') <NEW_LINE> script = script.replace('<b>', '').replace('</b>', '') <NEW_LINE> script = script.replace('\n.\n', '\n\n') <NEW_LINE> soup = BeautifulSoup(script, 'html.parser') <NEW_LINE> self._reset_dialog() <NEW_LINE> for br in soup.find_all('br'): <NEW_LINE> <INDENT> br.replace_with(self.br_separator_token) <NEW_LINE> <DEDENT> body_text = soup.find('table').text <NEW_LINE> body_text = body_text.replace('\n', ' ') <NEW_LINE> body_text = body_text.replace(self.br_separator_token, '\n\n') <NEW_LINE> in_break = False <NEW_LINE> for line in body_text.split('\n'): <NEW_LINE> <INDENT> line = line.strip() <NEW_LINE> if len(line) == 0: <NEW_LINE> <INDENT> if in_break: <NEW_LINE> <INDENT> self._flush() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> in_break = True <NEW_LINE> <DEDENT> continue <NEW_LINE> <DEDENT> in_break = False <NEW_LINE> if self._is_end_of_script(line): <NEW_LINE> <INDENT> self._flush() <NEW_LINE> break <NEW_LINE> <DEDENT> speaker_semicolon_match = self.speaker_semicolon_matcher.match(line) <NEW_LINE> if speaker_semicolon_match: <NEW_LINE> <INDENT> line = '{}:{}'.format(speaker_semicolon_match.group(1), speaker_semicolon_match.group(2)) <NEW_LINE> <DEDENT> line_with_speaker_match = self.line_with_speaker_matcher.match(line) <NEW_LINE> if line_with_speaker_match: <NEW_LINE> <INDENT> speaker = line_with_speaker_match.group(1).strip() <NEW_LINE> text = line_with_speaker_match.group(2).strip() <NEW_LINE> self._on_speaker_and_text_match(speaker, text) <NEW_LINE> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._on_text_only_match(line) <NEW_LINE> continue <NEW_LINE> <DEDENT> <DEDENT> if len(self.__speaker) > 0 and len(self.__dialog) > 0: <NEW_LINE> <INDENT> self._append_line() <NEW_LINE> <DEDENT> return self.__lines | Extract lines of dialog.
Returns:
list: list of tuples containing (speaker name, line of dialog) | 625941b93c8af77a43ae3613 |
def _get_filtered_completed_exp_summaries(exploration_summaries, exploration_ids): <NEW_LINE> <INDENT> nonexistent_completed_exp_ids = [] <NEW_LINE> filtered_completed_exp_summaries = [] <NEW_LINE> for index, exploration_summary in enumerate(exploration_summaries): <NEW_LINE> <INDENT> if exploration_summary is None: <NEW_LINE> <INDENT> nonexistent_completed_exp_ids.append(exploration_ids[index]) <NEW_LINE> <DEDENT> elif exploration_summary.status != feconf.ACTIVITY_STATUS_PUBLIC: <NEW_LINE> <INDENT> nonexistent_completed_exp_ids.append(exploration_ids[index]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> filtered_completed_exp_summaries.append(exploration_summary) <NEW_LINE> <DEDENT> <DEDENT> return filtered_completed_exp_summaries, nonexistent_completed_exp_ids | Returns a list of summaries of the completed exploration ids and the
ids of explorations that are no longer present.
Args:
exploration_summaries: list(ExplorationSummary). The list of exploration
summary domain objects to be filtered.
exploration_ids: list(str). The ids of the explorations corresponding to
the exploration summary domain objects.
Returns:
tuple. A 2-tuple whose elements are as follows:
- list(ExplorationSummary). Filtered list of ExplorationSummary domain
objects of the completed explorations.
- list(str). The ids of the explorations that are no longer present. | 625941b955399d3f0558852a |
def isExpandable(self) -> bool: <NEW_LINE> <INDENT> return self._expandable | Return whether this item is expandable. | 625941b967a9b606de4a7d33
def draw_debug(Tower, ship, screen): <NEW_LINE> <INDENT> for sector in getSectors(ship): <NEW_LINE> <INDENT> pygame.draw.rect(screen, (255, 0, 0), sector, 1) | Draw the sectors to the screen. | 625941b90a50d4780f666d06 |
def process_bracketed_csv_list(in_toks): <NEW_LINE> <INDENT> if in_toks[0] != "(": <NEW_LINE> <INDENT> raise SyntaxError("Expected '(' and got %s" % in_toks[0]) <NEW_LINE> <DEDENT> out_toks = [] <NEW_LINE> idx = 1 <NEW_LINE> while idx < len(in_toks): <NEW_LINE> <INDENT> if in_toks[idx] == ")": <NEW_LINE> <INDENT> return out_toks, idx+1 <NEW_LINE> <DEDENT> out_toks.append(in_toks[idx]) <NEW_LINE> if in_toks[idx+1] == ",": <NEW_LINE> <INDENT> idx += 1 <NEW_LINE> <DEDENT> elif in_toks[idx+1] != ")": <NEW_LINE> <INDENT> raise SyntaxError("Error in CSV list definition. Expect ',' or ')' and got '%s'" % in_toks[idx+1]) <NEW_LINE> <DEDENT> idx += 1 <NEW_LINE> <DEDENT> raise SyntaxError("Reached end of input while processing bracketed CSV list without finding ')'") | Assume the set of toks is a bracketed group of comma-separated values | 625941b97c178a314d6ef2cf
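A short self-contained check of the tokenizer behavior above; the token list is a made-up example standing in for output from an upstream lexer:

    toks = ["(", "a", ",", "b", ")", "rest"]
    values, consumed = process_bracketed_csv_list(toks)
    assert values == ["a", "b"] and consumed == 5  # "rest" is left for the caller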
def getall(self, layer_name: str): <NEW_LINE> <INDENT> if not self.has_layer(layer_name): <NEW_LINE> <INDENT> raise ValueError("layer {} does not exist".format(layer_name)) <NEW_LINE> <DEDENT> nodes = self.root.findall('.//{}'.format(layer_name)) <NEW_LINE> return [layers[layer_name].object(node) for node in nodes] | Return a list of layer objects for each layer carrying the given layer-name
| 625941b9d58c6744b4257ad7 |
@login_required <NEW_LINE> def add_to_cart(request, id): <NEW_LINE> <INDENT> quantity = int(request.POST.get('quantity')) <NEW_LINE> cart = request.session.get('cart', {}) <NEW_LINE> cart[id] = quantity <NEW_LINE> request.session['cart'] = cart <NEW_LINE> return redirect(get_issue_detail, id) | Add a feature ticket to the cart | 625941b991af0d3eaac9b88b
def function_with_docstring(): <NEW_LINE> <INDENT> pass | Just a dummy function. | 625941b93539df3088e2e1c2 |
def add_terms_frequencies(self, document_hash: str, freq: dict) -> None: <NEW_LINE> <INDENT> self.__connector.hset(DOCUMENT_PREFIX + document_hash, FREQ_KEY, json.dumps(freq)) | Stores the per-document word-frequency counter in the DB under the document hash
:param document_hash: hash identifying the document
:param freq: mapping of term to occurrence count
:return: None | 625941b960cbc95b062c63c0
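A minimal sketch of how the row above might be driven with redis-py; DOCUMENT_PREFIX, FREQ_KEY, and the sample values are assumptions, since the row imports them from elsewhere:

    import json
    import redis

    DOCUMENT_PREFIX = "doc:"  # assumed; the real constant lives outside this row
    FREQ_KEY = "freq"         # assumed

    r = redis.Redis()
    freq = {"hello": 3, "world": 1}
    # mirrors the row's body: one hash field holding the JSON-encoded counter
    r.hset(DOCUMENT_PREFIX + "abc123", FREQ_KEY, json.dumps(freq))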
def nftTransfer(address, *ids): <NEW_LINE> <INDENT> return Transaction( typeGroup=9000, type=2, asset={ "nftTransfer": { "nftIds": ids[:10], "recipientId": address, } } ) | Build an NFT transfer transaction.
Args:
address (str): recipient address.
ids (list): list of NFT id to send (maximum=10).
Returns:
dposlib.ark.tx.Transaction: orphan transaction. | 625941b90383005118ecf45b |
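A hedged usage sketch for the row above; the recipient address and NFT ids are placeholders, not real chain data:

    # hypothetical values for illustration only
    tx = nftTransfer("D5e2FzTcqAg8ePlaceholder", "nft-id-1", "nft-id-2")
    # tx is a dposlib.ark.tx.Transaction; at most the first 10 ids are kept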
def __init__(self, movie_title, movie_stryln, poster_image, trailer_utbe): <NEW_LINE> <INDENT> self.title = movie_title <NEW_LINE> self.storyline = movie_stryln <NEW_LINE> self.poster_image_url = poster_image <NEW_LINE> self.trailer_youtube_url = trailer_utbe | __init__() is a special method that initializes a new instance
and stores all of the movie details. | 625941b9d164cc6175782bc5
def on_btn_add_process__clicked(self, button): <NEW_LINE> <INDENT> if self.project_tree.meta.model_dimension in [1, 3]: <NEW_LINE> <INDENT> self.toast('Only 2d supported') <NEW_LINE> return <NEW_LINE> <DEDENT> if not self.project_tree.get_layers(): <NEW_LINE> <INDENT> self.toast("No layer defined, yet!") <NEW_LINE> return <NEW_LINE> <DEDENT> new_process = Process(name='', rate_constant='') <NEW_LINE> self.project_tree.undo_stack.start_new_action('Add process', new_process) <NEW_LINE> self.project_tree.add_process(new_process) <NEW_LINE> self.project_tree.project_data.expand(self.project_tree.process_list) <NEW_LINE> self.project_tree.project_data.select(new_process) <NEW_LINE> process_form = ProcessForm(new_process, self.project_tree) <NEW_LINE> if self.get_slave('workarea'): <NEW_LINE> <INDENT> self.detach_slave('workarea') <NEW_LINE> <DEDENT> self.attach_slave('workarea', process_form) <NEW_LINE> process_form.focus_topmost() | Add a new process to the model
| 625941b966656f66f7cbc021 |
def open_etherpad(pad, author, validUntil, etherpad=None, **kwargs): <NEW_LINE> <INDENT> if not etherpad: <NEW_LINE> <INDENT> etherpad = get_etherpad_client() <NEW_LINE> <DEDENT> data = etherpad.createSession( groupID=pad.groupid, authorID=author.etherpad_id, validUntil=validUntil ) <NEW_LINE> response = jsonify( success=True, pad='%s/p/%s' % (settings.ETHERPAD_URL, pad.padid), title=pad.title, **kwargs ) <NEW_LINE> set_cookie(response, 'sessionID', data['sessionID']) <NEW_LINE> return response | Opens an etherpad-lite document. The etherpad client is used
to get a cookie for a session on the pad. | 625941b90a366e3fb873e68e |
def date_today(): <NEW_LINE> <INDENT> return date.today().strftime('%A %d %B') | Return today's date in a page-friendly format, e.g.
date_today() --> 'Saturday 14 November' | 625941b9099cdd3c635f0ad3 |
def init_host(self, host): <NEW_LINE> <INDENT> return | Initialize anything that is necessary for the driver to function.
:param host: the hostname of the compute host. | 625941b9ec188e330fd5a61d |
def return_read_end_pos(read_pos,cigar,flag=False,pattern=regex.compile('([0-9]+)([A-Z])'),cigars_to_ignore=['I','S','H','P']): <NEW_LINE> <INDENT> expanded_cigar = [] <NEW_LINE> bases=0 <NEW_LINE> if flag: <NEW_LINE> <INDENT> cigars_to_ignore = ['S','I','H','P','N'] <NEW_LINE> <DEDENT> for num_bases,cigar_char in regex.findall(pattern,cigar): <NEW_LINE> <INDENT> if cigar_char in cigars_to_ignore: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bases+=int(num_bases) <NEW_LINE> <DEDENT> <DEDENT> return read_pos+bases | Return the end of the read
| 625941b9a4f1c619b28afeb8 |
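A worked example for the row above with a simple CIGAR string; insertions ('I') are in the default ignore list, so only the matched bases advance the position:

    # 50M + 2I + 48M starting at position 100: 100 + 50 + 48 = 198
    end = return_read_end_pos(100, "50M2I48M")
    assert end == 198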
def testCoefficientOfVariationEmptyValues(self): <NEW_LINE> <INDENT> expected = 125.5 <NEW_LINE> actual = self.emptyValues.coeffVarValue <NEW_LINE> self.assertEqual(actual, expected) | testCoefficientOfVariationEmptyValues
Used to test the coefficient of variation value | 625941b94c3428357757c1a2