code (string, lengths 4-4.48k) | docstring (string, lengths 1-6.45k) | _id (string, length 24) |
---|---|---|
def get_intents(self): <NEW_LINE> <INDENT> return self._intents | A getter for the response's intents.
:return: A list of intents. | 625941b985dfad0860c3acc3 |
def list_bucket_objects(self, bucket): <NEW_LINE> <INDENT> return self.client.list_objects(Bucket=bucket).get('Contents', []) | List objects stored in a bucket
Args:
bucket (str): Name of the bucket
Returns:
list: List of bucket objects | 625941b97b180e01f3dc4670 |
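A minimal usage sketch for a wrapper like this, assuming `self.client` is a boto3 S3 client (the class and bucket names here are illustrative, not from the source):

```python
import boto3

class S3Reader:
    def __init__(self):
        self.client = boto3.client("s3")  # credentials resolved from the environment

    def list_bucket_objects(self, bucket):
        # 'Contents' is absent from the response when the bucket is empty
        return self.client.list_objects(Bucket=bucket).get("Contents", [])

# keys = [obj["Key"] for obj in S3Reader().list_bucket_objects("my-bucket")]
```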
def testNone(self): <NEW_LINE> <INDENT> self._populateIndex() <NEW_LINE> values = self._values <NEW_LINE> self._checkApply(self._none_req, values[-1:]) <NEW_LINE> assert None in self._index.uniqueValues('foo') | make sure None gets indexed | 625941b915fb5d323cde0974 |
def _state_deleted(self, want, have): <NEW_LINE> <INDENT> commands = [] <NEW_LINE> if want: <NEW_LINE> <INDENT> routes = self._get_routes(want) <NEW_LINE> if not routes: <NEW_LINE> <INDENT> for w in want: <NEW_LINE> <INDENT> af = w['address_families'] <NEW_LINE> for item in af: <NEW_LINE> <INDENT> if self.afi_in_have(have, item): <NEW_LINE> <INDENT> commands.append(self._compute_command(afi=item['afi'], remove=True)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> for r in routes: <NEW_LINE> <INDENT> h_route = self.search_route_in_have(have, r['dest']) <NEW_LINE> if h_route: <NEW_LINE> <INDENT> commands.extend(self._render_updates(r, h_route, opr=False)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> routes = self._get_routes(have) <NEW_LINE> if self._is_ip_route_exist(routes): <NEW_LINE> <INDENT> commands.append(self._compute_command(afi='ipv4', remove=True)) <NEW_LINE> <DEDENT> if self._is_ip_route_exist(routes, 'route6'): <NEW_LINE> <INDENT> commands.append(self._compute_command(afi='ipv6', remove=True)) <NEW_LINE> <DEDENT> <DEDENT> return commands | The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects | 625941b9287bf620b61d38d9 |
def recall_all_names(self): <NEW_LINE> <INDENT> if self.cont_info.newcmd_dict.get('rcn')[0]: <NEW_LINE> <INDENT> self._get_cmd_names(self.cont_info.commandsR) | recall_all_names()
scans all cmdids; if a cmdid has been renamed, the rename and cmdid are logged | 625941b94e4d5625662d4247 |
def test_case6(self): <NEW_LINE> <INDENT> bigram_combos = ["dean ovich"] <NEW_LINE> article_content = "this is an article about \ndean ovich\n" <NEW_LINE> self.assertTrue(name_bigram_present(bigram_combos, article_content)) | TC6: valid case. bigram present | 625941b98e7ae83300e4ae36 |
def __init__(self, region, api_key): <NEW_LINE> <INDENT> super(SummonerAPIClient, self).__init__(region, api_key) <NEW_LINE> self.baseURL = "https://%s.api.pvp.net/api/lol/%s/v1.4/summoner" % (region, region) <NEW_LINE> self.byNameURL = "%s/by-name" % (self.baseURL) | Create a client API for a particular region
region -- string : summoner-v1.4 [BR, EUNE, EUW, KR, LAN, LAS, NA, OCE, RU, TR]
api_key -- string : league api key | 625941b97d847024c06be12b |
def select_dates(dates_raw, opts, day0=None, day1=None): <NEW_LINE> <INDENT> dt_today = datetime.datetime.utcnow() <NEW_LINE> if opts['--shift']: <NEW_LINE> <INDENT> dt_today += datetime.timedelta(days=int(opts['--shift'])) <NEW_LINE> <DEDENT> season_start = datetime.datetime(*opts['season_start']) <NEW_LINE> season_end = datetime.datetime(*opts['season_end']) <NEW_LINE> date0 = season_start if day0 is None else dt_today + datetime.timedelta(days=day0) <NEW_LINE> date1 = season_end if day1 is None else dt_today + datetime.timedelta(days=day1) <NEW_LINE> if opts['--debug']: <NEW_LINE> <INDENT> print('select', date0.strftime('%a %m/%d'), date1.strftime('%a %m/%d')) <NEW_LINE> <DEDENT> return [e for e in dates_raw if bool(date0 <= e['date'] <= date1)] | return a subset of the events from today+day0 to today+day1
None in day0 means beginning of current ski season
None in day1 means end of current ski season | 625941b96aa9bd52df036c0d |
def get_ts_vals(self, var_name, ts_name, period, length): <NEW_LINE> <INDENT> ts = self._time_series.get((var_name, ts_name)) <NEW_LINE> if ts is None: <NEW_LINE> <INDENT> raise TimeSeriesNotFoundError <NEW_LINE> <DEDENT> start_index = 0 <NEW_LINE> if period[0] is not None: <NEW_LINE> <INDENT> start_index = self.time_manager.get_index(ts_name, period[0]) <NEW_LINE> <DEDENT> end_index = len(ts) <NEW_LINE> if period[1] is not None: <NEW_LINE> <INDENT> end_index = self.time_manager.get_index(ts_name, period[1]) <NEW_LINE> <DEDENT> elif length is not None: <NEW_LINE> <INDENT> end_index = start_index + length <NEW_LINE> <DEDENT> if start_index > end_index or end_index > len(ts): <NEW_LINE> <INDENT> raise PeriodOutOfRangeError <NEW_LINE> <DEDENT> return ts[start_index:end_index + 1] | Gets values of a variable for a specific period
in the ts_name timeseries, or for a given length
Args:
var_name (string): name of the variable
ts_name (string): timeseries name
period (tuple): period in the timeseries
length (int): length of the time series slice
Return:
(list): slice of the time series for the specified period | 625941b967a9b606de4a7d27 |
def test_laplace(counts, vectors): <NEW_LINE> <INDENT> probs = [] <NEW_LINE> count_y = [0, 0] <NEW_LINE> for count in counts: <NEW_LINE> <INDENT> prob = [] <NEW_LINE> for i in range(2): <NEW_LINE> <INDENT> denom = 1.0 * (count[i][0] + 1) + (count[i][1] + 1) <NEW_LINE> prob.append([(count[i][0] + 1) / denom, (count[i][1] + 1) / denom]) <NEW_LINE> count_y[i] += (count[i][0] + count[i][1]) <NEW_LINE> <DEDENT> probs.append(prob) <NEW_LINE> <DEDENT> denom = 1.0 * count_y[0] + count_y[1] <NEW_LINE> prob_y = [count_y[0] / denom, count_y[1] / denom] <NEW_LINE> num_out_0_tested = 0 <NEW_LINE> num_out_1_tested = 0 <NEW_LINE> num_out_0_correct = 0 <NEW_LINE> num_out_1_correct = 0 <NEW_LINE> for vect in vectors: <NEW_LINE> <INDENT> p_0 = log(prob_y[0]) <NEW_LINE> p_1 = log(prob_y[1]) <NEW_LINE> index = 0 <NEW_LINE> for inp in vect.inputs: <NEW_LINE> <INDENT> p_0 += log(probs[index][0][inp]) <NEW_LINE> p_1 += log(probs[index][1][inp]) <NEW_LINE> index += 1 <NEW_LINE> <DEDENT> output = 0 <NEW_LINE> if p_1 > p_0: <NEW_LINE> <INDENT> output = 1 <NEW_LINE> <DEDENT> if vect.output == 0: <NEW_LINE> <INDENT> num_out_0_tested += 1 <NEW_LINE> if output == 0: <NEW_LINE> <INDENT> num_out_0_correct += 1 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> num_out_1_tested += 1 <NEW_LINE> if output == 1: <NEW_LINE> <INDENT> num_out_1_correct += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> total_tested = num_out_0_tested + num_out_1_tested <NEW_LINE> total_correct = num_out_0_correct + num_out_1_correct <NEW_LINE> print("Laplace Smoothing Result...") <NEW_LINE> print_results(num_out_0_tested, num_out_1_tested, num_out_0_correct, num_out_1_correct) | Uses maximum likelihood estimation with Laplace smoothing on training data
to predict outputs for new data vectors.
MLE with Laplace smoothing is computed as follows.
p(input=x|output=y) = (<num examples input=x and output=y> + 1)/(<num examples output=y> + 2)
The output is predicted using Naive Bayes. The output that maximizes the
log of the Naive Bayes assumption is chosen.
Parameters:
counts -- multidimensional list containing counts of each input, output
combination
vectors -- list containing test data vector tuples | 625941b97b25080760e392c5 |
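To make the smoothing formula above concrete, a worked example with made-up counts (3 of 10 output=y examples have input=x):

```python
n_xy, n_y = 3, 10                  # hypothetical counts
p = (n_xy + 1) / (n_y + 2)         # Laplace-smoothed estimate: 4/12 ≈ 0.333
# Without smoothing, an unseen (x, y) pair would get probability 0,
# which would send log(p) to -inf in the Naive Bayes sum above.
```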
def itkLogImageFilterIUL2IUL2_Superclass_cast(*args): <NEW_LINE> <INDENT> return _itkLogImageFilterPython.itkLogImageFilterIUL2IUL2_Superclass_cast(*args) | itkLogImageFilterIUL2IUL2_Superclass_cast(itkLightObject obj) -> itkLogImageFilterIUL2IUL2_Superclass | 625941b955399d3f0558851e |
def __init__( self, *, name: Optional[str] = None, display: Optional["OperationDisplay"] = None, origin: Optional[str] = None, service_specification: Optional["ServiceSpecification"] = None, **kwargs ): <NEW_LINE> <INDENT> super(Operation, self).__init__(**kwargs) <NEW_LINE> self.name = name <NEW_LINE> self.display = display <NEW_LINE> self.origin = origin <NEW_LINE> self.service_specification = service_specification | :keyword name: Operation name: {provider}/{resource}/{operation}.
:paramtype name: str
:keyword display: Display metadata associated with the operation.
:paramtype display: ~azure.mgmt.storage.v2017_10_01.models.OperationDisplay
:keyword origin: The origin of operations.
:paramtype origin: str
:keyword service_specification: One property of operation, include metric specifications.
:paramtype service_specification: ~azure.mgmt.storage.v2017_10_01.models.ServiceSpecification | 625941b938b623060ff0ac59 |
def _fetch_objects(self, doc_type=None): <NEW_LINE> <INDENT> object_map = {} <NEW_LINE> for collection, dbrefs in self.reference_map.items(): <NEW_LINE> <INDENT> ref_document_cls_exists = getattr(collection, "objects", None) is not None <NEW_LINE> if ref_document_cls_exists: <NEW_LINE> <INDENT> col_name = collection._get_collection_name() <NEW_LINE> refs = [ dbref for dbref in dbrefs if (col_name, dbref) not in object_map ] <NEW_LINE> references = collection.objects.in_bulk(refs) <NEW_LINE> for key, doc in references.items(): <NEW_LINE> <INDENT> object_map[(col_name, key)] = doc <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if isinstance(doc_type, (ListField, DictField, MapField)): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> refs = [ dbref for dbref in dbrefs if (collection, dbref) not in object_map ] <NEW_LINE> if doc_type: <NEW_LINE> <INDENT> references = doc_type._get_db()[collection].find( {"_id": {"$in": refs}}, session=doc_type._get_local_session() ) <NEW_LINE> for ref in references: <NEW_LINE> <INDENT> doc = doc_type._from_son(ref) <NEW_LINE> object_map[(collection, doc.id)] = doc <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> references = get_db()[collection].find( {"_id": {"$in": refs}}, session=get_local_session() ) <NEW_LINE> for ref in references: <NEW_LINE> <INDENT> if "_cls" in ref: <NEW_LINE> <INDENT> doc = get_document(ref["_cls"])._from_son(ref) <NEW_LINE> <DEDENT> elif doc_type is None: <NEW_LINE> <INDENT> doc = get_document( "".join(x.capitalize() for x in collection.split("_")) )._from_son(ref) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> doc = doc_type._from_son(ref) <NEW_LINE> <DEDENT> object_map[(collection, doc.id)] = doc <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return object_map | Fetch all references and convert to their document objects | 625941b950812a4eaa59c190 |
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): <NEW_LINE> <INDENT> self._trial = trial <NEW_LINE> if self.hp_search_backend is None or trial is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.hp_search_backend == HPSearchBackend.OPTUNA: <NEW_LINE> <INDENT> params = self.hp_space(trial) <NEW_LINE> <DEDENT> elif self.hp_search_backend == HPSearchBackend.RAY: <NEW_LINE> <INDENT> params = trial <NEW_LINE> params.pop("wandb", None) <NEW_LINE> <DEDENT> for key, value in params.items(): <NEW_LINE> <INDENT> if not hasattr(self.args, key): <NEW_LINE> <INDENT> raise AttributeError( f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`." ) <NEW_LINE> <DEDENT> old_attr = getattr(self.args, key, None) <NEW_LINE> if old_attr is not None: <NEW_LINE> <INDENT> value = type(old_attr)(value) <NEW_LINE> <DEDENT> setattr(self.args, key, value) <NEW_LINE> <DEDENT> if self.hp_search_backend == HPSearchBackend.OPTUNA: <NEW_LINE> <INDENT> logger.info("Trial:", trial.params) | HP search setup code | 625941b9cb5e8a47e48b7919 |
def _tab_completion(self, verbose=True, use_disk_cache=True): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self.__tab_completion <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> import sage.misc.persist <NEW_LINE> if use_disk_cache: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.__tab_completion = sage.misc.persist.load(COMMANDS_CACHE) <NEW_LINE> return self.__tab_completion <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> if verbose: <NEW_LINE> <INDENT> print("\nBuilding Maple command completion list (this takes") <NEW_LINE> print("a few seconds only the first time you do it).") <NEW_LINE> print("To force rebuild later, delete %s." % COMMANDS_CACHE) <NEW_LINE> <DEDENT> v = self._commands() <NEW_LINE> self.__tab_completion = v <NEW_LINE> if len(v) > 200: <NEW_LINE> <INDENT> sage.misc.persist.save(v, COMMANDS_CACHE) <NEW_LINE> <DEDENT> return v | Returns a list of all the commands defined in Maple and optionally
(by default) stores them to disk.
EXAMPLES::
sage: c = maple._tab_completion(use_disk_cache=False, verbose=False) # optional - maple
sage: len(c) > 100 # optional - maple
True
sage: 'dilog' in c # optional - maple
True | 625941b923e79379d52ee3d2 |
def init_app(self, app): <NEW_LINE> <INDENT> if not hasattr(app, 'extensions'): <NEW_LINE> <INDENT> app.extensions = dict() <NEW_LINE> <DEDENT> app.extensions['rest-api'] = self <NEW_LINE> app.config.setdefault('RESTLY_SERIALIZER', json) <NEW_LINE> app.config.setdefault('RESTLY_API_PREFIX', '/api/rest') <NEW_LINE> app.config.setdefault('RESTLY_PROTOBUF_MIMETYPE', 'application/x-protobuf') <NEW_LINE> app.config.setdefault('RESTLY_RATE_LIMIT_REQUESTS_AMOUNT', 100) <NEW_LINE> app.config.setdefault('RESTLY_RATE_LIMIT_WINDOW_SECONDS', 60) <NEW_LINE> if self._error_handler is not None: <NEW_LINE> <INDENT> app.register_error_handler(Exception, self._error_handler) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> app.register_error_handler(Exception, api_error_handler) <NEW_LINE> <DEDENT> app.after_request(inject_rate_limit_headers) <NEW_LINE> self._app = app | Initializes FlaskRestAPI
:param app: Flask application
:type app: Flask | 625941b98e71fb1e9831d618 |
def exportCurve(self, factor=1., nameCSV='function.csv'): <NEW_LINE> <INDENT> df = pd.DataFrame({'X':np.around(self.time*factor, decimals=0),'Y':np.around(self.func, decimals=3)}) <NEW_LINE> df.to_csv(str(nameCSV),columns=['X', 'Y'], sep=' ', index=False ,header=0) <NEW_LINE> return | Write CSV sea level file following pyReef core requirements:
+ 2 columns file containing time in years (1st column) and environmental parameter (2nd column),
+ time will be in increasing order starting at the oldest time,
+ past times are negative,
+ the separator is a space.
Parameters
----------
variable: curve
Environmental parameter to save.
variable: factor
Factor to convert from given time unit to years (ex: Ma -> a).
variable: nameCSV
Name of the saved CSV file. | 625941b9c432627299f04aae |
def getScriptArgumentParser(description, args=sys.argv): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser( description=description) <NEW_LINE> parser.add_argument('config_file', help="Valid script configuration file. This should be the path to " "the script YAML configuration file. See config_sample.yaml" "for detailed specifications.") <NEW_LINE> parser.add_argument('--verbose', action='store_true', help="Echo verbose messages to stdout.") <NEW_LINE> parser.add_argument('--debug', action='store_true', help="Echo debug messages to stdout.") <NEW_LINE> return parser | Return ArgumentParser object
Args:
description: Text description of application ArgumentParser will be
applied to.
Kwargs:
args (list): list of arguments that will be parsed. The default
is the sys.argv list, and should be correct for most
use cases.
Returns:
ArgumentParser object that can be used to validate and execute the
current script invocation. | 625941b9d53ae8145f87a0e1 |
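A quick invocation sketch for the parser above ('config.yaml' is a hypothetical path):

```python
parser = getScriptArgumentParser("Sync data between systems")
opts = parser.parse_args(["config.yaml", "--verbose"])
print(opts.config_file, opts.verbose, opts.debug)  # config.yaml True False
```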
def getVolumeSize(self): <NEW_LINE> <INDENT> manifest = sdCache.produce_manifest(self.sdUUID) <NEW_LINE> return manifest.getVSize(self.imgUUID, self.volUUID) | Return the volume size in bytes. | 625941b9187af65679ca4f88 |
def test_empty_string(self): <NEW_LINE> <INDENT> self.assertEqual(gen_libs.pascalize(self.data_str), self.data_test) | Function: test_empty_string
Description: Test with an empty string.
Arguments: | 625941b97cff6e4e811177f0 |
def to_dist_matrix(self, graph): <NEW_LINE> <INDENT> n = len(self.graph.vert_dict) <NEW_LINE> mat = [[self.dist(self.graph.get_vertex(i), self.graph.get_vertex(j)) for i in range(n)] for j in range(n)] <NEW_LINE> return mat | Returns nxn nested list from a list of length n
Used as distance matrix: mat[i][j] is the distance between node i and j | 625941b9d164cc6175782bb8 |
def testORCA_ORCA4_1_orca_from_issue_736_out(logfile): <NEW_LINE> <INDENT> assert len(logfile.data.scfvalues) == 23 <NEW_LINE> assert abs(logfile.data.scfvalues[14][0][1] - 537) < 1.0, logfile.data.scfvalues[14][0] | ORCA file with no whitespace between SCF iteration columns. | 625941b945492302aab5e12b |
def systemd_notify(): <NEW_LINE> <INDENT> notify_socket = os.getenv('NOTIFY_SOCKET') <NEW_LINE> if not notify_socket: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if notify_socket.startswith('@'): <NEW_LINE> <INDENT> notify_socket = '\0' + notify_socket[1:] <NEW_LINE> <DEDENT> sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) <NEW_LINE> sock.connect(notify_socket) <NEW_LINE> sock.sendall(b'READY=1') <NEW_LINE> sock.close() | Notify systemd
def add_md(self, cvobj, committer=None, do_commit=True): <NEW_LINE> <INDENT> name = core.outputstorage.ConvertName(cvobj.metadata['id']) <NEW_LINE> self.interface.add(name.md, cvobj.data, committer=committer, do_commit=do_commit) <NEW_LINE> return True | >>> import glob
>>> import shutil
>>> import os.path
>>> import core.basedata
>>> import services.curriculumvitae
>>> import utils.docprocessor.libreoffice
>>> import extractor.information_explorer
>>> root = "core/test"
>>> name = "cv_1.doc"
>>> test_path = "services/test_output"
>>> DIR = 'services/test_repo'
>>> svc_cv = services.curriculumvitae.CurriculumVitae(DIR)
>>> obj = open(os.path.join(root, name))
>>> os.makedirs(test_path)
>>> fp1 = utils.docprocessor.libreoffice.LibreOfficeProcessor(obj, name, test_path)
>>> yamlinfo = extractor.information_explorer.catch_cvinfo(
... stream=fp1.markdown_stream.decode('utf8'), filename=fp1.base.base)
>>> cv1 = core.basedata.DataObject(data=fp1.markdown_stream, metadata=yamlinfo)
>>> svc_cv.add_md(cv1)
True
>>> md_files = glob.glob(os.path.join(svc_cv.path, '*.md'))
>>> len(md_files)
1
>>> yaml_files = glob.glob(os.path.join(svc_cv.path, '*.yaml'))
>>> len(yaml_files)
0
>>> obj.close()
>>> shutil.rmtree(DIR)
>>> shutil.rmtree(test_path) | 625941b926068e7796caeb43 |
def test_sc_bird(self): <NEW_LINE> <INDENT> trex_empty = self.create_trex_object() <NEW_LINE> expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float') <NEW_LINE> try: <NEW_LINE> <INDENT> trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54], [2.34, 1.384, 3.4], [3.]], dtype='object') <NEW_LINE> trex_empty.app_rate_parsing() <NEW_LINE> trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float') <NEW_LINE> trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float') <NEW_LINE> trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float') <NEW_LINE> result = trex_empty.sc_bird() <NEW_LINE> npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> tab = [result, expected_results] <NEW_LINE> print("\n") <NEW_LINE> print(inspect.currentframe().f_code.co_name) <NEW_LINE> print(tabulate(tab, headers='keys', tablefmt='rst')) <NEW_LINE> <DEDENT> return | unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird | 625941b9460517430c393ff9 |
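The first expected value in this test can be reproduced by hand from the docstring formula, assuming app_rate_parsing() selects the first application rate (0.34):

```python
app_rate, frac_act_ing, density, noaec_bird = 0.34, 0.15, 8.33, 5.0
m_s_a_r = ((app_rate * frac_act_ing) / 128) * density * 10000  # ≈ 33.19
risk_quotient = m_s_a_r / noaec_bird                           # ≈ 6.638, matching 6.637969
```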
def __finishSucceed(self): <NEW_LINE> <INDENT> for p in self.rule.produces: <NEW_LINE> <INDENT> p.finishSucceed() <NEW_LINE> <DEDENT> for r in self.rule.requires: <NEW_LINE> <INDENT> r.finishRequire() | finish up requires/produces on success; failures here
cause the rule to fail | 625941b9099cdd3c635f0ac7 |
def startTimerPlayer(self): <NEW_LINE> <INDENT> self.interval = 1000 <NEW_LINE> self.startTime = time.time() <NEW_LINE> self.timerPlayer.start(1000) | Function to start a player timer with an interval of 1000. | 625941b9956e5f7376d70ce4 |
def handle_directory_patterns(base, file_pattern): <NEW_LINE> <INDENT> splitted = base.split("/") <NEW_LINE> i = 0 <NEW_LINE> basedir = [] <NEW_LINE> for elem in splitted: <NEW_LINE> <INDENT> if re.search(is_pattern, elem): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> basedir.append(elem) <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> basedir = "/".join(basedir) <NEW_LINE> directory_pattern = splitted[i] <NEW_LINE> final = "/".join(splitted[i + 1:]) <NEW_LINE> try: <NEW_LINE> <INDENT> fp = helpers.urlopen(basedir) <NEW_LINE> <DEDENT> except urllib2.URLError: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> if not fp: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> data = fp.read() <NEW_LINE> if basedir.startswith("ftp://"): <NEW_LINE> <INDENT> scan_data = generic.scan_ftp(data, basedir, directory_pattern) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> scan_data = generic.scan_html(data, basedir, directory_pattern) <NEW_LINE> <DEDENT> return [("/".join((basedir, path, final)), file_pattern) for _, path in scan_data] | Directory pattern matching
e.g.: base: ftp://ftp.nessus.org/pub/nessus/nessus-([\d\.]+)/src/
file_pattern: nessus-core-([\d\.]+)\.tar\.gz | 625941b963d6d428bbe4435a |
def merge_min_t_arrays(binned_x, binned_extra_x, extended_binned_x, induction_loc, backward_t, forward_t, debug=False): <NEW_LINE> <INDENT> merged_min_t = np.empty_like(binned_extra_x) <NEW_LINE> merged_min_t[:] = np.nan <NEW_LINE> extended_min_t = np.empty_like(extended_binned_x) <NEW_LINE> extended_min_t[:] = np.nan <NEW_LINE> before = np.where(binned_extra_x < induction_loc)[0] <NEW_LINE> if np.any(before): <NEW_LINE> <INDENT> merged_min_t[before] = backward_t[before] <NEW_LINE> extended_min_t[np.add(before[:-1], 2 * len(binned_x))] = forward_t[before[:-1]] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> merged_min_t[0] = backward_t[0] <NEW_LINE> if debug: <NEW_LINE> <INDENT> print('merge_min_t_arrays: no before indexes') <NEW_LINE> <DEDENT> <DEDENT> after = np.where(binned_extra_x >= induction_loc)[0] <NEW_LINE> if np.any(after): <NEW_LINE> <INDENT> merged_min_t[after] = forward_t[after] <NEW_LINE> extended_min_t[after[1:-1]] = backward_t[after[1:-1]] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if debug: <NEW_LINE> <INDENT> print('merge_min_t_arrays: no after indexes') <NEW_LINE> <DEDENT> <DEDENT> if debug: <NEW_LINE> <INDENT> for i in range(len(merged_min_t)): <NEW_LINE> <INDENT> val = merged_min_t[i] <NEW_LINE> if np.isnan(val): <NEW_LINE> <INDENT> print('merge_min_t_arrays: nan in merged_min_t at index: %i' % i) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> fig4, axes4 = plt.subplots(1) <NEW_LINE> axes4.plot(binned_extra_x, backward_t, binned_extra_x, forward_t) <NEW_LINE> axes4.plot(binned_extra_x, merged_min_t, label='Merged') <NEW_LINE> axes4.legend(loc='best', frameon=False, framealpha=0.5) <NEW_LINE> fig4.show() <NEW_LINE> print('merge_min_t_arrays: val at backward_t[0]: %.2f; val at forward_t[-1]: %.2f' % (backward_t[0], forward_t[-1])) <NEW_LINE> <DEDENT> extended_min_t[len(binned_x):2 * len(binned_x)] = merged_min_t[:-1] <NEW_LINE> return extended_min_t | :param binned_x: array
:param binned_extra_x: array
:param extended_binned_x: array
:param induction_loc:
:param backward_t:
:param forward_t:
:param debug: bool
:return: array | 625941b9fff4ab517eb2f2a4 |
def select_sheet(self, number): <NEW_LINE> <INDENT> tree = ET.parse(self.directory_to_extract_to + "xl/worksheets/sheet" + str(number) + ".xml") <NEW_LINE> self.current_root = tree.getroot() <NEW_LINE> self.current_sheet_number = number <NEW_LINE> self.current_sheet = self.get_child_by_tag(tree.getroot(), "sheetData")[0] | return the root corresponding to the sheet with the parameter number | 625941b99f2886367277a6fc |
def list_all( self, **kwargs: Any ) -> AsyncIterable["_models.ApplicationGatewayListResult"]: <NEW_LINE> <INDENT> cls = kwargs.pop('cls', None) <NEW_LINE> error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } <NEW_LINE> error_map.update(kwargs.pop('error_map', {})) <NEW_LINE> api_version = "2019-09-01" <NEW_LINE> accept = "application/json" <NEW_LINE> def prepare_request(next_link=None): <NEW_LINE> <INDENT> header_parameters = {} <NEW_LINE> header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') <NEW_LINE> if not next_link: <NEW_LINE> <INDENT> url = self.list_all.metadata['url'] <NEW_LINE> path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> request = self._client.get(url, query_parameters, header_parameters) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> url = next_link <NEW_LINE> query_parameters = {} <NEW_LINE> request = self._client.get(url, query_parameters, header_parameters) <NEW_LINE> <DEDENT> return request <NEW_LINE> <DEDENT> async def extract_data(pipeline_response): <NEW_LINE> <INDENT> deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response) <NEW_LINE> list_of_elem = deserialized.value <NEW_LINE> if cls: <NEW_LINE> <INDENT> list_of_elem = cls(list_of_elem) <NEW_LINE> <DEDENT> return deserialized.next_link or None, AsyncList(list_of_elem) <NEW_LINE> <DEDENT> async def get_next(next_link=None): <NEW_LINE> <INDENT> request = prepare_request(next_link) <NEW_LINE> pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) <NEW_LINE> response = pipeline_response.http_response <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> map_error(status_code=response.status_code, response=response, error_map=error_map) <NEW_LINE> raise HttpResponseError(response=response, error_format=ARMErrorFormat) <NEW_LINE> <DEDENT> return pipeline_response <NEW_LINE> <DEDENT> return AsyncItemPaged( get_next, extract_data ) | Gets all the application gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.ApplicationGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError | 625941b95fcc89381b1e152f |
def get_freq(self, token): <NEW_LINE> <INDENT> return self.token_freq[token] | Returns the frequency with which the specified token has occurred in the training corpus.
:param token: The target token.
:type token: str
:return: The token's frequency.
:rtype: int | 625941b915baa723493c3ddd |
def convergence_time(number): <NEW_LINE> <INDENT> t_average = 0.0 <NEW_LINE> data_set = DataSet(number) <NEW_LINE> for i in range(1000): <NEW_LINE> <INDENT> data_set.new_set() <NEW_LINE> w = linear_regression(data_set) <NEW_LINE> temp = pla.pla(w, data_set) <NEW_LINE> t_average = (t_average * i + temp) / (i + 1) <NEW_LINE> <DEDENT> return t_average | Use the LR output as the start vector for the PLA algorithm. Outputs the average time (1000 trials) of PLA before convergence
Params: Number of points in data set
Return: Average time of convergence for modified PLA | 625941b9d99f1b3c44c67402 |
def xtime(self, path, *a, **opts): <NEW_LINE> <INDENT> if a: <NEW_LINE> <INDENT> rsc = a[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> rsc = self.master <NEW_LINE> <DEDENT> self.make_xtime_opts(rsc == self.master, opts) <NEW_LINE> return self.xtime_low(rsc, path, **opts) | get amended xtime
as of amending, we can create missing xtime, or
determine a valid value if what we get is expired
(as of the volume mark expiry); the way of amending
depends on @opts and on subject of query (master
or slave). | 625941b9435de62698dfdabf |
def del_all_produits(self): <NEW_LINE> <INDENT> if self.produits.count(): <NEW_LINE> <INDENT> for vendu in self.produits.iterator(): <NEW_LINE> <INDENT> vendu.delete() <NEW_LINE> <DEDENT> self.produits.clear() <NEW_LINE> self.montant_alcool = Decimal("0") <NEW_LINE> self.montant_normal = Decimal("0") <NEW_LINE> for paiement in self.paiements.iterator(): <NEW_LINE> <INDENT> paiement.delete() <NEW_LINE> <DEDENT> self.paiements.clear() <NEW_LINE> self.restant_a_payer = Decimal("0") <NEW_LINE> self.save() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logging.debug("la facture ne contient pas de produits") | Delete all the products | 625941b9de87d2750b85fbf9 |
def process_frame(self, img): <NEW_LINE> <INDENT> raise NotImplementedError("subclass must implement process_frame()") | Computes the dense optical flow field for the next frame.
Args:
img: an m x n x 3 image
Returns:
an m x n x 2 array containing the optical flow vectors
in Cartesian (x, y) format | 625941b99c8ee82313fbb5e0 |
def selected_symbol_index(self): <NEW_LINE> <INDENT> return self.recent.GetNextSelected(-1) | returns index (in the list of displayed symbols) of the currently selected
symbol. | 625941b9379a373c97cfa9b5 |
def test_visualize(self): <NEW_LINE> <INDENT> import barrista.design as design <NEW_LINE> if design._draw is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> from barrista.design import ConvolutionLayer, ReLULayer <NEW_LINE> netspec = design.NetSpecification([[10, 3, 51, 51], [10]], inputs=['data', 'annotations'], predict_inputs=['data'], predict_input_shapes=[[2, 3, 2, 2]]) <NEW_LINE> layers = [] <NEW_LINE> conv_params = {'Convolution_kernel_size': 3, 'Convolution_num_output': 32, 'Convolution_pad': 1} <NEW_LINE> layers.append(ConvolutionLayer(**conv_params)) <NEW_LINE> layers.append(ReLULayer()) <NEW_LINE> netspec.layers.extend(layers) <NEW_LINE> net = netspec.instantiate() <NEW_LINE> viz = net.visualize() <NEW_LINE> self.assertEqual(viz.ndim, 3) | Test the ``visualize`` function. | 625941b921bff66bcd6847c0 |
def copyPoolsFromSampler(self, sampler): <NEW_LINE> <INDENT> self._samplerPools = sampler._samplerPools.copy() <NEW_LINE> self._samplerPoolPriorityList = sampler._samplerPoolPriorityList.copy() <NEW_LINE> self._samplerMap = sampler._samplerMap.copy() | Clears this sampler and copies all sampler pools and their
corresponding priority
:param sampler: sampler to copy the sampler pools from | 625941b9fb3f5b602dac34fa |
def p_subsections(self, subsections): <NEW_LINE> <INDENT> self._act_on_list(subsections) | subsections : subsections subsection
| subsection | 625941b967a9b606de4a7d28 |
def make_di_problem(step_len, n_steps, damp, jitter, discount, bounds, cost_radius, actions): <NEW_LINE> <INDENT> (A,action_dim) = actions.shape <NEW_LINE> assert(action_dim == 1) <NEW_LINE> assert(actions[0] == -actions[-1]) <NEW_LINE> state_dim = 2 <NEW_LINE> trans_params = utils.kwargify(step=step_len, num_steps=n_steps, dampening=damp, control_jitter=jitter) <NEW_LINE> trans_fn = DoubleIntegratorTransitionFunction( **trans_params) <NEW_LINE> boundary = TorusBoundary(bounds) <NEW_LINE> cost_state_fn = BallSetFn(np.zeros(2), cost_radius) <NEW_LINE> cost_fn = CostWrapper(cost_state_fn) <NEW_LINE> oob_costs = np.array([100]*2*state_dim) <NEW_LINE> gen_model = GenerativeModel(trans_fn, boundary, cost_fn, state_dim, action_dim, oob_costs) <NEW_LINE> action_boundary = [(actions[0],actions[-1])] <NEW_LINE> problem = Problem(gen_model, action_boundary, discount) <NEW_LINE> return problem | Makes a double integrator problem
TODO: take in parameters | 625941b9dc8b845886cb53a0 |
def _init_es(es, prefix, video_index, comment_index): <NEW_LINE> <INDENT> if es.indices.exists(index=video_index): <NEW_LINE> <INDENT> es.indices.delete(index=video_index) <NEW_LINE> <DEDENT> if es.indices.exists(index=comment_index): <NEW_LINE> <INDENT> es.indices.delete(index=comment_index) <NEW_LINE> <DEDENT> es.indices.create(video_index) <NEW_LINE> es.indices.put_mapping(index=video_index, doc_type=VIDEO_DOC_TYPE, body=VIDEO_MAPPING) <NEW_LINE> es.indices.create(comment_index) <NEW_LINE> es.indices.put_mapping(index=comment_index, doc_type=COMMENT_DOC_TYPE, body=COMMENT_MAPPING) | sets up blank es index and adds doc type mapping information | 625941b9d486a94d0b98dfb8 |
def __contains__(self,obj2): <NEW_LINE> <INDENT> if(isinstance(obj2,DNA_part)): <NEW_LINE> <INDENT> if(obj2.parent==self): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif(obj2.parent==None): <NEW_LINE> <INDENT> new_obj2 = copy.copy(obj2).unclone() <NEW_LINE> uncloned_list = [copy.copy(a).unclone() for a in self.parts_list] <NEW_LINE> return new_obj2 in uncloned_list <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> elif(isinstance(obj2,str)): <NEW_LINE> <INDENT> return obj2 in str(self) | checks if this construct contains a certain part, or a copy of a certain part | 625941b9925a0f43d2549cdf |
def parse_gulp(filename, crystalStruc, path='./'): <NEW_LINE> <INDENT> gulp_lines = util.read_file(filename, path) <NEW_LINE> sysInfo = [] <NEW_LINE> atomsNotFound = True <NEW_LINE> allAtoms = dict() <NEW_LINE> i = 0 <NEW_LINE> for i, line in enumerate(gulp_lines): <NEW_LINE> <INDENT> if line.startswith('#'): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> elif line.strip() in 'vectors': <NEW_LINE> <INDENT> for j in range(3): <NEW_LINE> <INDENT> temp = gulp_lines[i+1+j].split() <NEW_LINE> cellVector = np.array([float(temp[k]) for k in range(3)]) <NEW_LINE> crystalStruc.setVector(cellVector, j) <NEW_LINE> <DEDENT> <DEDENT> elif line.strip() in 'cell': <NEW_LINE> <INDENT> cellParameters = gulp_lines[i+1].split()[:6] <NEW_LINE> cellParameters = [float(a) for a in cellParameters] <NEW_LINE> cellVectors = cry.cellToCart(cellParameters) <NEW_LINE> for j in range(3): <NEW_LINE> <INDENT> crystalStruc.setVector(cellVectors.getVector(j), j) <NEW_LINE> <DEDENT> <DEDENT> elif line.strip() in 'pcell': <NEW_LINE> <INDENT> cell_height = float(gulp_lines[i+1].strip().split()[0]) <NEW_LINE> crystalStruc.setC(np.array([0., 0., cell_height])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> foundAtoms = atomLine.match(line) <NEW_LINE> if foundAtoms: <NEW_LINE> <INDENT> if atomsNotFound: <NEW_LINE> <INDENT> atomsNotFound = False <NEW_LINE> if gulp_lines[i-1].rstrip().startswith('frac'): <NEW_LINE> <INDENT> frac = True <NEW_LINE> pfrac = False <NEW_LINE> <DEDENT> elif gulp_lines[i-1].rstrip().startswith('pfrac'): <NEW_LINE> <INDENT> frac = False <NEW_LINE> pfrac = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pfrac = False <NEW_LINE> frac = False <NEW_LINE> <DEDENT> <DEDENT> extractAtom(foundAtoms, allAtoms, frac=frac, pfrac=pfrac) <NEW_LINE> <DEDENT> elif (not atomsNotFound) and (not foundAtoms): <NEW_LINE> <INDENT> if ('dump' not in line) and ('switch' not in line): <NEW_LINE> <INDENT> if not re.match('\s*\w+\s*region\s*\d+', line): <NEW_LINE> <INDENT> sysInfo.append(line) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> for element in allAtoms: <NEW_LINE> <INDENT> for atom in allAtoms[element]['atoms']: <NEW_LINE> <INDENT> crystalStruc.addAtom(atom) <NEW_LINE> <DEDENT> <DEDENT> del allAtoms <NEW_LINE> return sysInfo | Parses the file <filename> to extract structural information, atomic
potentials, and control parameters. <crystalStruc> must be initialised (with
dummy cell parameters) prior to use. | 625941b931939e2706e4ccdb |
@pytest.mark.usefixtures("f2003_create") <NEW_LINE> @pytest.mark.parametrize('line', [ None, '', ' ', '#ifdfe', '#if', '#ifdef', '#ifdef two macros']) <NEW_LINE> def test_incorrect_if_stmt(line): <NEW_LINE> <INDENT> with pytest.raises(NoMatchError) as excinfo: <NEW_LINE> <INDENT> _ = Cpp_If_Stmt(line) <NEW_LINE> <DEDENT> assert "Cpp_If_Stmt: '{0}'".format(line) in str(excinfo.value) | Test that incorrectly formed #if statements raise exception. | 625941b950485f2cf553cc04 |
def crf_nll(y_true, y_pred): <NEW_LINE> <INDENT> crf, idx = y_pred._keras_history[:2] <NEW_LINE> assert not crf._outbound_nodes, 'When learn_model="join", CRF must be the last layer.' <NEW_LINE> if crf.sparse_target: <NEW_LINE> <INDENT> y_true = K.one_hot(K.cast(y_true[:, :, 0], 'int32'), crf.units) <NEW_LINE> <DEDENT> X = crf._inbound_nodes[idx].input_tensors[0] <NEW_LINE> mask = crf._inbound_nodes[idx].input_masks[0] <NEW_LINE> nloglik = crf.get_negative_log_likelihood(y_true, X, mask) <NEW_LINE> return nloglik | Usual Linear Chain CRF negative log likelihood. Used for CRF "join" mode. See `layers.CRF` for usage. | 625941b915fb5d323cde0975 |
def combinationSum3(self, k, n): <NEW_LINE> <INDENT> if n > 9 * k: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> def __directed_combination(k, n, partial, start): <NEW_LINE> <INDENT> if k == 0 and n == 0: <NEW_LINE> <INDENT> res.append(partial) <NEW_LINE> print(res) <NEW_LINE> <DEDENT> for i in range(start, 10): <NEW_LINE> <INDENT> if i <= n: <NEW_LINE> <INDENT> __directed_combination(k - 1, n - i, partial + [i], i + 1) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> res = [] <NEW_LINE> __directed_combination(k, n, [], 1) <NEW_LINE> return res | :type k: int
:type n: int
:rtype: List[List[int]]
Backtracking | 625941b99b70327d1c4e0c3f |
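A usage sketch for the backtracking above (`Solution` is a hypothetical holder class for the method):

```python
sol = Solution()
print(sol.combinationSum3(3, 7))  # [[1, 2, 4]] -- the only 3 distinct digits in 1..9 summing to 7
```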
def get_location(lat, lng): <NEW_LINE> <INDENT> path = "http://nominatim.openstreetmap.org/reverse" <NEW_LINE> params = urllib.parse.urlencode({'format': 'json', 'lat': lat, 'lon': lng, 'zoom': 11}) <NEW_LINE> url = "{0}/?{1}".format(path, params) <NEW_LINE> with urllib.request.urlopen(url) as response: <NEW_LINE> <INDENT> json_data = json.loads(response.read().decode('utf8')) <NEW_LINE> <DEDENT> if json_data.get("error"): <NEW_LINE> <INDENT> return "Over the ocean somewhere" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> address = json_data["address"] <NEW_LINE> city = address.get("city") or address.get("village") <NEW_LINE> state = address.get("state") <NEW_LINE> country = address.get("country") <NEW_LINE> if city is not None: <NEW_LINE> <INDENT> return "{0}, {1}, {2}".format(city, state, country) <NEW_LINE> <DEDENT> elif state is not None: <NEW_LINE> <INDENT> return "{0}, {1}".format(state, country) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return country | Returns a string describing the location of a given lat/lng.
Example: 41.878114, -87.629798 would return "Chicago, Illinois, United States of America" | 625941b9be7bc26dc91cd471 |
def test_product_without_images(self): <NEW_LINE> <INDENT> product_page = ProductPage(self.driver) <NEW_LINE> product_page.open(21) <NEW_LINE> expected_product_main_image = 'http://via.placeholder.com/300x300?text=%C5%BD%C3%A1dn%C3%BD%20obr%C3%A1zek' <NEW_LINE> assert product_page.product_main_image() == expected_product_main_image | Test place holder for a product image when product doesn't have any images | 625941b9004d5f362079a1a2 |
def test_get_lookup_codes(self): <NEW_LINE> <INDENT> response = self.client.get( reverse("lookup-codes-all") ) <NEW_LINE> expected_languages = ProgrammingLanguage.objects.all() <NEW_LINE> expected_countries = LocationCountryCode.objects.all() <NEW_LINE> serialised = LookupCodeListSerializer( {'programming_languages': expected_languages, 'countries': expected_countries}) <NEW_LINE> self.assertEqual(response.data, serialised.data) <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_200_OK) | This test asserts that all lookup code records added during
setUp will be retrieved and serialized when making a GET request
to the codes/all endpoint. | 625941b9442bda511e8be291 |
def setParams(self, *args): <NEW_LINE> <INDENT> return _MontePython_cxx.PollsJastrow_setParams(self, *args) | setParams(PollsJastrow self, QickArray & params_) | 625941b9a8ecb033257d2f42 |
def cog_unload(self) -> None: <NEW_LINE> <INDENT> self._init_task.cancel() <NEW_LINE> self._init_task.add_done_callback(lambda _: self.scheduler.cancel_all()) | Cancel the init task and scheduled tasks. | 625941b9f7d966606f6a9e73 |
def getRoll(self) -> float: <NEW_LINE> <INDENT> raw = self.ahrs.getRoll() <NEW_LINE> return -math.radians(raw) | Get current roll in radians (rotation around Y axis)
Angles are in the interval [-pi, pi], anticlockwise positive. | 625941b97cff6e4e811177f1 |
def spi_set_dff_16bit(): <NEW_LINE> <INDENT> pass | __NATIVE__
PmReturn_t retval = PM_RET_OK;
pPmObj_t p0;
uint32_t spi;
/* If wrong number of args, raise TypeError */
if (NATIVE_GET_NUM_ARGS() != 1)
{
PM_RAISE_WITH_INFO(retval, PM_RET_EX_TYPE, "incorrect number of arguments");
return retval;
}
p0 = NATIVE_GET_LOCAL(0);
/* If arg is not an int, raise TypeError */
if (OBJ_GET_TYPE(p0) != OBJ_TYPE_INT)
{
PM_RAISE_WITH_INFO(retval, PM_RET_EX_TYPE, "expected int");
return retval;
}
spi = ((pPmInt_t)p0)->val;
spi_set_dff_16bit(spi);
NATIVE_SET_TOS(PM_NONE);
return retval; | 625941b93346ee7daa2b2bd5 |
def deploy(self, target, prefix_dir): <NEW_LINE> <INDENT> prefix_content = [ x for x in os.listdir(prefix_dir) if not os.path.join(prefix_dir, x) in self.skip_names ] <NEW_LINE> os.chdir(prefix_dir) <NEW_LINE> cmd = ['scp', '-r', '-q'] + prefix_content + [target] <NEW_LINE> subproc.monitor_process(cmd, throw_ex=True) | docstring for deploy | 625941b963f4b57ef0000f8d |
def distance(a,b): <NEW_LINE> <INDENT> mx_arr_a = theano.tensor.repeat(a, b.shape[0], axis=0) <NEW_LINE> mx_arr_b = theano.tensor.tile(b, (a.shape[0], 1)) <NEW_LINE> v_d = theano.tensor.sqrt(theano.tensor.sum((mx_arr_a-mx_arr_b)**2, axis=1)) <NEW_LINE> return theano.tensor.reshape(v_d, (a.shape[0], b.shape[0])) | Return a matrix of distances from a matrix of coordinates.
| 625941b95166f23b2e1a4fc5 |
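The repeat/tile construction above is the symbolic analogue of NumPy broadcasting; a minimal NumPy sketch of the same pairwise-distance matrix:

```python
import numpy as np

def distance_np(a, b):
    # a: (m, d), b: (n, d) -> (m, n) matrix of Euclidean distances
    diff = a[:, None, :] - b[None, :, :]   # broadcast to (m, n, d)
    return np.sqrt((diff ** 2).sum(axis=2))
```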
def create(self, tdirNEvents, requireAllHistograms=False): <NEW_LINE> <INDENT> self._histograms = [self._createOne(self._name, i, tdirNEvent[0], tdirNEvent[1]) for i, tdirNEvent in enumerate(tdirNEvents)] <NEW_LINE> if self._fallback is not None: <NEW_LINE> <INDENT> profileX = [self._profileX]*len(self._histograms) <NEW_LINE> for i in range(0, len(self._histograms)): <NEW_LINE> <INDENT> if self._histograms[i] is None: <NEW_LINE> <INDENT> self._histograms[i] = self._createOne(self._fallback["name"], i, tdirNEvents[i][0], tdirNEvents[i][1]) <NEW_LINE> profileX[i] = self._fallback.get("profileX", self._profileX) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if self._histogramModifier is not None: <NEW_LINE> <INDENT> self._histograms = self._histogramModifier(self._histograms) <NEW_LINE> <DEDENT> if len(self._histograms) > len(_plotStylesColor): <NEW_LINE> <INDENT> raise Exception("More histograms (%d) than there are plot styles (%d) defined. Please define more plot styles in this file" % (len(self._histograms), len(_plotStylesColor))) <NEW_LINE> <DEDENT> def _modifyHisto(th1, profileX): <NEW_LINE> <INDENT> if th1 is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> if profileX: <NEW_LINE> <INDENT> th1 = th1.ProfileX() <NEW_LINE> <DEDENT> if self._fitSlicesY: <NEW_LINE> <INDENT> ROOT.TH1.AddDirectory(True) <NEW_LINE> th1.FitSlicesY() <NEW_LINE> th1 = ROOT.gDirectory.Get(th1.GetName()+"_2") <NEW_LINE> th1.SetDirectory(None) <NEW_LINE> ROOT.TH1.AddDirectory(False) <NEW_LINE> <DEDENT> if self._title is not None: <NEW_LINE> <INDENT> th1.SetTitle(self._title) <NEW_LINE> <DEDENT> if self._scale is not None: <NEW_LINE> <INDENT> th1.Scale(self._scale) <NEW_LINE> <DEDENT> return th1 <NEW_LINE> <DEDENT> if self._fallback is not None: <NEW_LINE> <INDENT> self._histograms = map(_modifyHisto, self._histograms, profileX) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._histograms = map(lambda h: _modifyHisto(h, self._profileX), self._histograms) <NEW_LINE> <DEDENT> if requireAllHistograms and None in self._histograms: <NEW_LINE> <INDENT> self._histograms = [None]*len(self._histograms) | Create histograms from list of TDirectories | 625941b97b25080760e392c6 |
def list_documents(self, col_id): <NEW_LINE> <INDENT> url = f"{self.base_url}/collections/{col_id}/list" <NEW_LINE> response = requests.get(url, cookies=self.login_cookie) <NEW_LINE> if response.ok: <NEW_LINE> <INDENT> return response.json() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return response.ok | Helper function to interact with TRANSKRIBUS collection endpoint to list all documents
:param col_id: The ID of a TRANSKRIBUS Collection
:return: A dict with the default TRANSKRIBUS API return | 625941b963d6d428bbe4435b |
def delete(self, table, row=None, **kw): <NEW_LINE> <INDENT> if table.endswith('*'): <NEW_LINE> <INDENT> table = table[:-1].rstrip() <NEW_LINE> <DEDENT> attnames = self.get_attnames(table) <NEW_LINE> qoid = _oid_key(table) if 'oid' in attnames else None <NEW_LINE> if row is None: <NEW_LINE> <INDENT> row = {} <NEW_LINE> <DEDENT> elif 'oid' in row: <NEW_LINE> <INDENT> del row['oid'] <NEW_LINE> <DEDENT> row.update(kw) <NEW_LINE> if qoid and qoid in row and 'oid' not in row: <NEW_LINE> <INDENT> row['oid'] = row[qoid] <NEW_LINE> <DEDENT> if qoid and 'oid' in row: <NEW_LINE> <INDENT> keyname = ('oid',) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> keyname = self.pkey(table, True) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise _prg_error('Table %s has no primary key' % table) <NEW_LINE> <DEDENT> if not set(keyname).issubset(row): <NEW_LINE> <INDENT> raise KeyError('Missing value for primary key in row') <NEW_LINE> <DEDENT> <DEDENT> params = self.adapter.parameter_list() <NEW_LINE> adapt = params.add <NEW_LINE> col = self.escape_identifier <NEW_LINE> where = ' AND '.join('%s OPERATOR(pg_catalog.=) %s' % ( col(k), adapt(row[k], attnames[k])) for k in keyname) <NEW_LINE> if 'oid' in row: <NEW_LINE> <INDENT> if qoid: <NEW_LINE> <INDENT> row[qoid] = row['oid'] <NEW_LINE> <DEDENT> del row['oid'] <NEW_LINE> <DEDENT> q = 'DELETE FROM %s WHERE %s' % ( self._escape_qualified_name(table), where) <NEW_LINE> self._do_debug(q, params) <NEW_LINE> res = self.db.query(q, params) <NEW_LINE> return int(res) | Delete an existing row in a database table.
This method deletes the row from a table. It deletes based on the
primary key of the table or the OID value as munged by get() or
passed as keyword. The OID will take precedence if provided.
The return value is the number of deleted rows (i.e. 0 if the row
did not exist and 1 if the row was deleted).
Note that if the row cannot be deleted because e.g. it is still
referenced by another table, this method raises a ProgrammingError. | 625941b95fdd1c0f98dc009d |
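A usage sketch for the method above, assuming a PyGreSQL-style `db` wrapper and a table 'customers' with primary key 'id' (both names are illustrative):

```python
# n = db.delete('customers', {'id': 42})  # 1 if the row existed, 0 otherwise
# db.delete('customers', id=42)           # keyword form, via the **kw parameter
```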
def skriv_dagalmanacka(dagalma): <NEW_LINE> <INDENT> def skriv_dagalma_intern(mt): <NEW_LINE> <INDENT> skriv_mötestid(mt) <NEW_LINE> print() <NEW_LINE> <DEDENT> för_varje_möte(dagalma, skriv_dagalma_intern) | dagalmanacka -> | 625941b9ad47b63b2c509df5 |
def price_transform(self, price): <NEW_LINE> <INDENT> if (1 == price or 2 == price): <NEW_LINE> <INDENT> return 'Economy hotel' <NEW_LINE> <DEDENT> elif (3 == price or 4 == price ): <NEW_LINE> <INDENT> return 'Commercial hotel' <NEW_LINE> <DEDENT> elif (price == 5): <NEW_LINE> <INDENT> return 'Luxury hotel' | This function transforms the parameter price into three categories: Economy hotel, Commercial hotel,
Luxury hotel.
Parameter:
price: int
Return:
String: Economy hotel, Commercial hotel, Luxury hotel. | 625941b91b99ca400220a91d |
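A usage sketch (the `hotels` instance name is hypothetical; note that values outside 1-5 fall through and return None as written):

```python
assert hotels.price_transform(2) == 'Economy hotel'
assert hotels.price_transform(4) == 'Commercial hotel'
assert hotels.price_transform(5) == 'Luxury hotel'
```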
def __init__(__self__, resource_name, opts=None, auth_type=None, cluster_id=None, container_name=None, directory=None, mount_name=None, storage_account_name=None, token_secret_key=None, token_secret_scope=None, __props__=None, __name__=None, __opts__=None): <NEW_LINE> <INDENT> if __name__ is not None: <NEW_LINE> <INDENT> warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) <NEW_LINE> resource_name = __name__ <NEW_LINE> <DEDENT> if __opts__ is not None: <NEW_LINE> <INDENT> warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) <NEW_LINE> opts = __opts__ <NEW_LINE> <DEDENT> if opts is None: <NEW_LINE> <INDENT> opts = pulumi.ResourceOptions() <NEW_LINE> <DEDENT> if not isinstance(opts, pulumi.ResourceOptions): <NEW_LINE> <INDENT> raise TypeError('Expected resource options to be a ResourceOptions instance') <NEW_LINE> <DEDENT> if opts.version is None: <NEW_LINE> <INDENT> opts.version = utilities.get_version() <NEW_LINE> <DEDENT> if opts.id is None: <NEW_LINE> <INDENT> if __props__ is not None: <NEW_LINE> <INDENT> raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') <NEW_LINE> <DEDENT> __props__ = dict() <NEW_LINE> if auth_type is None: <NEW_LINE> <INDENT> raise TypeError("Missing required property 'auth_type'") <NEW_LINE> <DEDENT> __props__['auth_type'] = auth_type <NEW_LINE> if cluster_id is None: <NEW_LINE> <INDENT> raise TypeError("Missing required property 'cluster_id'") <NEW_LINE> <DEDENT> __props__['cluster_id'] = cluster_id <NEW_LINE> if container_name is None: <NEW_LINE> <INDENT> raise TypeError("Missing required property 'container_name'") <NEW_LINE> <DEDENT> __props__['container_name'] = container_name <NEW_LINE> __props__['directory'] = directory <NEW_LINE> if mount_name is None: <NEW_LINE> <INDENT> raise TypeError("Missing required property 'mount_name'") <NEW_LINE> <DEDENT> __props__['mount_name'] = mount_name <NEW_LINE> if storage_account_name is None: <NEW_LINE> <INDENT> raise TypeError("Missing required property 'storage_account_name'") <NEW_LINE> <DEDENT> __props__['storage_account_name'] = storage_account_name <NEW_LINE> if token_secret_key is None: <NEW_LINE> <INDENT> raise TypeError("Missing required property 'token_secret_key'") <NEW_LINE> <DEDENT> __props__['token_secret_key'] = token_secret_key <NEW_LINE> if token_secret_scope is None: <NEW_LINE> <INDENT> raise TypeError("Missing required property 'token_secret_scope'") <NEW_LINE> <DEDENT> __props__['token_secret_scope'] = token_secret_scope <NEW_LINE> <DEDENT> super(AzureBlobMount, __self__).__init__( 'databricks:azure/azureBlobMount:AzureBlobMount', resource_name, __props__, opts) | Create a AzureBlobMount resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource. | 625941b966656f66f7cbc016 |
def get_state(self): <NEW_LINE> <INDENT> return self.__running | Returns running state. | 625941b9aad79263cf3908a7 |
def test_chshell(self): <NEW_LINE> <INDENT> mock = MagicMock(return_value={'shell': 'A'}) <NEW_LINE> with patch.object(pw_user, 'info', mock): <NEW_LINE> <INDENT> self.assertTrue(pw_user.chshell('name', 'A')) <NEW_LINE> <DEDENT> mock = MagicMock(return_value=None) <NEW_LINE> with patch.dict(pw_user.__salt__, {'cmd.run': mock}): <NEW_LINE> <INDENT> mock = MagicMock(side_effect=[{'shell': 'B'}, {'shell': 'B'}]) <NEW_LINE> with patch.object(pw_user, 'info', mock): <NEW_LINE> <INDENT> self.assertFalse(pw_user.chshell('name', 'A')) <NEW_LINE> <DEDENT> <DEDENT> mock = MagicMock(return_value=None) <NEW_LINE> with patch.dict(pw_user.__salt__, {'cmd.run': mock}): <NEW_LINE> <INDENT> mock = MagicMock(side_effect=[{'shell': 'A'}, {'shell': 'B'}]) <NEW_LINE> with patch.object(pw_user, 'info', mock): <NEW_LINE> <INDENT> self.assertTrue(pw_user.chshell('name', 'A')) | Test if shell given is same as previous shell | 625941b95fc7496912cc37f2 |
def extract_label(video_name): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return video_name.split('_')[1] <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> raise ValueError('Not a valid video name.') | Extract label from video file name.
>>> extract_label('v_YoYo_g25_c05.avi')
'YoYo'
>>> extract_label('v_Knitting_g16_c02.avi')
'Knitting' | 625941b96aa9bd52df036c0e |
def AcqPremRatio(): <NEW_LINE> <INDENT> pvs = InnerProj(0).PresentValue(0) <NEW_LINE> return ((pvs.PV_ExpsCommTotal(0) + pvs.PV_ExpsAcq(0)) / pvs.PV_PremIncome(0)) | Ratio of PV Acquisition Cashflows to PV Premiums.
The ratio is determined by the expectation at issue. | 625941b9a79ad161976cbfb2 |
def wall_phase2(self): <NEW_LINE> <INDENT> self.image = pygame.image.load("./Images/wall2.PNG") <NEW_LINE> self.image.set_colorkey((0,0,0)) <NEW_LINE> self.image = self.image.convert() | This method changes the image of the wall | 625941b9287bf620b61d38db |
def test_user_edit_page(self): <NEW_LINE> <INDENT> url = reverse('admin:core_user_change', args=[self.user.id]) <NEW_LINE> resp = self.client.get(url) <NEW_LINE> self.assertEqual(resp.status_code, 200) | Test for user edit page | 625941b91f5feb6acb0c49c1 |
def get_metrics_data(request, project, branch, revision): <NEW_LINE> <INDENT> product_name = request.GET.get("product", "Firefox") <NEW_LINE> os_name = request.GET.get("os_name", None) <NEW_LINE> os_version = request.GET.get("os_version", None) <NEW_LINE> branch_version = request.GET.get("branch_version", None) <NEW_LINE> if not branch_version: <NEW_LINE> <INDENT> branch_version = testdata.get_default_version( project, branch, product_name ) <NEW_LINE> <DEDENT> processor = request.GET.get("processor", None) <NEW_LINE> build_type = request.GET.get("build_type", None) <NEW_LINE> test_name = request.GET.get("test_name", None) <NEW_LINE> page_name = request.GET.get("page_name", None) <NEW_LINE> return HttpResponse( json.dumps(testdata.get_metrics_data( project, branch, revision, product_name=product_name, os_name=os_name, os_version=os_version, branch_version=branch_version, processor=processor, build_type=build_type, test_name=test_name, page_name=page_name, )), content_type=API_CONTENT_TYPE, ) | Apply filters and return all metrics data associated with the revision. | 625941b9d8ef3951e32433aa |
def roll_dice(num_rolls, dice=six_sided): <NEW_LINE> <INDENT> assert type(num_rolls) == int, 'num_rolls must be an integer.' <NEW_LINE> assert num_rolls > 0, 'Must roll at least once.' <NEW_LINE> n, sum, checker = 0, 0, False <NEW_LINE> while num_rolls: <NEW_LINE> <INDENT> num_rolls -= 1 <NEW_LINE> n = dice () <NEW_LINE> if n == 1: <NEW_LINE> <INDENT> checker = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sum += n <NEW_LINE> <DEDENT> <DEDENT> if checker == True: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return sum | Simulate rolling the DICE exactly NUM_ROLLS > 0 times. Return the sum of
the outcomes unless any of the outcomes is 1. In that case, return 1.
num_rolls: The number of dice rolls that will be made.
dice: A function that simulates a single dice roll outcome. | 625941b9dd821e528d63b018 |
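A deterministic sketch of the "return 1 on any 1" rule above, replacing the random dice with a scripted one (the scripted outcomes are illustrative):

```python
rolls = iter([3, 1, 4])
assert roll_dice(3, dice=lambda: next(rolls)) == 1   # a 1 appeared, so the result is 1
rolls = iter([3, 5, 4])
assert roll_dice(3, dice=lambda: next(rolls)) == 12  # no 1s, so the outcomes are summed
```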
@app.cli.command() <NEW_LINE> @click.option('--coverage/--no-coverage', default=False, help='Run tests under code coverage.') <NEW_LINE> def test(coverage): <NEW_LINE> <INDENT> if coverage and not os.environ.get('FLASK_COVERAGE'): <NEW_LINE> <INDENT> os.environ['FLASK_COVERAGE'] = '1' <NEW_LINE> os.execvp(sys.executable, [sys.executable] + sys.argv) <NEW_LINE> <DEDENT> import unittest <NEW_LINE> tests = unittest.TestLoader().discover('tests') <NEW_LINE> unittest.TextTestRunner(verbosity=2).run(tests) <NEW_LINE> if COV: <NEW_LINE> <INDENT> COV.stop() <NEW_LINE> COV.save() <NEW_LINE> print('Coverage summary') <NEW_LINE> COV.report() <NEW_LINE> basedir = os.path.abspath(os.path.dirname(__file__)) <NEW_LINE> covdir = os.path.join(basedir, 'tmp/coverage') <NEW_LINE> COV.html_report(directory=covdir) <NEW_LINE> print('HTML version: file://%s/index.html' % covdir) <NEW_LINE> COV.erase() | Run the unittests | 625941b9cad5886f8bd26e4e |
def should_hide(self, row): <NEW_LINE> <INDENT> return False | row is for self.model(). So if there is a proxy, it is the row number
in that! | 625941b9ac7a0e7691ed3f46 |
def baseDistkm(self): <NEW_LINE> <INDENT> self.getDist(False) <NEW_LINE> self.labelStation() <NEW_LINE> self.axss.set_ylabel('Distance [km]') | Set baseline of seismograms as epicentral distance in km. | 625941b932920d7e50b28039 |
def importDefaultGuide(self): <NEW_LINE> <INDENT> io.import_sample_template("biped.sgt") | import mgear template biped
| 625941b9e64d504609d746ad |
def to_ipython(self, filename=None): <NEW_LINE> <INDENT> if filename is None: <NEW_LINE> <INDENT> filename = "reprep-%s.html" % str(id(self)) <NEW_LINE> <DEDENT> self.to_html(filename) <NEW_LINE> from IPython.display import display, HTML <NEW_LINE> display(HTML(open(filename).read())) | Displays in the IPython editor. | 625941b9ab23a570cc24ffec |
def test_helptext(self): <NEW_LINE> <INDENT> sys.argv = [''] <NEW_LINE> self.exec_module() <NEW_LINE> self.assertEqual(self.retcode, 1) <NEW_LINE> self.assertEqual(len(self.stdout), 0) <NEW_LINE> self.assertEqual(self.stderr, self.module.__doc__.split("\n")[:-1]) | $ pdb_shiftres | 625941b9bde94217f3682c69 |
def create_cluster(self, cluster_create_spec: dict): <NEW_LINE> <INDENT> uri = self._clusters_uri <NEW_LINE> response = self.do_request( uri=uri, method=shared_constants.RequestMethod.POST, accept_type='application/json', media_type='application/json', payload=cluster_create_spec) <NEW_LINE> return common_models.DefEntity(**self.process_response(response)) | Call create native cluster CSE server endpoint.
:param dict cluster_create_spec: Cluster create specification
:return: defined entity object representing the response
:rtype: common_models.DefEntity | 625941b9aad79263cf3908a8 |
def _get_chart_template(self, cr, uid, ids, field_name, arg, context=None): <NEW_LINE> <INDENT> if context is None: <NEW_LINE> <INDENT> context = {} <NEW_LINE> <DEDENT> res={} <NEW_LINE> accounts = self.browse(cr, uid, ids) <NEW_LINE> for account in accounts: <NEW_LINE> <INDENT> id = account.id <NEW_LINE> while account.parent_id: <NEW_LINE> <INDENT> account = self.browse(cr, uid, account.parent_id.id) <NEW_LINE> <DEDENT> template_ids = self.pool.get('account.chart.template').search(cr, uid, [('account_root_id', '=', account.id)], context=context) <NEW_LINE> res[id] = template_ids and template_ids[0] or False <NEW_LINE> <DEDENT> return res | To get the chart template from an account template, search recursively up
the parent_id chain until parent_id is null (that is the root account), then select the
chart template whose 'account_root_id' points to that root account. | 625941b9f9cc0f698b140472
def get_int_dtype(value: int): <NEW_LINE> <INDENT> if value <= np.iinfo(np.uint8).max: <NEW_LINE> <INDENT> return np.uint8 <NEW_LINE> <DEDENT> if value <= np.iinfo(np.uint16).max: <NEW_LINE> <INDENT> return np.uint16 <NEW_LINE> <DEDENT> if value <= np.iinfo(np.uint32).max: <NEW_LINE> <INDENT> return np.uint32 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Too many shapes") | Determine appropriate bit precision for indexed image
Parameters
----------
value : int
number of shapes
Returns
-------
dtype : np.dtype
appropriate data type for the index mask | 625941b93539df3088e2e1b8
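The uint32 branch in this row originally returned np.int32, which cannot represent values above 2**31 - 1; with that fixed, a compact runnable equivalent looks like this (a sketch, not the upstream implementation):

```python
import numpy as np

def get_int_dtype(value: int):
    """Smallest unsigned integer dtype able to index `value` shapes."""
    for dtype in (np.uint8, np.uint16, np.uint32):
        if value <= np.iinfo(dtype).max:
            return dtype
    raise ValueError("Too many shapes")

assert get_int_dtype(200) is np.uint8
assert get_int_dtype(70_000) is np.uint32  # above the uint16 maximum of 65535
```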
def test_load(self): <NEW_LINE> <INDENT> d = Dialog() <NEW_LINE> self.assertIsNotNone(d) <NEW_LINE> d = Dialog(test_kb_path) <NEW_LINE> self.assertIsNotNone(d) <NEW_LINE> self.assertRaises(Exception, lambda: Dialog('')) | Test constructing a new Dialog instance and loading a KB. | 625941b97c178a314d6ef2c5 |
def p_stringexpression_varlist(p): <NEW_LINE> <INDENT> p[0] = Variable(p[1]) | stringexpression : VAR_LIST | 625941b9090684286d50eb4d |
def resign(self, game): <NEW_LINE> <INDENT> if not game.opponent.adjournment: <NEW_LINE> <INDENT> Log.warn("AdjournManager.resign: no adjourned game vs %s\n" % game.opponent) <NEW_LINE> return <NEW_LINE> <DEDENT> Log.info("AdjournManager.resign: resigning adjourned game=%s\n" % game) <NEW_LINE> self.connection.client.run_command("resign %s" % game.opponent.name) | This (like draw and abort) is possible even when one's
opponent is not logged on. | 625941b9d164cc6175782bba
def textFormatter_gettext( s ): <NEW_LINE> <INDENT> return textFormatter_lookup.get( s, s ) | Cleans up argparse help information | 625941b98c0ade5d55d3e82c |
def __init__(self, iterable=None): <NEW_LINE> <INDENT> self.head = None <NEW_LINE> self.tail = None <NEW_LINE> self.size = 0 <NEW_LINE> if iterable is not None: <NEW_LINE> <INDENT> for item in iterable: <NEW_LINE> <INDENT> self.append(item) | Initialize this linked list and append the given items if any | 625941b926068e7796caeb45 |
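The row above calls an `append` method it does not show. A minimal self-contained sketch; the `Node` class and the `append` body are assumptions consistent with the head/tail/size attributes the constructor initializes:

```python
class LinkedList:
    class Node:
        def __init__(self, data):
            self.data = data
            self.next = None

    def __init__(self, iterable=None):
        """Initialize this linked list and append the given items if any."""
        self.head = None
        self.tail = None
        self.size = 0
        if iterable is not None:
            for item in iterable:
                self.append(item)

    def append(self, item):
        # Hypothetical O(1) append using the tail pointer.
        node = self.Node(item)
        if self.tail is None:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node
        self.size += 1

ll = LinkedList([1, 2, 3])
assert (ll.head.data, ll.tail.data, ll.size) == (1, 3, 3)
```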
def test_set_handler(self): <NEW_LINE> <INDENT> dummy_method = "dummy" <NEW_LINE> dummy_handler = "handler" <NEW_LINE> self.l2gw_ovsdb._set_handler(dummy_method, dummy_handler) <NEW_LINE> self.assertEqual(self.l2gw_ovsdb.handlers[dummy_method], dummy_handler) | Test case to test _set_handler. | 625941b960cbc95b062c63b6 |
def __init__(self, satellite_alignment_strength=0.8, prim_gal_axis='major', **kwargs): <NEW_LINE> <INDENT> self.gal_type = 'satellites' <NEW_LINE> self._mock_generation_calling_sequence = (['assign_orientation']) <NEW_LINE> self._galprop_dtypes_to_allocate = np.dtype( [(str('galaxy_axisA_x'), 'f4'), (str('galaxy_axisA_y'), 'f4'), (str('galaxy_axisA_z'), 'f4'), (str('galaxy_axisB_x'), 'f4'), (str('galaxy_axisB_y'), 'f4'), (str('galaxy_axisB_z'), 'f4'), (str('galaxy_axisC_x'), 'f4'), (str('galaxy_axisC_y'), 'f4'), (str('galaxy_axisC_z'), 'f4')]) <NEW_LINE> self.list_of_haloprops_needed = ['halo_x', 'halo_y', 'halo_z', 'halo_axisA_x', 'halo_axisA_y', 'halo_axisA_z'] <NEW_LINE> possible_axis = ['major', 'intermediate', 'minor'] <NEW_LINE> if prim_gal_axis in possible_axis: <NEW_LINE> <INDENT> if prim_gal_axis == possible_axis[0]: self.prim_gal_axis = 'A' <NEW_LINE> elif prim_gal_axis == possible_axis[1]: self.prim_gal_axis = 'B' <NEW_LINE> elif prim_gal_axis == possible_axis[2]: self.prim_gal_axis = 'C' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> msg = ('`prim_gal_axis` must be one of {0}, but instead is {1}.'.format(possible_axis, prim_gal_axis)) <NEW_LINE> raise ValueError(msg) <NEW_LINE> <DEDENT> self._methods_to_inherit = ( ['assign_orientation']) <NEW_LINE> self.param_dict = ({ 'satellite_alignment_strength': satellite_alignment_strength}) | Parameters
==========
satellite_alignment_strength : float
parameter between [-1,1] that sets the alignment strength between perfect anti-alignment and perfect alignment
prim_gal_axis : string, optional
string indicating which galaxy principal axis is correlated with the halo alignment axis.
The options are: `major`, `intermediate`, and `minor`
Notes
=====
If the kwargs or table contain a key "satellite_alignment_strength", this will be used instead. | 625941b97c178a314d6ef2c6 |
def plugin_get_policy(self, context, policy): <NEW_LINE> <INDENT> cfgdb = self._get_user_cfgdb(context) <NEW_LINE> pol_info = cfgdb.policy_read(policy['id']) <NEW_LINE> return pol_info | Policy get request | 625941b9956e5f7376d70ce6 |
def apply_filters(self,request): <NEW_LINE> <INDENT> if 'date' in self.selected_fields_constraints: <NEW_LINE> <INDENT> self.details = self.client_details.filter( date_time__range = (self.start_date,self.end_date)) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.details = self.client_details.filter( mode_of_payment = self.mode_of_payment) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> month_start = str(self.year) + '-' + str(self.month)+ '-1' <NEW_LINE> month_end = str(self.year) + '-' + str(self.month)+ '-' + str(monthrange(int(self.year),int(self.month))[1]) <NEW_LINE> self.details = self.client_details.filter( date_time__range = (month_start, month_end)) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> if 'gt' in request.GET.getlist('additional_constraints'): <NEW_LINE> <INDENT> gt_amount=request.GET['amount_greater_than'] <NEW_LINE> self.details = self.client_details.filter( bill__total_cost__gt = gt_amount) <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> if request.GET['grand_total']: <NEW_LINE> <INDENT> return self.cal_grand_total(request) <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> return self.view_register(request) | Applying selected filters. | 625941b926068e7796caeb46 |
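Each branch above filters `self.client_details` from scratch, so only the last filter that succeeds takes effect. If cumulative filtering is the intent, chaining on a running queryset would fix it; a hedged sketch (field and attribute names follow the row above):

```python
def apply_cumulative_filters(client_details, constraints, start_date, end_date,
                             mode_of_payment=None):
    """Hypothetical cumulative variant of the filtering in the row above."""
    details = client_details
    if 'date' in constraints:
        details = details.filter(date_time__range=(start_date, end_date))
    if mode_of_payment:
        details = details.filter(mode_of_payment=mode_of_payment)
    return details
```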
def add_nsa(mastercat, nsa=None, matchtolerance=10*u.arcsec, removeduplicatepgcson='ABSMAG_r'): <NEW_LINE> <INDENT> from astropy.coordinates import SkyCoord <NEW_LINE> if nsa is None: <NEW_LINE> <INDENT> nsa = load_nsa() <NEW_LINE> <DEDENT> ral, decl = mastercat['al2000'], mastercat['de2000'] <NEW_LINE> lmsk = (~ral.mask) & (~decl.mask) <NEW_LINE> lcoo = SkyCoord(u.hour * ral[lmsk], u.degree * decl[lmsk], frame='icrs') <NEW_LINE> nsacoo = SkyCoord(u.degree * nsa['RA'], u.degree * nsa['DEC'], frame='icrs') <NEW_LINE> idx, dd, dd3d = nsacoo.match_to_catalog_sky(lcoo) <NEW_LINE> dmsk = dd < matchtolerance <NEW_LINE> matchpgc = np.zeros(len(idx), dtype=int) <NEW_LINE> matchpgc[dmsk] = mastercat['pgc'][lmsk][idx[dmsk]] <NEW_LINE> if removeduplicatepgcson: <NEW_LINE> <INDENT> if removeduplicatepgcson == 'closest': <NEW_LINE> <INDENT> dupval = dd <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dupval = nsa[removeduplicatepgcson] <NEW_LINE> <DEDENT> multipgcs = np.where(np.bincount(matchpgc) > 1)[0] <NEW_LINE> for i, n in enumerate(multipgcs): <NEW_LINE> <INDENT> matchmsk = matchpgc == n <NEW_LINE> idxs = np.where(matchmsk)[0] <NEW_LINE> bestidx = idxs[np.argmin(dupval[matchmsk])] <NEW_LINE> matchpgc[idxs] = 0 <NEW_LINE> matchpgc[bestidx] = n <NEW_LINE> <DEDENT> <DEDENT> matchpgc[matchpgc==0] -= 1 <NEW_LINE> if 'pgc' in nsa.colnames: <NEW_LINE> <INDENT> nsa['pgc'] = matchpgc <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nsa.add_column(table.Column(name='pgc', data=matchpgc)) <NEW_LINE> <DEDENT> if 'pgc_match_dist_asec' in nsa.colnames: <NEW_LINE> <INDENT> nsa['pgc_match_dist_asec'] = dd.to(u.arcsec) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nsa.add_column(table.Column(name='pgc_match_dist_asec', data=dd.to(u.arcsec))) <NEW_LINE> <DEDENT> return table.join(mastercat, nsa, keys=['pgc'], table_names=['leda', 'nsa'], join_type='outer') | Parameters
----------
mastercat
Output from `initial_catalog`
nsa
Output from `load_nsa`
matchtolerance : `Angle`
The distance out to look for matches when assigning PGC#s to NSA objects
removeduplicatepgcson : str or None
If not None, specifies what to use to remove multiple PGC #s: can be an
entry in the NSA catalog, in which case the smallest of those (brightest
mag) will be selected as the one object, or it can be 'closest' to just
pick the closest to the PGC coordinates. | 625941b9d164cc6175782bbb |
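A hedged usage sketch for the row above; per the docstring, `mastercat` comes from `initial_catalog` and `nsa` from `load_nsa`, and the variable names here are illustrative:

```python
import astropy.units as u

combined = add_nsa(mastercat,            # output of initial_catalog()
                   nsa=load_nsa(),
                   matchtolerance=10 * u.arcsec,
                   removeduplicatepgcson='ABSMAG_r')  # or 'closest'
# NSA objects with no PGC match carry pgc == -1; per-object match
# separations live in combined['pgc_match_dist_asec'].
```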
def create_client_from_settings(): <NEW_LINE> <INDENT> api_url = environ['JIRA_API_URL'] <NEW_LINE> username = environ['JIRA_USER'] <NEW_LINE> password = wf.get_password(username) <NEW_LINE> return ConcurrentJiraClient(api_url, username, password) | Build a ConcurrentJiraClient from environment settings and the keychain password. | 625941b9d268445f265b4ce2
def test_invalid_participant_get(self): <NEW_LINE> <INDENT> self.client.login(username=self.invalid_user.username, password=self.password) <NEW_LINE> resp = self.client.get(self.endpoint, content_type='application/json') <NEW_LINE> self.assertEqual(resp.status_code, 403) | Non thread-participants can't see the participants | 625941b90a366e3fb873e684 |
def iter_events(self, number=-1): <NEW_LINE> <INDENT> url = self._build_url('events') <NEW_LINE> return self._iter(int(number), url, Event) | Iterate over public events.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of :class:`Event <github3.events.Event>`\ s | 625941b99f2886367277a6fe |
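A usage sketch, assuming the pre-1.0 github3.py API that this row's helpers (`_iter`, `Event`) suggest:

```python
import github3

gh = github3.GitHub()                    # anonymous client; public events only
for event in gh.iter_events(number=10):  # ten most recent public events
    print(event.type, event.actor)
```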
def shopping(family, item_prices, item_weights): <NEW_LINE> <INDENT> f_hold = [] <NEW_LINE> f_win = 0 <NEW_LINE> for person in family: <NEW_LINE> <INDENT> hold = [] <NEW_LINE> p_hold = [] <NEW_LINE> for w in range(person + 1): <NEW_LINE> <INDENT> base = [0] <NEW_LINE> hold.append(base) <NEW_LINE> <DEDENT> for i in range(len(item_prices)): <NEW_LINE> <INDENT> for w in range(person + 1): <NEW_LINE> <INDENT> if item_weights[i] <= w: <NEW_LINE> <INDENT> if item_prices[i] + hold[w-item_weights[i]][i] > hold[w][i]: <NEW_LINE> <INDENT> hold[w].append(item_prices[i] + hold[w-item_weights[i]][i]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> hold[w].append(hold[w][i]) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> hold[w].append(hold[w][i]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> f_win += hold[person][len(item_prices)] <NEW_LINE> p_win = hold[person][len(item_prices)] <NEW_LINE> for i in range(len(item_prices), 0, -1): <NEW_LINE> <INDENT> if p_win <= 0: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if p_win == hold[person][i - 1]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p_hold.append(i) <NEW_LINE> p_win = p_win - item_prices[i - 1] <NEW_LINE> person = person - item_weights[i - 1] <NEW_LINE> <DEDENT> <DEDENT> p_hold.sort() <NEW_LINE> f_hold.append(p_hold) <NEW_LINE> <DEDENT> f_hold.insert(0, f_win) <NEW_LINE> return f_hold | Function to determine shopping spree items for provided family and items | 625941b9627d3e7fe0d68cbb |
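The row above runs an independent 0/1 knapsack for each family member (capacity = that member's weight budget) and reconstructs the chosen items as 1-indexed lists. A small worked example under that reading:

```python
# Two shoppers with capacities 10 and 12; three items with prices
# (values) 60/100/120 and weights 3/6/7.
result = shopping([10, 12], [60, 100, 120], [3, 6, 7])
# Expected: [340, [1, 2], [1, 3]]: combined value 340, shopper one takes
# items 1 and 2 (value 160), shopper two takes items 1 and 3 (value 180).
```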
def moveDown(self, amount, collidable ): <NEW_LINE> <INDENT> if self.rect.bottom < self.area.height: <NEW_LINE> <INDENT> self.rect.top += amount <NEW_LINE> if self.rect.collidelist(collidable) >= 0: <NEW_LINE> <INDENT> self.rect.top -= amount | move down by the given amount, avoiding collisions with the given rects | 625941b94428ac0f6e5ba65f |
@daemon_method <NEW_LINE> def create_account(name): <NEW_LINE> <INDENT> return wallet['obj'].create_account(name) | RPC method to create an account. | 625941b98da39b475bd64de4
def page_links(browser): <NEW_LINE> <INDENT> url = browser.geturl() <NEW_LINE> parsed_url = urlparse(url) <NEW_LINE> url_domain = f"{parsed_url.scheme}://{parsed_url.netloc}" <NEW_LINE> links = set() <NEW_LINE> for link in browser.links(): <NEW_LINE> <INDENT> link = make_url_absolute(link.url, url_domain) <NEW_LINE> links.add(link) <NEW_LINE> <DEDENT> return links | Returns all links from given page. | 625941b930bbd722463cbc30 |
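A usage sketch; the `browser` duck-type (`geturl()` plus `links()` yielding objects with a `.url` attribute) matches mechanize, and `make_url_absolute` is a helper assumed to live in the same module:

```python
import mechanize

browser = mechanize.Browser()
browser.open('https://example.com')
for link in sorted(page_links(browser)):  # absolute, de-duplicated URLs
    print(link)
```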
def removeWatch(self, id): <NEW_LINE> <INDENT> (nrId, key) = id.split('_') <NEW_LINE> if key in self.watches: <NEW_LINE> <INDENT> remove = lambda x: x[0] == id <NEW_LINE> self.watches[key][:] = [x for x in self.watches[key] if not remove(x)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> log.error("can't remove watch - key does not exist, watchId:", id) | Remove the watch specified by the given watch id; log an error if the key does not exist. | 625941b915baa723493c3ddf
def n_check_glow_property(n_tex_prop): <NEW_LINE> <INDENT> nose.tools.assert_is_instance(n_tex_prop, NifFormat.NiTexturingProperty) <NEW_LINE> nose.tools.assert_equal(n_tex_prop.has_glow_texture, True) | Checks the glow settings of the NiTexturingProperty. | 625941b9627d3e7fe0d68cbc
def hot(request): <NEW_LINE> <INDENT> context = {} <NEW_LINE> context['themes'] = getMonthHotTheme() <NEW_LINE> return render(request, 'theme/hot.html', context) | Featured topics: render this month's hot themes. | 625941b999fddb7c1c9de200
def LollipopGraph(n1, n2): <NEW_LINE> <INDENT> if n1 < 0: <NEW_LINE> <INDENT> raise ValueError("invalid graph description, n1 should be >= 0") <NEW_LINE> <DEDENT> if n2 < 0: <NEW_LINE> <INDENT> raise ValueError("invalid graph description, n2 should be >= 0") <NEW_LINE> <DEDENT> pos_dict = {} <NEW_LINE> for i in range(n1): <NEW_LINE> <INDENT> x = float(cos((pi/4) - ((2*pi)/n1)*i) - n2/2 - 1) <NEW_LINE> y = float(sin((pi/4) - ((2*pi)/n1)*i) - n2/2 - 1) <NEW_LINE> j = n1-1-i <NEW_LINE> pos_dict[j] = (x,y) <NEW_LINE> <DEDENT> for i in range(n1, n1+n2): <NEW_LINE> <INDENT> x = float(i - n1 - n2/2 + 1) <NEW_LINE> y = float(i - n1 - n2/2 + 1) <NEW_LINE> pos_dict[i] = (x,y) <NEW_LINE> <DEDENT> G = Graph(pos=pos_dict, name="Lollipop graph") <NEW_LINE> G.add_edges(((i, j) for i in range(n1) for j in range(i + 1, n1))) <NEW_LINE> G.add_path(list(range(n1, n1 + n2))) <NEW_LINE> if n1 * n2 > 0: <NEW_LINE> <INDENT> G.add_edge(n1 - 1, n1) <NEW_LINE> <DEDENT> return G | Returns a lollipop graph with n1+n2 nodes.
A lollipop graph is a path graph (order n2) connected to a complete
graph (order n1). (A barbell graph minus one of the bells).
PLOTTING: Upon construction, the position dictionary is filled to
override the spring-layout algorithm. By convention, the complete
graph will be drawn in the lower-left corner with the (n1)th node
at a 45 degree angle above the right horizontal center of the
complete graph, leading directly into the path graph.
EXAMPLES:
Construct and show a lollipop graph Candy = 13, Stick = 4::
sage: g = graphs.LollipopGraph(13,4); g
Lollipop graph: Graph on 17 vertices
sage: g.show() # long time
TESTS:
sage: n1, n2 = randint(3, 10), randint(0, 10)
sage: g = graphs.LollipopGraph(n1, n2)
sage: g.num_verts() == n1 + n2
True
sage: g.num_edges() == binomial(n1, 2) + n2
True
sage: g.is_connected()
True
sage: g.girth() == 3
True
sage: graphs.LollipopGraph(n1, 0).is_isomorphic(graphs.CompleteGraph(n1))
True
sage: graphs.LollipopGraph(0, n2).is_isomorphic(graphs.PathGraph(n2))
True
sage: graphs.LollipopGraph(0, 0).is_isomorphic(graphs.EmptyGraph())
True
The input ``n1`` must be `\geq 0`::
sage: graphs.LollipopGraph(-1, randint(0, 10^6))
Traceback (most recent call last):
...
ValueError: invalid graph description, n1 should be >= 0
The input ``n2`` must be `\geq 0`::
sage: graphs.LollipopGraph(randint(2, 10^6), -1)
Traceback (most recent call last):
...
ValueError: invalid graph description, n2 should be >= 0 | 625941b9e8904600ed9f1d96 |