code: stringlengths 4–4.48k
docstring: stringlengths 1–6.45k
_id: stringlengths 24–24
def p_names(self, p): <NEW_LINE> <INDENT> if len(p) == 2: <NEW_LINE> <INDENT> p[0] = [p[1]] <NEW_LINE> return <NEW_LINE> <DEDENT> assert isinstance(p[1], list) <NEW_LINE> for n in p[1]: <NEW_LINE> <INDENT> if n == p[3]: <NEW_LINE> <INDENT> logging.error('[%s:%d] Duplicated state value: %s', self._filename, p.lineno(3), p[3]) <NEW_LINE> return <NEW_LINE> <DEDENT> <DEDENT> p[1].append(p[3]) <NEW_LINE> p[0] = p[1]
names : names ',' NAME | NAME
625941b85fc7496912cc37e0
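An aside on the PLY convention the rule above relies on: for the production names : names ',' NAME, p[1] is the already-reduced list and p[3] is the new NAME, so the rule folds a comma-separated list into a Python list while rejecting duplicates. A standalone sketch of that duplicate check (names and values here are hypothetical, not from the dataset):

import logging

def add_name(names, name, filename='fsm.def', lineno=0):
    # Mirrors the duplicate check in p_names above: refuse to append a
    # state value that is already in the accumulated list.
    if name in names:
        logging.error('[%s:%d] Duplicated state value: %s', filename, lineno, name)
        return names
    names.append(name)
    return names

add_name(['IDLE'], 'RUNNING')   # -> ['IDLE', 'RUNNING']
add_name(['IDLE'], 'IDLE')      # logs an error; list unchanged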
def test_give_default_raise(self): <NEW_LINE> <INDENT> self.assertEqual(self.my_raise.give_raise(),40000)
Check the default raise added to the salary
625941b81f037a2d8b946059
def __init__(self, pmf, player, name=''): <NEW_LINE> <INDENT> thinkbayes.Suite.__init__(self, pmf, name=name) <NEW_LINE> self.player = player
Constructs the suite. pmf: prior distribution of price player: Player object name: string
625941b8b830903b967e9771
def post(self, request, task_id): <NEW_LINE> <INDENT> self._validate_teacher_user(request) <NEW_LINE> task = get_object_or_404(Task, id=task_id) <NEW_LINE> responses = task.responses <NEW_LINE> scores = request.POST.getlist('inputs[]') <NEW_LINE> for i in range(len(responses)): <NEW_LINE> <INDENT> response = responses[i] <NEW_LINE> response.score = scores[i] <NEW_LINE> response.save() <NEW_LINE> <DEDENT> return HttpResponse(responses)
Save changes to task scores.
625941b891f36d47f21ac351
def test_change_deposit_schema_fails(app, draft_deposits): <NEW_LINE> <INDENT> with app.app_context(): <NEW_LINE> <INDENT> deposit = Deposit.get_record(draft_deposits[0].deposit_id) <NEW_LINE> del deposit['$schema'] <NEW_LINE> with pytest.raises(AlteredRecordError): <NEW_LINE> <INDENT> deposit.commit()
Test updating the $schema field fails.
625941b8cc40096d615957ae
def create(self, subsection, block_structure=None, read_only=False): <NEW_LINE> <INDENT> self._log_event( log.info, u"create, read_only: {0}, subsection: {1}".format(read_only, subsection.location) ) <NEW_LINE> block_structure = self._get_block_structure(block_structure) <NEW_LINE> subsection_grade = self._get_saved_grade(subsection, block_structure) <NEW_LINE> if not subsection_grade: <NEW_LINE> <INDENT> subsection_grade = SubsectionGrade(subsection, self.course) <NEW_LINE> subsection_grade.init_from_structure( self.student, block_structure, self._submissions_scores, self._csm_scores, ) <NEW_LINE> if PersistentGradesEnabledFlag.feature_enabled(self.course.id): <NEW_LINE> <INDENT> if read_only: <NEW_LINE> <INDENT> self._unsaved_subsection_grades.append(subsection_grade) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> with persistence_safe_fallback(): <NEW_LINE> <INDENT> grade_model = subsection_grade.create_model(self.student) <NEW_LINE> self._update_saved_subsection_grade(subsection.location, grade_model) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return subsection_grade
Returns the SubsectionGrade object for the student and subsection. If block_structure is provided, uses it for finding and computing the grade instead of the course_structure passed in earlier. If read_only is True, doesn't save any updates to the grades.
625941b87d43ff24873a2afe
def culaDeviceDgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt): <NEW_LINE> <INDENT> status = _libcula.culaDeviceDgesvd(jobu, jobvt, m, n, int(a), lda, int(s), int(u), ldu, int(vt), ldvt) <NEW_LINE> culaCheckStatus(status)
SVD decomposition.
625941b821a7993f00bc7b44
def draw_geff(t, title, h_name, h_bins, to_draw, denom_cut, extra_num_cut, opt = "", color = kBlue, marker_st = 1, marker_sz = 1.): <NEW_LINE> <INDENT> t.Draw(to_draw + ">>num_" + h_name + h_bins, TCut("%s && %s" %(denom_cut.GetTitle(), extra_num_cut.GetTitle())), "goff") <NEW_LINE> num = TH1F(gDirectory.Get("num_" + h_name).Clone("eff_" + h_name)) <NEW_LINE> t.Draw(to_draw + ">>denom_" + h_name + h_bins, denom_cut, "goff") <NEW_LINE> den = TH1F(gDirectory.Get("denom_" + h_name).Clone("denom_" + h_name)) <NEW_LINE> eff = TGraphAsymmErrors(num, den) <NEW_LINE> if not "same" in opt: <NEW_LINE> <INDENT> num.Reset() <NEW_LINE> num.GetYaxis().SetRangeUser(0.,1.05) <NEW_LINE> num.SetStats(0) <NEW_LINE> num.SetTitle(title) <NEW_LINE> num.Draw() <NEW_LINE> <DEDENT> eff.SetLineWidth(2) <NEW_LINE> eff.SetLineColor(color) <NEW_LINE> eff.Draw(opt + " same") <NEW_LINE> eff.SetMarkerStyle(marker_st) <NEW_LINE> eff.SetMarkerColor(color) <NEW_LINE> eff.SetMarkerSize(marker_sz) <NEW_LINE> return eff
Make an efficiency plot
625941b8187af65679ca4f78
def get_scrambled(self, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> if kwargs.get('async_req'): <NEW_LINE> <INDENT> return self.__get_scrambled_with_http_info(**kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> (data) = self.__get_scrambled_with_http_info(**kwargs) <NEW_LINE> return data
get_scrambled # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_scrambled(async_req=True) >>> result = thread.get() :param async_req: bool :param str value: :return: str If the method is called asynchronously, returns the request thread.
625941b8cdde0d52a9e52e8a
def setup_logging( default_path='logger_config.yaml', default_level=logging.INFO, env_key='LOG_CFG' ): <NEW_LINE> <INDENT> path = default_path <NEW_LINE> value = os.getenv(env_key, None) <NEW_LINE> if value: <NEW_LINE> <INDENT> path = value <NEW_LINE> <DEDENT> if os.path.exists(path): <NEW_LINE> <INDENT> with open(path, 'rt') as f: <NEW_LINE> <INDENT> config = yaml.safe_load(f.read()) <NEW_LINE> <DEDENT> logging.config.dictConfig(config) <NEW_LINE> coloredlogs.install() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logging.basicConfig(level=default_level)
Setup logging configuration
625941b89f2886367277a6ec
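A usage sketch for setup_logging above: the LOG_CFG environment variable, when set, takes precedence over default_path (the path shown is hypothetical):

import os

os.environ['LOG_CFG'] = '/etc/myapp/logging.yaml'  # hypothetical path read via env_key
setup_logging(default_path='logger_config.yaml')   # LOG_CFG wins; falls back to basicConfig if the file is missing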
def __init__(self, seller_id='00-00000'): <NEW_LINE> <INDENT> self.id = seller_id + hex(timegm(datetime.datetime.now().utctimetuple()))[3:]
Generate a unique id with hexadecimal timestamp
625941b8d7e4931a7ee9dd76
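A standalone reproduction of the id scheme above, to make the slicing explicit (assumes only the standard library):

import datetime
from calendar import timegm

ts = timegm(datetime.datetime.now().utctimetuple())  # current UTC epoch seconds
suffix = hex(ts)[3:]  # hex(ts) looks like '0x65f1a2b3'; [3:] drops '0x' and the first hex digit
print('00-00000' + suffix)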
def resetError(self): <NEW_LINE> <INDENT> self.mError = False <NEW_LINE> return
Clear the previous error.
625941b891af0d3eaac9b86f
def findPlant(self, name): <NEW_LINE> <INDENT> if type(name) is str: <NEW_LINE> <INDENT> if name in self._plants: <NEW_LINE> <INDENT> return self._plants[name] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> else : <NEW_LINE> <INDENT> raise TypeError("Param 'name' given is not of type str")
Find the plant with the given name in the dictionary; return None if there is no plant with this name in the dictionary
625941b8a934411ee37514f6
def test_spell_activity_location_id_field(self): <NEW_LINE> <INDENT> field = self.spell_activity_record.find('field[@name=\'location_id\']') <NEW_LINE> self.assertEqual(field.attrib['ref'], 'nhc_def_conf_location_wa_b1', 'Incorrect location id on activity')
Make sure the location id field for the spell activity is correct
625941b826068e7796caeb33
def create(self, context, metadata, data=None): <NEW_LINE> <INDENT> image_id = str(metadata.get('id', uuid.uuid4())) <NEW_LINE> metadata['id'] = image_id <NEW_LINE> if image_id in self.images: <NEW_LINE> <INDENT> raise exception.Duplicate() <NEW_LINE> <DEDENT> self.images[image_id] = copy.deepcopy(metadata) <NEW_LINE> if data: <NEW_LINE> <INDENT> self._imagedata[image_id] = data.read() <NEW_LINE> <DEDENT> return self.images[image_id]
Store the image data and return the new image id. :raises: Duplicate if the image already exists.
625941b87c178a314d6ef2b4
def SCP(self, req: "_UPS", context: "PresentationContext") -> None: <NEW_LINE> <INDENT> if isinstance(req, N_CREATE): <NEW_LINE> <INDENT> self._n_create_scp(req, context) <NEW_LINE> <DEDENT> elif isinstance(req, N_EVENT_REPORT): <NEW_LINE> <INDENT> self._n_event_report_scp(req, context) <NEW_LINE> <DEDENT> elif isinstance(req, N_GET): <NEW_LINE> <INDENT> self._n_get_scp(req, context) <NEW_LINE> <DEDENT> elif isinstance(req, N_SET): <NEW_LINE> <INDENT> self._n_set_scp(req, context) <NEW_LINE> <DEDENT> elif isinstance(req, N_ACTION): <NEW_LINE> <INDENT> self._n_action_scp(req, context) <NEW_LINE> <DEDENT> elif isinstance(req, C_FIND): <NEW_LINE> <INDENT> self._c_find_scp(req, context) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError( f"Invalid DIMSE primitive '{req.__class__.__name__}' used " f"with Unified Procedure Step" )
The SCP implementation for Unified Procedure Step Service Class. Parameters ---------- req : dimse_primitives.N_CREATE or C_FIND or N_SET or N_GET or N_EVENT_REPORT or N_ACTION The N-CREATE, C-FIND, N-SET, N-GET, N-ACTION or N-EVENT-REPORT request primitive sent by the peer. context : presentation.PresentationContext The presentation context that the service is operating under.
625941b8a05bb46b383ec688
def get_filelist(self, input_params, settings): <NEW_LINE> <INDENT> target_date = input_params['toi'] <NEW_LINE> access_path1 = settings['dataset.'+input_params['dataset']] <NEW_LINE> pos1 = str.index(access_path1, '://') <NEW_LINE> access_path = access_path1[pos1+3:] <NEW_LINE> base_flist = [] <NEW_LINE> base_mask_flist = [] <NEW_LINE> gfp_flist = [] <NEW_LINE> gfpmask_flist = [] <NEW_LINE> base_fname_syntax = 'SPOT4_*' + target_date + '*_PENTE_*.TIF' <NEW_LINE> gfp_fname_syntax = 'SPOT4_*_PENTE_*.TIF' <NEW_LINE> base_flist = base_flist+sorted(findfile(access_path, base_fname_syntax)) <NEW_LINE> base_mask_flist = self.get_maskname(base_flist) <NEW_LINE> gfp_flist = gfp_flist+sorted(findfile(access_path, gfp_fname_syntax)) <NEW_LINE> gfp_flist = [item for item in gfp_flist if not item in base_flist] <NEW_LINE> gfpmask_flist = self.get_maskname(gfp_flist) <NEW_LINE> return base_flist, base_mask_flist, gfp_flist, gfpmask_flist
Gets the list of filenames of the available base files, GFP files and mask files
625941b8bf627c535bc13031
@api_view(['POST']) <NEW_LINE> @permission_classes([IsAuthenticated]) <NEW_LINE> def tweet_create_view(request, *args, **kwargs): <NEW_LINE> <INDENT> serializer = TweetCreateSerializer(data=request.data) <NEW_LINE> if serializer.is_valid(raise_exception=True): <NEW_LINE> <INDENT> serializer.save(user=request.user) <NEW_LINE> return Response(serializer.data, status=201) <NEW_LINE> <DEDENT> return Response({}, status=403)
DRF view that creates a tweet and returns the created tweet
625941b810dbd63aa1bd2a0a
def mageck_printdict(dict0,args,sgdict,sampledict,sampleids): <NEW_LINE> <INDENT> dfmt="{:.5g}" <NEW_LINE> ofile=open(args.output_prefix+'.normalized.txt','w') <NEW_LINE> mapres_list=['']*len(sampledict) <NEW_LINE> for (k,v) in sampledict.items(): <NEW_LINE> <INDENT> mapres_list[v]=k <NEW_LINE> <DEDENT> if len(sampledict)>0: <NEW_LINE> <INDENT> cntheader=[mapres_list[x] for x in sampleids] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cntheader=None <NEW_LINE> <DEDENT> logging.info('Writing normalized read counts to '+args.output_prefix+'.normalized.txt') <NEW_LINE> if cntheader !=None: <NEW_LINE> <INDENT> print('sgRNA\tGene\t'+'\t'.join(cntheader),file=ofile) <NEW_LINE> <DEDENT> if len(sgdict)==0: <NEW_LINE> <INDENT> for (k,v) in dict0.items(): <NEW_LINE> <INDENT> print(k+'\t'+'None'+'\t'+'\t'.join([str(x) for x in v]),file=ofile) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for (k,v) in dict0.items(): <NEW_LINE> <INDENT> if k not in sgdict: <NEW_LINE> <INDENT> logging.warning(k+' not in the sgRNA list') <NEW_LINE> continue <NEW_LINE> <DEDENT> print('\t'.join([k,sgdict[k]])+'\t'+'\t'.join([str(x) for x in v]),file=ofile) <NEW_LINE> <DEDENT> <DEDENT> ofile.close()
Write the normalized read counts to file Parameters ---------- dict0 : dict a {sgRNA: [read counts]} structure args : class an argparse class sgdict: dict a {sgrna:gene} dictionary sampledict: dict a {sample name: index} dict sampleids: list a list of sample indices. Should include control+treatment
625941b8baa26c4b54cb0f7e
def lookup(self, symbol): <NEW_LINE> <INDENT> return self.symbols.get(symbol)
Look up the srcloc for a name in the symbol table.
625941b87047854f462a1268
def _dereification_agenda(g, co_map): <NEW_LINE> <INDENT> agenda = {} <NEW_LINE> variables = g.variables() <NEW_LINE> fixed = {tgt for _, _, tgt in g.edges()}.union([g.top]) <NEW_LINE> for triple in g.triples(relation='instance'): <NEW_LINE> <INDENT> if triple.source not in fixed and triple.target in co_map: <NEW_LINE> <INDENT> rels = {t.relation: t for t in g.triples(source=triple.source) if t.relation != 'instance'} <NEW_LINE> used = set() <NEW_LINE> agendum = [] <NEW_LINE> incoming_triple = None <NEW_LINE> for role, src_role, tgt_role in co_map[triple.target]: <NEW_LINE> <INDENT> if not (src_role in rels and tgt_role in rels): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> src = rels[src_role] <NEW_LINE> tgt = rels[tgt_role] <NEW_LINE> if (src_role in used and tgt_role in used): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> elif src.target not in variables: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> agendum.append(penman.Triple(src.target, role, tgt.target, inverted=tgt.inverted)) <NEW_LINE> used.add(src_role) <NEW_LINE> used.add(tgt_role) <NEW_LINE> if src.inverted: <NEW_LINE> <INDENT> incoming_triple = src <NEW_LINE> <DEDENT> elif tgt.inverted: <NEW_LINE> <INDENT> incoming_triple = tgt <NEW_LINE> <DEDENT> <DEDENT> if used == set(rels): <NEW_LINE> <INDENT> assert incoming_triple is not None <NEW_LINE> agenda[triple.source] = (incoming_triple, agendum) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return agenda
Find eligible dereifications and return the replacements.
625941b85e10d32532c5ed8a
def get_due(self, index): <NEW_LINE> <INDENT> return self.__getitem__(index)['due']
Return the due date of the task with the given index.
625941b885dfad0860c3acb4
def factorial(n): <NEW_LINE> <INDENT> if n <= 1: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return n * factorial(n-1)
Computes the factorial using recursion
625941b8d486a94d0b98dfa8
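A quick check of the recursive factorial above (with the n <= 1 base case, which also covers factorial(0) without infinite recursion):

assert factorial(1) == 1
assert factorial(5) == 120   # 5 * 4 * 3 * 2 * 1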
def delete_list(self, filename_list): <NEW_LINE> <INDENT> remote_list = self.list() <NEW_LINE> for filename in filename_list[:]: <NEW_LINE> <INDENT> c = re.compile(r'%s(?:\.vol[\d+]*)?\.par2' % filename) <NEW_LINE> for remote_filename in remote_list: <NEW_LINE> <INDENT> if c.match(remote_filename): <NEW_LINE> <INDENT> filename_list.append(remote_filename) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return self.wrapped_backend._delete_list(filename_list)
Delete the given filename_list and all .par2 files that belong to them
625941b84428ac0f6e5ba64d
def test_get_tag(self): <NEW_LINE> <INDENT> expected = self.tags <NEW_LINE> actual = self.post.tags <NEW_LINE> self.assertEqual(expected, actual)
Get list of tags from Blog Post
625941b8a17c0f6771cbdeaf
def record_statistic(self, event, record): <NEW_LINE> <INDENT> if event not in self.statistics: <NEW_LINE> <INDENT> self.statistics[event] = [record] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.statistics[event].append(record)
Record a stat.
625941b830bbd722463cbc1e
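As a design note, the membership test in record_statistic can be avoided with collections.defaultdict; a minimal sketch, not the author's code:

from collections import defaultdict

statistics = defaultdict(list)
statistics['cache_hit'].append(1)   # no 'if event not in ...' branch needed
statistics['cache_hit'].append(2)
print(dict(statistics))             # {'cache_hit': [1, 2]}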
def create_crawl_request(session: Session, crawl_uuid: UUID, request: Request): <NEW_LINE> <INDENT> upsert_url(session, request.url) <NEW_LINE> crawl_request = CrawlRequest( crawl_uuid=crawl_uuid, url_uuid=request.url.url_uuid, requested=datetime.utcnow().replace(tzinfo=timezone.utc), got_response=False, ) <NEW_LINE> session.add(crawl_request)
Record a request that was made
625941b876d4e153a657e98b
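An equivalent, slightly more direct way to build the timezone-aware timestamp used above:

from datetime import datetime, timezone

# Same instant as datetime.utcnow().replace(tzinfo=timezone.utc)
requested = datetime.now(timezone.utc)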
def sum_counts_by_consensus(otu_table, level, missing_name='Other'): <NEW_LINE> <INDENT> result = {} <NEW_LINE> sample_map = dict([(s,i) for i,s in enumerate(otu_table[0])]) <NEW_LINE> for counts, consensus in zip(otu_table[2], otu_table[3]): <NEW_LINE> <INDENT> n_ranks = len(consensus) <NEW_LINE> if n_ranks > level: <NEW_LINE> <INDENT> consensus = consensus[:level] <NEW_LINE> <DEDENT> elif n_ranks < level: <NEW_LINE> <INDENT> consensus.extend([missing_name for i in range(level - n_ranks)]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> consensus = tuple(consensus) <NEW_LINE> if consensus in result: <NEW_LINE> <INDENT> result[consensus] += counts <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[consensus] = counts.copy() <NEW_LINE> <DEDENT> <DEDENT> return result, sample_map
Returns a dict keyed by consensus, valued by OTU counts. OTU counts are summed together if they have the same consensus. If the consensus string doesn't reach level, missing_name is appended until the taxonomy string is of length level.
625941b856ac1b37e626403b
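A usage sketch for sum_counts_by_consensus above, assuming the legacy 4-tuple OTU-table layout (sample_ids, otu_ids, counts, lineages); the values below are made up:

import numpy as np

otu_table = (['s1', 's2'],
             ['otu1', 'otu2'],
             np.array([[3, 1], [2, 5]]),
             [['Bacteria', 'Firmicutes'], ['Bacteria']])
result, sample_map = sum_counts_by_consensus(otu_table, level=2)
# The short lineage is padded with 'Other' up to level:
# result     -> {('Bacteria', 'Firmicutes'): array([3, 1]), ('Bacteria', 'Other'): array([2, 5])}
# sample_map -> {'s1': 0, 's2': 1}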
def log_values(args): <NEW_LINE> <INDENT> args = args.__dict__ <NEW_LINE> for section, name in zip(SECTIONS, NICE_NAMES): <NEW_LINE> <INDENT> entries = sorted((k for k in args.keys() if k.replace('_', '-') in SECTIONS[section])) <NEW_LINE> if entries: <NEW_LINE> <INDENT> LOG.debug(name) <NEW_LINE> for entry in entries: <NEW_LINE> <INDENT> value = args[entry] if args[entry] is not None else "-" <NEW_LINE> LOG.debug(" {:<16} {}".format(entry, value))
Log all values set in the args namespace. Arguments are grouped according to their section and logged alphabetically using the DEBUG log level thus --verbose is required.
625941b8925a0f43d2549ccf
def mocked_requests_post(url, data, *args, **kwargs): <NEW_LINE> <INDENT> class MockResponse: <NEW_LINE> <INDENT> def __init__(self, json_data, status_code): <NEW_LINE> <INDENT> self.json_data = json_data <NEW_LINE> self.status_code = status_code <NEW_LINE> <DEDENT> def json(self): <NEW_LINE> <INDENT> return self.json_data <NEW_LINE> <DEDENT> <DEDENT> if "token" in data and data["token"] and data["token"] != "12345678900": <NEW_LINE> <INDENT> return MockResponse({ "active": True, "scope": "read write dolphin", "client_id": "client_id_{}".format(data["token"]), "username": "{}_user".format(data["token"]), "exp": int(calendar.timegm(exp.timetuple())), }, 200) <NEW_LINE> <DEDENT> return MockResponse({ "active": False, }, 200)
Mock the response from the authentication server
625941b83c8af77a43ae35fa
def create_analysis_result( self, experiment_id: str, result_data: Dict, result_type: str, device_components: Optional[Union[List[Union[str, DeviceComponent]], str, DeviceComponent]] = None, tags: Optional[List[str]] = None, quality: Union[ResultQuality, str] = ResultQuality.UNKNOWN, verified: bool = False, result_id: Optional[str] = None, chisq: Optional[float] = None, json_encoder: Type[json.JSONEncoder] = json.JSONEncoder, **kwargs: Any, ) -> str: <NEW_LINE> <INDENT> if kwargs: <NEW_LINE> <INDENT> logger.info("Keywords %s are not supported by IBM Quantum experiment service " "and will be ignored.", kwargs.keys()) <NEW_LINE> <DEDENT> components = [] <NEW_LINE> if device_components: <NEW_LINE> <INDENT> if not isinstance(device_components, list): <NEW_LINE> <INDENT> device_components = [device_components] <NEW_LINE> <DEDENT> for comp in device_components: <NEW_LINE> <INDENT> components.append(str(comp)) <NEW_LINE> <DEDENT> <DEDENT> if isinstance(quality, str): <NEW_LINE> <INDENT> quality = ResultQuality(quality.upper()) <NEW_LINE> <DEDENT> request = self._analysis_result_to_api( experiment_id=experiment_id, device_components=components, data=result_data, result_type=result_type, tags=tags, quality=quality, verified=verified, result_id=result_id, chisq=chisq ) <NEW_LINE> with map_api_error(f"Analysis result {result_id} already exists."): <NEW_LINE> <INDENT> response = self._api_client.analysis_result_upload( json.dumps(request, cls=json_encoder)) <NEW_LINE> <DEDENT> return response['uuid']
Create a new analysis result in the database. Args: experiment_id: ID of the experiment this result is for. result_data: Result data to be stored. result_type: Analysis result type. device_components: Target device components, such as qubits. tags: Tags to be associated with the analysis result. quality: Quality of this analysis. verified: Whether the result quality has been verified. result_id: Analysis result ID. It must be in the ``uuid4`` format. One will be generated if not supplied. chisq: chi^2 decimal value of the fit. json_encoder: Custom JSON encoder to use to encode the analysis result. kwargs: Additional analysis result attributes that are not supported and will be ignored. Returns: Analysis result ID. Raises: IBMExperimentEntryExists: If the analysis result already exists. IBMQApiError: If the request to the server failed.
625941b87cff6e4e811177e1
def invalidate_memcache(self): <NEW_LINE> <INDENT> current_window = xbmcgui.getCurrentWindowId() <NEW_LINE> window = xbmcgui.Window(current_window) <NEW_LINE> try: <NEW_LINE> <INDENT> window.setProperty('memcache', pickle.dumps({}, protocol=0).decode('latin-1')) <NEW_LINE> <DEDENT> except EOFError: <NEW_LINE> <INDENT> self.nx_common.log(msg='invalidate_memcache failed') <NEW_LINE> pass
Invalidates the memory cache
625941b8097d151d1a222cb7
def _repr_(self): <NEW_LINE> <INDENT> vals = self.augmentation_chain() <NEW_LINE> vals.reverse() <NEW_LINE> vals = [ "v(%s) = %s"%(v._phi, v._mu) if isinstance(v, AugmentedValuation_base) else str(v) for v in vals ] <NEW_LINE> return "[ %s ]"%", ".join(vals)
Return a printable representation of this valuation. EXAMPLES:: sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone sage: R.<u> = Qq(4, 5) sage: S.<x> = R[] sage: v = GaussValuation(S) sage: w = v.augmentation(x^2 + x + u, 1/2) sage: w # indirect doctest [ Gauss valuation induced by 2-adic valuation, v((1 + O(2^5))*x^2 + (1 + O(2^5))*x + u + O(2^5)) = 1/2 ]
625941b85166f23b2e1a4fb5
def collectBlockdev(): <NEW_LINE> <INDENT> data = blockdev() <NEW_LINE> devices = list() <NEW_LINE> try: <NEW_LINE> <INDENT> devlist = DiskUtil.getDevices() <NEW_LINE> cmd = "mount | awk '{if( $3==\"/\" ) print $1}' |" " sed 's/\/dev\///' | sed 's/[0-9]//'" <NEW_LINE> (status, output) = subprocess.getstatusoutput(cmd) <NEW_LINE> if (status != 0): <NEW_LINE> <INDENT> g_logger.logExit(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd + " Error: \n%s" % output) <NEW_LINE> <DEDENT> for dev in devlist: <NEW_LINE> <INDENT> if (dev.strip() == output.strip()): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> devices.append("/dev/%s" % dev) <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> data.errormsg = e.__str__() <NEW_LINE> <DEDENT> for d in devices: <NEW_LINE> <INDENT> p = subprocess.Popen(["/sbin/blockdev", "--getra", "%s" % d], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) <NEW_LINE> result = p.communicate() <NEW_LINE> data.errormsg += result[1].decode().strip() <NEW_LINE> if p.returncode: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> data.ra[d] = result[0].decode().strip() <NEW_LINE> <DEDENT> return data
function : collect blockdev information input : NA output : instance
625941b863f4b57ef0000f7e
def test_worker_api(self): <NEW_LINE> <INDENT> worker_name = "__abipy_worker_unittest__" <NEW_LINE> worker_dir = os.path.join(ABIPY_DIRPATH, "worker_" + worker_name) <NEW_LINE> if os.path.isdir(worker_dir): <NEW_LINE> <INDENT> remove(worker_dir) <NEW_LINE> <DEDENT> worker = AbipyWorker.new_with_name(worker_name=worker_name, scratch_dir="/tmp", scheduler_path=None, manager_path=None, mng_connector=None, verbose=1) <NEW_LINE> assert repr(worker) <NEW_LINE> assert str(worker) <NEW_LINE> assert isinstance(worker.manager, TaskManager) <NEW_LINE> filepath = worker.write_state_file(status="dead", filepath=None) <NEW_LINE> state = WorkerState.from_json_file(filepath) <NEW_LINE> assert state.name == worker_name <NEW_LINE> assert state.status == "dead" <NEW_LINE> clients = WorkerClients.lscan(dirpath=None) <NEW_LINE> assert len(clients) > 0 <NEW_LINE> assert repr(clients) <NEW_LINE> assert str(clients) <NEW_LINE> assert all(isinstance(c, WorkerClient) for c in clients) <NEW_LINE> clients.print_dataframe() <NEW_LINE> assert clients.get_dataframe() is not None <NEW_LINE> d = clients.as_dict() <NEW_LINE> same_clients = WorkerClients.from_dict(d) <NEW_LINE> assert type(clients) is type(same_clients) <NEW_LINE> print("same_clients\n", same_clients) <NEW_LINE> c = clients.select_from_worker_name(worker_name) <NEW_LINE> assert repr(c) <NEW_LINE> assert str(c) <NEW_LINE> assert c.worker_state.name == worker_name <NEW_LINE> assert c.is_local_worker and not c.is_remote_worker <NEW_LINE> d = c.as_dict() <NEW_LINE> same_c = WorkerClient.from_dict(d) <NEW_LINE> assert same_c.worker_state.name == c.worker_state.name <NEW_LINE> if os.path.isdir(worker_dir): <NEW_LINE> <INDENT> remove(worker_dir) <NEW_LINE> <DEDENT> clients = WorkerClients.lscan(dirpath=None) <NEW_LINE> with self.assertRaises(ValueError): <NEW_LINE> <INDENT> clients.select_from_worker_name(worker_name)
Testing AbipyWorker.
625941b8fbf16365ca6f6018
def AddProjectName(self, items): <NEW_LINE> <INDENT> self.projects[int(items[0])] = items[1]
Add a project name to the list of datasets.
625941b8377c676e91272006
def ChkUnicomBarcode(code): <NEW_LINE> <INDENT> code = (code).strip() <NEW_LINE> if code.startswith('123706-') and len(code) == 15: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif len(code)==8: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
Check whether the code is a China Unicom company asset number :rtype : Boolean :param code: barcode
625941b830c21e258bdfa2f9
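A few hypothetical asset numbers exercising both accepted formats:

assert ChkUnicomBarcode('123706-12345678') is True   # '123706-' prefix, 15 chars total
assert ChkUnicomBarcode('12345678') is True          # bare 8-char code
assert ChkUnicomBarcode('999999-123') is False       # wrong prefix and length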
def update_known_hosts( self, node_labels=None, to_add=None, to_add_hosts=None, communication_list=None, name="http.host.update_known_hosts", ): <NEW_LINE> <INDENT> if to_add_hosts and to_add: <NEW_LINE> <INDENT> raise AssertionError( "Cannot specify both 'to_add_hosts' and 'to_add'" ) <NEW_LINE> <DEDENT> if to_add_hosts: <NEW_LINE> <INDENT> to_add = { name: { "dest_list": [ {"addr": name, "port": settings.pcsd_default_port} ] } for name in to_add_hosts } <NEW_LINE> <DEDENT> add_with_token = { name: dict(data, token=None) for name, data in to_add.items() } <NEW_LINE> place_multinode_call( self.__calls, name, node_labels, communication_list, action="remote/known_hosts_change", param_list=[ ( "data_json", json.dumps( dict( known_hosts_add=add_with_token, known_hosts_remove={}, ) ), ) ], )
Create a call for updating known hosts on the hosts. node_labels list -- create success responses from these nodes dict to_add -- records to add: {host_name: {dest_list: [{"addr": , "port": ,}]}} list to_add_hosts -- constructs to_add from host names communication_list list -- create custom responses name string -- the key of this call
625941b831939e2706e4cccc
def toRawData(self, data): <NEW_LINE> <INDENT> pass
If a subclass needs to modify data before it is written to the cache on disk, do it here
625941b8e5267d203edcdafd
def pop_back(self): <NEW_LINE> <INDENT> returnnode = self._tail <NEW_LINE> if self._tail == self._head: <NEW_LINE> <INDENT> self._head, self._tail = None,None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> node = self._head <NEW_LINE> while node.next != self._tail: <NEW_LINE> <INDENT> node = node.next <NEW_LINE> <DEDENT> node.next = None <NEW_LINE> self._tail = node <NEW_LINE> <DEDENT> self._size -=1 <NEW_LINE> return returnnode.data
Removes the last node of the linked list and returns its data
625941b8b7558d58953c4d77
def iterate_over(self, path: PathStr, ks001: KS001Str, data_type: DataTypeStr) -> Iterable[Any]: <NEW_LINE> <INDENT> yield from self.get_manager_of(data_type).iterate_over(self, path, ks001, data_type)
Open the resource specified and then perform an iteration over such resource. The semantic of the implementation depends on the resource type loaded :param path: the path of the resource to open :param ks001: :param data_type: :return:
625941b84d74a7450ccd401e
def test_pubkeyhash(): <NEW_LINE> <INDENT> txn = make_synthetic_transaction(5) <NEW_LINE> indices = randomize_list(txn['vout']) <NEW_LINE> for ctr in list(range(len(indices))): <NEW_LINE> <INDENT> val = txn['vout'][indices[ctr]]['ScriptPubKey'][2] <NEW_LINE> val = rcrypt.make_RIPEMD160_hash(rcrypt.make_SHA256_hash(val)) <NEW_LINE> assert rcrypt.validate_RIPEMD160_hash(val) == True
test a public key hash in scriptpubkey for a valid RIPEMD-160 format
625941b84d74a7450ccd401f
def spawnCubes(numCols, numRows, numLayers, center=(0, 0, 0)): <NEW_LINE> <INDENT> tID_cube = addTexturedCubeTemplates(numCols, numRows, numLayers) <NEW_LINE> client = pyazrael.AzraelClient() <NEW_LINE> cube_size = 2 + 0.1 <NEW_LINE> positions = np.array(list(np.ndindex(numCols, numRows, numLayers))) <NEW_LINE> positions = positions - np.mean(positions, axis=0) <NEW_LINE> positions = positions * cube_size + center <NEW_LINE> t0 = time.time() <NEW_LINE> allObjs = [ {'templateID': tID_cube[idx], 'rbs': {'position': pos.tolist()}} for idx, pos in enumerate(positions) ] <NEW_LINE> print('Spawning {} objects: '.format(len(allObjs)), end='', flush=True) <NEW_LINE> ret = client.spawn(allObjs) <NEW_LINE> if not ret.ok: <NEW_LINE> <INDENT> print('** Error:') <NEW_LINE> print(ret) <NEW_LINE> assert False <NEW_LINE> <DEDENT> print(' {:.1f}s'.format(time.time() - t0)) <NEW_LINE> for objID in ret.data: <NEW_LINE> <INDENT> cmd = {objID: {'frag_2': {'op': 'mod', 'scale': 0}}} <NEW_LINE> assert client.setFragments(cmd).ok <NEW_LINE> assert client.setObjectTags({objID: 'asteroid'}).ok
Spawn multiple cubes in a regular grid. The number of cubes equals ``numCols`` * ``numRows`` * ``numLayers``. The center of this "prism" is at ``center``. Every cube has two boosters and two factories. The factories can themselves spawn more (purely passive) cubes.
625941b87b180e01f3dc4661
def _wrapJs(context, jsobj, var_name, val): <NEW_LINE> <INDENT> from javascript import JSContext, JSFunction <NEW_LINE> assert (isinstance(context, JSContext)) <NEW_LINE> if jsobj.IsFunction(context) and not isinstance(jsobj, JSFunction): <NEW_LINE> <INDENT> wrapped = JSFunction(context, obj=jsobj._object(), thisobj=NULL, name=var_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> wrapped = jsobj <NEW_LINE> <DEDENT> return wrapped
Wrap a provided js object as a python object, with a given js name. Can set a flag if js object is callable to make it callable in python too. This is to be used only internally
625941b83346ee7daa2b2bc5
def wait_for_fixation_end(self): <NEW_LINE> <INDENT> pass
desc: | Returns time and gaze position when a fixation has ended; function assumes that a 'fixation' has ended when a deviation of more than self.pxfixtresh from the initial fixation position has been detected (self.pxfixtresh is created in self.calibration, based on self.fixtresh, a property defined in self.__init__). Detection based on Dalmaijer et al. (2013) if EVENTDETECTION is set to 'pygaze', or using native detection functions if EVENTDETECTION is set to 'native' (NOTE: not every system has native functionality; will fall back to 'pygaze' if 'native' is not available!) returns: desc: A `time, gazepos` tuple. Time is the end time in milliseconds (from expstart), gazepos is a (x,y) gaze position tuple of the position from which the fixation was initiated. type: tuple
625941b84428ac0f6e5ba64e
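Since the body above is only a stub, here is a hedged sketch of the deviation-based detection the docstring describes (not PyGaze's implementation; sample is a hypothetical callable returning the latest (x, y) gaze position):

import math
import time

def wait_for_fixation_end_sketch(sample, pxfixtresh):
    sx, sy = sample()                                 # position the fixation started from
    while True:
        x, y = sample()
        if math.hypot(x - sx, y - sy) > pxfixtresh:   # deviation beyond threshold ends the fixation
            return time.time() * 1000, (sx, sy)       # (end time in ms, start position)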
def process_eaf(input_elan_file: str, tier_name: str) -> List[dict]: <NEW_LINE> <INDENT> input_directory, full_file_name = os.path.split(input_elan_file) <NEW_LINE> file_name, extension = os.path.splitext(full_file_name) <NEW_LINE> input_eaf = Eaf(input_elan_file) <NEW_LINE> if os.path.isfile(os.path.join(input_directory, file_name + ".wav")): <NEW_LINE> <INDENT> print("WAV file found for " + file_name, file=sys.stderr) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(f"WAV file not found for {full_file_name}. " f"Please put it next to the eaf file in {input_directory}.") <NEW_LINE> <DEDENT> annotations = sorted(input_eaf.get_annotation_data_for_tier(tier_name)) <NEW_LINE> parameters = input_eaf.get_parameters_for_tier(tier_name) <NEW_LINE> speaker_id = parameters.get("PARTICIPANT", "") <NEW_LINE> annotations_data = [] <NEW_LINE> for annotation in annotations: <NEW_LINE> <INDENT> start = annotation[0] <NEW_LINE> end = annotation[1] <NEW_LINE> annotation = annotation[2] <NEW_LINE> obj = { "audio_file_name": f"{file_name}.wav", "transcript": annotation, "start_ms": start, "stop_ms": end } <NEW_LINE> if "PARTICIPANT" in parameters: <NEW_LINE> <INDENT> obj["speaker_id"] = speaker_id <NEW_LINE> <DEDENT> annotations_data.append(obj) <NEW_LINE> <DEDENT> return annotations_data
Method to process a particular tier in an eaf file (ELAN Annotation Format). It stores the transcriptions in the following format: {'speaker_id': <speaker_id>, 'audio_file_name': <file_name>, 'transcript': <transcription_label>, 'start_ms': <start_time_in_milliseconds>, 'stop_ms': <stop_time_in_milliseconds>} :param input_elan_file: name of input_scripts elan file :param tier_name: name of the elan tier to process. these tiers are nodes from the tree structure in the .eaf file. :return: a list of dictionaries, where each dictionary is an annotation
625941b876e4537e8c3514d3
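A usage sketch for process_eaf above (file path and tier name are hypothetical; the .eaf must sit next to its .wav):

annotations = process_eaf('recordings/session1.eaf', 'Phrase')
for a in annotations[:3]:
    print(a['transcript'], a['start_ms'], a['stop_ms'])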
def _summarize(self, d, level=1): <NEW_LINE> <INDENT> if 'header_key' in d.keys(): <NEW_LINE> <INDENT> print("""{}* header['{}'] '{}'""".format(' '*level, d['header_key'], d['value'])) <NEW_LINE> print('\n') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for k, v in d.items(): <NEW_LINE> <INDENT> func = eval('self._{}'.format(k)) <NEW_LINE> print('{}* {}:'.format(' '*level, self._get_func_docstring(func))) <NEW_LINE> if isinstance(v, dict): <NEW_LINE> <INDENT> self._summarize(v, level+1) <NEW_LINE> <DEDENT> elif v: <NEW_LINE> <INDENT> print('{}{}'.format(' '*level, v))
Summarize function that will either print the leaf values or will go one level deeper.
625941b897e22403b379cdf5
@data_set.command('list') <NEW_LINE> @click.option('--count', type=click.IntRange(1, 10000), default=1000, show_default=True, help='Maximum number of data to list') <NEW_LINE> @click.option('--id', help='id') <NEW_LINE> @click.option('--name', help='name') <NEW_LINE> @click.option('--memo', help='memo') <NEW_LINE> @click.option('--created-at', help='created at') <NEW_LINE> def list_datasets(count, id, name, memo, created_at): <NEW_LINE> <INDENT> api = rest.DataSetApi(configuration.get_api_client()) <NEW_LINE> per_page = 1000 <NEW_LINE> command_args = { 'id': id, 'name': name, 'memo': memo, 'created_at': created_at, } <NEW_LINE> args = {key: value for key, value in command_args.items() if value is not None} <NEW_LINE> if count <= per_page: <NEW_LINE> <INDENT> result = api.list_datasets(per_page=count, **args) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> total_pages = (count - 1) // per_page + 1 <NEW_LINE> result = [] <NEW_LINE> for page in range(1, total_pages + 1): <NEW_LINE> <INDENT> page_result = api.list_datasets(page=page, **args) <NEW_LINE> result.extend(page_result) <NEW_LINE> if len(page_result) < per_page: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> pprint.pp_table(['id', 'name', 'created_at', 'memo'], [[x.id, x.name, x.created_at, x.memo] for x in result[:count]])
List datasets filtered by conditions
625941b8283ffb24f3c55768
def readWords(filename): <NEW_LINE> <INDENT> f = open(filename,'r') <NEW_LINE> strings = f.readlines() <NEW_LINE> words = [getWord(s) for s in strings] <NEW_LINE> return words
takes a file name and returns a list of vocabulary words from its data.
625941b8adb09d7d5db6c5ef
def lomo_sk(self, spin, kpoint): <NEW_LINE> <INDENT> return self._electron_state(spin, kpoint, 0)
Returns the LOMO state for the given spin, kpoint. Args: spin: Spin index kpoint: Index of the kpoint or |Kpoint| object.
625941b88e71fb1e9831d609
def _compute_centroids(self, X): <NEW_LINE> <INDENT> centroids=[] <NEW_LINE> for j in range(self.n_clusters): <NEW_LINE> <INDENT> arr = X[self.labels_==j] <NEW_LINE> if len(arr)-np.isnan(arr).sum()==0: <NEW_LINE> <INDENT> arr = X <NEW_LINE> <DEDENT> with warnings.catch_warnings(): <NEW_LINE> <INDENT> warnings.simplefilter("ignore", category=RuntimeWarning) <NEW_LINE> centroids.append(np.nanmean(arr, axis=0)) <NEW_LINE> <DEDENT> <DEDENT> return np.array(centroids)
compute the centroids for the datapoints in X from the current values of self.labels_ Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Data points to assign to clusters based on distance metric returns new centroids
625941b88a349b6b435e7fd0
def find_neighbours(self, cell): <NEW_LINE> <INDENT> delta = [('W', (-1, 0)), ('E', (1, 0)), ('S', (0, 1)), ('N', (0, -1))] <NEW_LINE> neighbours = [] <NEW_LINE> for direction, (dx, dy) in delta: <NEW_LINE> <INDENT> x2, y2 = cell.x + dx, cell.y + dy <NEW_LINE> if (0 <= x2 < self.nx) and (0 <= y2 < self.ny): <NEW_LINE> <INDENT> neighbour = self.cell_at(x2, y2) <NEW_LINE> neighbours.append((direction, neighbour)) <NEW_LINE> <DEDENT> <DEDENT> return neighbours
Return a list of all neighbours to cell.
625941b8d10714528d5ffb3b
def db_for_read(self, model, **hints): <NEW_LINE> <INDENT> if model._meta.app_label == 'WaterSaver': <NEW_LINE> <INDENT> return 'raspi' <NEW_LINE> <DEDENT> return None
Send all read operations on WaterSaver app models to `raspi`.
625941b88e7ae83300e4ae28
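For context, a router like this is registered via Django's DATABASE_ROUTERS setting; a sketch assuming the class above is named WaterSaverRouter and lives in myapp/routers.py (both names hypothetical):

# settings.py
DATABASE_ROUTERS = ['myapp.routers.WaterSaverRouter']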
def scroll_to_more_bottom(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> js = "var q=document.documentElement.scrollTop=1000" <NEW_LINE> self.script(js) <NEW_LINE> time.sleep(2) <NEW_LINE> <DEDENT> except Exception as msg: <NEW_LINE> <INDENT> return "异常原因%s" % msg
Scroll a bit further toward the bottom :return: returns the exception reason on failure
625941b8dd821e528d63b007
def __auth(self, **kwargs): <NEW_LINE> <INDENT> username = kwargs.get('username') <NEW_LINE> password = kwargs.get('password') <NEW_LINE> if not username or not password: <NEW_LINE> <INDENT> return 99 <NEW_LINE> <DEDENT> user_auth = Auth() <NEW_LINE> result = user_auth.authentication(username, password) <NEW_LINE> if not result: <NEW_LINE> <INDENT> return 98 <NEW_LINE> <DEDENT> self.username = result.get('username') <NEW_LINE> self.user_quota = result.get('quota') <NEW_LINE> self.home_dir = result.get('home_dir') <NEW_LINE> self.current_path = self.home_dir <NEW_LINE> info = self.__os_info() <NEW_LINE> info.update(self.__client_path(self.home_dir)) <NEW_LINE> return {'data': info}
User login authentication :param kwargs: :return:
625941b8596a897236089926
def txtweet(self, sender, arguments): <NEW_LINE> <INDENT> if self.is_root(sender): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cmd, pattern = arguments.split() <NEW_LINE> if cmd.lower() in ["start", "stop"]: <NEW_LINE> <INDENT> rows = utils.search_db(self.conn, 'SELECT address, names, hashtags FROM btcaddresses') <NEW_LINE> trkcount = 0 <NEW_LINE> for row in rows: <NEW_LINE> <INDENT> if pattern == row[0] or pattern.lower() in row[1].lower() or pattern.lower() in row[2].lower(): <NEW_LINE> <INDENT> if cmd.lower() == "start": <NEW_LINE> <INDENT> query = 'UPDATE btcaddresses SET dotweet=1 WHERE address="%s"' % (row[0]) <NEW_LINE> utils.populate_db(self.conn, query) <NEW_LINE> trkcount += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> query = 'UPDATE btcaddresses SET dotweet=0 WHERE address="%s"' % (row[0]) <NEW_LINE> utils.populate_db(self.conn, query) <NEW_LINE> trkcount += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if trkcount: <NEW_LINE> <INDENT> return "updated %d rows matching pattern: %s" % (trkcount, pattern) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.error = "could not find any rows matching pattern: %s" % (pattern) <NEW_LINE> return None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.error = "incorrect subcommand for this command: txtweet %s" % (arguments) <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> import traceback <NEW_LINE> traceback.print_exc() <NEW_LINE> self.error = "incorrect params for this command: txtweet %s" % (arguments) <NEW_LINE> return None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.error = "user %s is not authorized" % (sender) <NEW_LINE> return None
user: root txtweet start|stop address|name1|hashtag2 will loop over all addresses and enable|disable tweeting live txs for those that match params
625941b8460517430c393fea
def _atoms_in_residues(top, residue_idxs, subset_of_atom_idxs=None, fallback_to_full_residue=True, MDlogger=None): <NEW_LINE> <INDENT> atoms_in_residues = [] <NEW_LINE> if subset_of_atom_idxs is None: <NEW_LINE> <INDENT> subset_of_atom_idxs = np.arange(top.n_atoms) <NEW_LINE> <DEDENT> special_residues = [] <NEW_LINE> for rr in top.residues: <NEW_LINE> <INDENT> if rr.index in residue_idxs: <NEW_LINE> <INDENT> toappend = np.array([aa.index for aa in rr.atoms if aa.index in subset_of_atom_idxs]) <NEW_LINE> if len(toappend) == 0: <NEW_LINE> <INDENT> special_residues.append(rr) <NEW_LINE> if fallback_to_full_residue: <NEW_LINE> <INDENT> toappend = np.array([aa.index for aa in rr.atoms]) <NEW_LINE> <DEDENT> <DEDENT> atoms_in_residues.append(toappend) <NEW_LINE> <DEDENT> <DEDENT> if len(special_residues) != 0 and hasattr(MDlogger, 'warning'): <NEW_LINE> <INDENT> if fallback_to_full_residue: <NEW_LINE> <INDENT> msg = 'the full residue' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> msg = 'empty lists' <NEW_LINE> <DEDENT> MDlogger.warning("These residues yielded no atoms in the subset and were returned as %s: %s " % ( msg, ''.join(['%s, ' % rr for rr in special_residues])[:-2])) <NEW_LINE> <DEDENT> return atoms_in_residues
Returns a list of ndarrays containing the atom indices in each residue of :obj:`residue_idxs` :param top: mdtraj.Topology :param residue_idxs: list or ndarray (ndim=1) of integers :param subset_of_atom_idxs : iterable of atom_idxs to which the selection has to be restricted. If None, all atoms considered :param fallback_to_full_residue : it is possible that some residues don't yield any atoms with some subsets. Take all atoms in that case. If False, then [] is returned for that residue :param MDlogger: If provided, a warning will be issued when falling back to full residue :return: list of length==len(residue_idxs)) of ndarrays (ndim=1) containing the atom indices in each residue of residue_idxs
625941b83eb6a72ae02ec337
def p_value_to_stars(p_value, alpha=(0.05, 0.01, 0.001)): <NEW_LINE> <INDENT> return len([_alpha for _alpha in alpha if p_value <= _alpha]) * '*'
Return string containing as many stars as the number of significance levels in alpha (a tuple of significance levels, order-independent) that p_value is less than or equal to. >>> p_value_to_stars(0.075) '' >>> p_value_to_stars(0.05) '*' >>> p_value_to_stars(0.025) '*' >>> p_value_to_stars(0.0099) '**' >>> p_value_to_stars(0.005) '**' >>> p_value_to_stars(0.0025) '**' >>> p_value_to_stars(0.00099) '***'
625941b8d8ef3951e3243399
@deprecate("Call node.split_recursive instead.") <NEW_LINE> def bsp_split_recursive( node: tcod.bsp.BSP, randomizer: Optional[tcod.random.Random], nb: int, minHSize: int, minVSize: int, maxHRatio: int, maxVRatio: int, ) -> None: <NEW_LINE> <INDENT> node.split_recursive( nb, minHSize, minVSize, maxHRatio, maxVRatio, randomizer )
.. deprecated:: 2.0 Use :any:`BSP.split_recursive` instead.
625941b8851cf427c661a377
def switch_mode(self, inference): <NEW_LINE> <INDENT> hasLUT = isinstance(self.layers[0], LookupTable) <NEW_LINE> cur_steps = self.in_shape[0] if hasLUT else self.in_shape[1] <NEW_LINE> if not inference: <NEW_LINE> <INDENT> old_size = cur_steps <NEW_LINE> new_size = self.full_steps <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> old_size = cur_steps <NEW_LINE> new_size = 1 <NEW_LINE> <DEDENT> if old_size != new_size: <NEW_LINE> <INDENT> if hasLUT: <NEW_LINE> <INDENT> in_obj = (new_size, 1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> in_obj = (self.out_shape[0], new_size) <NEW_LINE> <DEDENT> self.configure(in_obj=in_obj) <NEW_LINE> for l in self.layers: <NEW_LINE> <INDENT> if l.owns_output: <NEW_LINE> <INDENT> l.outputs = None <NEW_LINE> <DEDENT> <DEDENT> self.allocate(shared_outputs=None) <NEW_LINE> for l in self.layers: <NEW_LINE> <INDENT> l.name += "'"
Dynamically grow or shrink the number of time steps to perform single time step fprop during inference.
625941b8bde94217f3682c59
def test_output_shape(): <NEW_LINE> <INDENT> assert sm.summary(toy_data).shape[1] <= toy_data.shape[1]
Test that output has the correct shape. Can't have more output columns than data input columns
625941b8cb5e8a47e48b790b
def distance_from_region(label_mask, distance_mask=None, scale=1, ord=2): <NEW_LINE> <INDENT> if distance_mask is None: <NEW_LINE> <INDENT> distance_mask = np.ones(label_mask.shape, dtype=bool) <NEW_LINE> <DEDENT> assert label_mask.shape == distance_mask.shape <NEW_LINE> scale = np.array(scale) <NEW_LINE> output = np.zeros(label_mask.shape) <NEW_LINE> indxs = np.indices(label_mask.shape) <NEW_LINE> X = indxs[:, distance_mask].T <NEW_LINE> Y = indxs[:, label_mask].T <NEW_LINE> for x in X: <NEW_LINE> <INDENT> output[tuple(x)] = np.linalg.norm(scale*(x-Y), ord=ord, axis=1).min() <NEW_LINE> <DEDENT> return np.ma.array(output, mask=np.logical_not(distance_mask))
Find the distance at every point in an image from a set of labeled points. Parameters ========== label_mask : ndarray A mask designating the points to find the distance from. A True value indicates that the pixel is in the region, a False value indicates it is not. distance_mask : ndarray A mask indicating which regions to calculate the distance in scale : int Scale the calculated distance to another distance measure (eg. to millimeters) ord : int Order of norm to use when calculating distance. See np.linalg.norm for more details Returns ======= distances : ndarray A masked array of the same size as label_mask. If distance_mask is passed in then the output array is masked by it.
625941b838b623060ff0ac4b
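A minimal usage sketch for distance_from_region above (assumes NumPy):

import numpy as np

labels = np.zeros((3, 3), dtype=bool)
labels[1, 1] = True                 # single labeled pixel in the center
d = distance_from_region(labels)
print(d[0, 0])                      # 1.414... -- Euclidean distance to the center pixel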
def MAP(scores,label_list): <NEW_LINE> <INDENT> sorted, indices = torch.sort(scores,descending=True) <NEW_LINE> map=0 <NEW_LINE> score_rank=0. <NEW_LINE> rel_num=0. <NEW_LINE> for index in indices: <NEW_LINE> <INDENT> score_rank+=1 <NEW_LINE> if label_list[index]==1: <NEW_LINE> <INDENT> rel_num+=1 <NEW_LINE> map+= rel_num/score_rank <NEW_LINE> <DEDENT> <DEDENT> if rel_num==0: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return map/rel_num
calculate the MAP of this ranking problem :param scores: tensor, a 1-D list like [0.9,0.9,0.5,0.8,....] :param label_list: a list like [1,0,1,0,1,0,1,1,1,1]; all integers, only 1 and 0 allowed, where 1 means relevant :return: map score: float
625941b8f9cc0f698b140462
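A usage sketch for MAP above (assumes PyTorch):

import torch

scores = torch.tensor([0.9, 0.2, 0.8, 0.1])
labels = [1, 0, 1, 0]
print(MAP(scores, labels))   # 1.0 -- both relevant items are ranked first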
def get_asset_vulnerabilities_with_http_info(self, id, **kwargs): <NEW_LINE> <INDENT> all_params = ['id', 'page', 'size', 'sort'] <NEW_LINE> all_params.append('async_req') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> all_params.append('_preload_content') <NEW_LINE> all_params.append('_request_timeout') <NEW_LINE> params = locals() <NEW_LINE> for key, val in six.iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_asset_vulnerabilities" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('id' not in params or params['id'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `id` when calling `get_asset_vulnerabilities`") <NEW_LINE> <DEDENT> collection_formats = {} <NEW_LINE> path_params = {} <NEW_LINE> if 'id' in params: <NEW_LINE> <INDENT> path_params['id'] = params['id'] <NEW_LINE> <DEDENT> query_params = [] <NEW_LINE> if 'page' in params: <NEW_LINE> <INDENT> query_params.append(('page', params['page'])) <NEW_LINE> <DEDENT> if 'size' in params: <NEW_LINE> <INDENT> query_params.append(('size', params['size'])) <NEW_LINE> <DEDENT> if 'sort' in params: <NEW_LINE> <INDENT> query_params.append(('sort', params['sort'])) <NEW_LINE> collection_formats['sort'] = 'multi' <NEW_LINE> <DEDENT> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client.select_header_accept( ['application/json;charset=UTF-8']) <NEW_LINE> header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) <NEW_LINE> auth_settings = [] <NEW_LINE> return self.api_client.call_api( '/api/3/assets/{id}/vulnerabilities', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageOfVulnerabilityFinding', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
Asset Vulnerabilities # noqa: E501 Retrieves all vulnerability findings on an asset. A finding may be `invulnerable` if all instances have exceptions applied. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_asset_vulnerabilities_with_http_info(id, async_req=True) >>> result = thread.get() :param async_req bool :param int id: The identifier of the asset. (required) :param int page: The index of the page (zero-based) to retrieve. :param int size: The number of records per page to retrieve. :param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters. :return: PageOfVulnerabilityFinding If the method is called asynchronously, returns the request thread.
625941b86e29344779a62472
def __getattr__(self, attr): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> result = self.data_dict[attr] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise errors.ModelAttributeError(attr + ' is not exist') <NEW_LINE> <DEDENT> return result
Access to resource data by attributes.
625941b84a966d76dd550e69
def transform_data(data): <NEW_LINE> <INDENT> df_imoveis = clean_data(data) <NEW_LINE> le = LabelEncoder() <NEW_LINE> categorical_columns = ['Finalidade', 'Categoria', 'Bairro'] <NEW_LINE> for column in categorical_columns: <NEW_LINE> <INDENT> df_imoveis[column] = le.fit_transform(df_imoveis[column]) <NEW_LINE> <DEDENT> sc = StandardScaler() <NEW_LINE> numerical_columns = ['AreaPrivativa', 'Bairro'] <NEW_LINE> df_imoveis[numerical_columns] = sc.fit_transform( df_imoveis[numerical_columns]) <NEW_LINE> return df_imoveis
Return the dataframe with categorical columns encoded with label encoder and numerical columns standardized.
625941b8cb5e8a47e48b790c
def reset(self): <NEW_LINE> <INDENT> self.n_iter = 0 <NEW_LINE> self.t_fit = 0 <NEW_LINE> self.intercept_ = 0 <NEW_LINE> if self.coef_ is not None: <NEW_LINE> <INDENT> self.coef_[:] = 0
Reset classifier parameters (weights, bias, and # iterations)
625941b83539df3088e2e1a8
@receiver(reset_password_token_created) <NEW_LINE> def password_reset_token_created(sender, reset_password_token, *args, **kwargs): <NEW_LINE> <INDENT> email = reset_password_token.user.email <NEW_LINE> context = { 'username': reset_password_token.user.username, 'reset_password_url': settings.THIS_URL + "/password_change?token={}" .format(reset_password_token.key) } <NEW_LINE> content = render_to_string('email/password_reset.html', context) <NEW_LINE> send_email(email, 'Password Reset Token', content, True)
Handles password reset tokens. When a token is created, an e-mail needs to be sent to the user :param sender: :param reset_password_token: :param args: :param kwargs: :return:
625941b891f36d47f21ac353
def main(): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> user_socket_type = input("Enter 0 for a UDP server, 1 for a TCP server, 2 for a UDP client, 3 for a TCP client: ") <NEW_LINE> if user_socket_type not in ["0", "1", "2", "3"]: <NEW_LINE> <INDENT> print("wrong user_socket_type, try again!!") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> if user_socket_type == "0": <NEW_LINE> <INDENT> create_udp_server() <NEW_LINE> <DEDENT> if user_socket_type == "1": <NEW_LINE> <INDENT> create_tcp_server() <NEW_LINE> <DEDENT> if user_socket_type == "2": <NEW_LINE> <INDENT> create_udp_client() <NEW_LINE> <DEDENT> if user_socket_type == "3": <NEW_LINE> <INDENT> create_tcp_client()
Main function
625941b83c8af77a43ae35fb
def __init__(self, riddle: dict, levels: dict, secret_levels: dict): <NEW_LINE> <INDENT> self.full_name = riddle['full_name'] <NEW_LINE> self.guild = get(bot.guilds, id=int(riddle['guild_id'])) <NEW_LINE> self.levels = OrderedDict() <NEW_LINE> for level in levels: <NEW_LINE> <INDENT> id = level['name'] <NEW_LINE> self.levels[id] = level <NEW_LINE> <DEDENT> self.secret_levels = OrderedDict() <NEW_LINE> for level in secret_levels: <NEW_LINE> <INDENT> id = level['name'] <NEW_LINE> self.secret_levels[id] = level
Build a riddle object from a row extracted from the database.
625941b88e71fb1e9831d60a
def __init__(self, udpPort=4000, dataStore=None, routingTable=None, networkProtocol=None, **kwargs): <NEW_LINE> <INDENT> self.id = kwargs.get('id') <NEW_LINE> if not self.id: <NEW_LINE> <INDENT> self.id = self._generateID() <NEW_LINE> <DEDENT> self.port = udpPort <NEW_LINE> self.listener = None <NEW_LINE> self.refresher = None <NEW_LINE> self._joinDeferred = None <NEW_LINE> if routingTable is None: <NEW_LINE> <INDENT> self._routingTable = routingtable.TreeRoutingTable(self.id) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._routingTable = routingTable <NEW_LINE> <DEDENT> if networkProtocol is None: <NEW_LINE> <INDENT> self._protocol = protocol.KademliaProtocol(self) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._protocol = networkProtocol(self) <NEW_LINE> <DEDENT> if dataStore is None: <NEW_LINE> <INDENT> self._dataStore = datastore.DictDataStore() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._dataStore = dataStore <NEW_LINE> h = hashlib.sha1() <NEW_LINE> h.update(b'nodeState') <NEW_LINE> nodeStateKey = h.hexdigest() <NEW_LINE> if nodeStateKey in self._dataStore: <NEW_LINE> <INDENT> json_state = self._dataStore[nodeStateKey] <NEW_LINE> state = json.loads(json_state) <NEW_LINE> self.id = state['id'] <NEW_LINE> for contactTriple in state['closestNodes']: <NEW_LINE> <INDENT> contact = Contact(encoding.to_text(contactTriple[0]), contactTriple[1], contactTriple[2], self._protocol) <NEW_LINE> self._routingTable.addContact(contact) <NEW_LINE> <DEDENT> if _Debug: <NEW_LINE> <INDENT> print('[DHT NODE] found "nodeState" key in local db and added %d contacts to routing table' % len(state['closestNodes'])) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> self._counter = None
@param dataStore: The data store to use. This must be a class inheriting from the C{DataStore} interface (or providing the same API). How the data store manages its data internally is up to the implementation of that data store. @type dataStore: entangled.kademlia.datastore.DataStore @param routingTable: The routing table to use. Since there exists some ambiguity as to how the routing table should be implemented in Kademlia, a different routing table may be used, as long as the appropriate API is exposed. @type routingTable: entangled.kademlia.routingtable.RoutingTable @param networkProtocol: The network protocol to use. This can be overridden from the default to (for example) change the format of the physical RPC messages being transmitted. @type networkProtocol: entangled.kademlia.protocol.KademliaProtocol
625941b8b830903b967e9773
def get_file_str(path, saltenv='base'): <NEW_LINE> <INDENT> fn_ = cache_file(path, saltenv) <NEW_LINE> if isinstance(fn_, six.string_types): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with salt.utils.fopen(fn_, 'r') as fp_: <NEW_LINE> <INDENT> return fp_.read() <NEW_LINE> <DEDENT> <DEDENT> except IOError: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return fn_
Download a file from a URL to the Minion cache directory and return the contents of that file. Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file
625941b80383005118ecf441
def train_rdp_classifier_and_assign_taxonomy( training_seqs_file, taxonomy_file, seqs_to_classify, min_confidence=0.80, model_output_dir=None, classification_output_fp=None): <NEW_LINE> <INDENT> if model_output_dir is None: <NEW_LINE> <INDENT> training_dir = tempfile.mkdtemp(prefix='RdpTrainer_') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> training_dir = model_output_dir <NEW_LINE> <DEDENT> training_results = train_rdp_classifier( training_seqs_file, taxonomy_file, training_dir) <NEW_LINE> training_data_fp = training_results['properties'].name <NEW_LINE> assignment_results = assign_taxonomy( seqs_to_classify, min_confidence=min_confidence, output_fp=classification_output_fp, training_data_fp=training_data_fp) <NEW_LINE> if model_output_dir is None: <NEW_LINE> <INDENT> rmtree(training_dir) <NEW_LINE> <DEDENT> return assignment_results
Train RDP Classifier and assign taxonomy in one fell swoop The file objects training_seqs_file and taxonomy_file are used to train the RDP Classifier (see RdpTrainer documentation for details). Model data is stored in model_output_dir. If model_output_dir is not provided, a temporary directory is created and removed after classification. The sequences in seqs_to_classify are classified according to the model and filtered at the desired confidence level (default: 0.80). The results are saved to classification_output_fp if provided, otherwise a dict of {seq_id:(taxonomy_assignment,confidence)} is returned.
625941b8377c676e91272007
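A hedged usage sketch: the trainer expects open file objects for the training sequences and taxonomy, and the query sequences are classified against the freshly trained model (file names are placeholders):

with open('training_seqs.fasta') as seqs, \
     open('taxonomy.txt') as taxa, \
     open('query_seqs.fasta') as query:
    # Returns {seq_id: (taxonomy_assignment, confidence)} because no
    # classification_output_fp is given.
    assignments = train_rdp_classifier_and_assign_taxonomy(
        seqs, taxa, query, min_confidence=0.85)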
def conv_backward_naive(dout, cache): <NEW_LINE> <INDENT> dx, dw, db = None, None, None <NEW_LINE> N,F,outH,outW = dout.shape <NEW_LINE> x, w, b, conv_param = cache <NEW_LINE> F,C,HH,WW = w.shape <NEW_LINE> N,C,H,W = x.shape <NEW_LINE> stride = conv_param['stride'] <NEW_LINE> pad = conv_param['pad'] <NEW_LINE> db = np.zeros((F)) <NEW_LINE> for fil in range(F): <NEW_LINE> <INDENT> db[fil] = np.sum(dout[:,fil,:,:]) <NEW_LINE> <DEDENT> x_pad = np.pad(x,((0,),(0,),(pad,),(pad,)),mode='constant') <NEW_LINE> dw = np.zeros((F,C,HH,WW)) <NEW_LINE> for fil in range(F): <NEW_LINE> <INDENT> for c in range(C): <NEW_LINE> <INDENT> for row in range(HH): <NEW_LINE> <INDENT> for col in range(WW): <NEW_LINE> <INDENT> action_region = x_pad[:,c,row:row+outH*stride:stride,col:col+outW*stride:stride] <NEW_LINE> dw[fil,c,row,col] = np.sum(dout[:,fil,:,:]*action_region) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> dx = np.zeros((N,C,H,W)) <NEW_LINE> for i in range(N): <NEW_LINE> <INDENT> for row in range(H): <NEW_LINE> <INDENT> for col in range(W): <NEW_LINE> <INDENT> for fil in range(F): <NEW_LINE> <INDENT> for k in range(outH): <NEW_LINE> <INDENT> for l in range(outW): <NEW_LINE> <INDENT> mask1 = np.zeros_like(w[fil,:,:,:]) <NEW_LINE> mask2 = np.zeros_like(w[fil,:,:,:]) <NEW_LINE> if(row+pad-k*stride) < HH and (row+pad-k*stride) >= 0: <NEW_LINE> <INDENT> mask1[:,row+pad-k*stride,:] = 1.0 <NEW_LINE> <DEDENT> if(col+pad-l*stride) < WW and (col+pad-l*stride) >= 0: <NEW_LINE> <INDENT> mask2[:,:,col+pad-l*stride] = 1.0 <NEW_LINE> <DEDENT> w_masked = np.sum(w[fil,:,:,:]*mask1*mask2, axis=(1,2)) <NEW_LINE> dx[i,:,row,col] += dout[i,fil,k,l]*w_masked <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return dx, dw, db
A naive implementation of the backward pass for a convolutional layer. Inputs: - dout: Upstream derivatives. - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive Returns a tuple of: - dx: Gradient with respect to x - dw: Gradient with respect to w - db: Gradient with respect to b
625941b87c178a314d6ef2b5
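A validation sketch for the backward pass above, assuming the `conv_forward_naive` counterpart mentioned in the docstring; the shapes are arbitrary and the finite-difference comparison is left as a comment:

import numpy as np

np.random.seed(0)
x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(4, 3, 3, 3)
b = np.random.randn(4)
conv_param = {'stride': 1, 'pad': 1}
out, cache = conv_forward_naive(x, w, b, conv_param)
dout = np.random.randn(*out.shape)
dx, dw, db = conv_backward_naive(dout, cache)
# dx, dw, db should agree with centered finite differences to ~1e-7.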
def write_to_dayone(dayone_list): <NEW_LINE> <INDENT> print("SCRIPT IS: echo \"" + dayone_list[0] + "\" | dayone2 " + dayone_list[1] + " new") <NEW_LINE> try: <NEW_LINE> <INDENT> retcode = call("echo \"" + dayone_list[0] + "\" | dayone2 " + dayone_list[1] + " new", shell=True) <NEW_LINE> if retcode < 0: <NEW_LINE> <INDENT> print("Child was terminated by signal", -retcode) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Child returned", retcode) <NEW_LINE> <DEDENT> <DEDENT> except OSError as e: <NEW_LINE> <INDENT> print("Execution failed:", e)
Take a two-element list of [entry text, extra CLI arguments] and pipe the text into the dayone2 command-line tool via the shell
625941b8c432627299f04aa1
def res_net(data_x, keep_prob, training): <NEW_LINE> <INDENT> with tf.name_scope("resnet_classify"): <NEW_LINE> <INDENT> data_logit = conv_net(data_x, (7, 7, 3, 64), (1, 2, 2, 1)) <NEW_LINE> data_logit = batch_normalization(data_logit, training) <NEW_LINE> data_logit = actv_net(data_logit) <NEW_LINE> data_logit = max_pool(data_logit, (1, 3, 3, 1), (1, 2, 2, 1)) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 64, 64, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 64, 64, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 64, 64, training) <NEW_LINE> data_logit = match_dimen_shortcut_conv(data_logit, 64, 128, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 128, 128, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 128, 128, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 128, 128, training) <NEW_LINE> data_logit = match_dimen_shortcut_conv(data_logit, 128, 256, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 256, 256, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 256, 256, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 256, 256, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 256, 256, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 256, 256, training) <NEW_LINE> data_logit = match_dimen_shortcut_conv(data_logit, 256, 512, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 512, 512, training) <NEW_LINE> data_logit = identity_shortcut_conv(data_logit, 512, 512, training) <NEW_LINE> data_logit = avg_pool(data_logit, (1, 2, 2, 1), (1, 2, 2, 1)) <NEW_LINE> data_logit = tf.reshape(data_logit, [-1, 512]) <NEW_LINE> data_logit_embed = dense_net(data_logit, [512, 300], add_b=True) <NEW_LINE> data_logit_class = dense_net(data_logit, [512, 157], add_b=True) <NEW_LINE> <DEDENT> return data_logit_embed
create a resnet network Args: data_x: tf.placeholder(). Input of the whole network. A 4-D shape [batch_size, height, width, channel] keep_prob: tf.placeholder(). Dropout keep probability (accepted but unused in the current body) training: tf.placeholder(). True/False. Used for batch_normalization Returns: data_logit_embed: Tensor. The predicted 300-dimensional embedding
625941b8462c4b4f79d1d52d
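A TF1-style wiring sketch matching the placeholders the docstring describes; the 224x224 input size is an assumption:

import tensorflow as tf

data_x = tf.placeholder(tf.float32, [None, 224, 224, 3])
keep_prob = tf.placeholder(tf.float32)  # accepted but unused by res_net
training = tf.placeholder(tf.bool)
embeddings = res_net(data_x, keep_prob, training)  # shape [batch, 300]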
def make_lower(arg): <NEW_LINE> <INDENT> if arg is not None: <NEW_LINE> <INDENT> if isinstance(arg, str): <NEW_LINE> <INDENT> arg = arg.lower() <NEW_LINE> <DEDENT> <DEDENT> return arg
Helper function: lowercase the argument if it is a string; otherwise return the original value (including None) unchanged
625941b863d6d428bbe4434c
def __init__(self, experiment_id=experiment_id, create=True): <NEW_LINE> <INDENT> self.experiment_id = experiment_id <NEW_LINE> if create: <NEW_LINE> <INDENT> self.psm = self.get_or_create_cc_parameterset(create=True) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.psm = self.get_or_create_cc_parameterset(create=False)
:param experiment_id: The id of the experiment :type experiment_id: integer :param create: If true, creates a new parameterset object to hold the cc license :type create: boolean
625941b8091ae35668666dc2
def read_sleb(blob, offset): <NEW_LINE> <INDENT> result = 0 <NEW_LINE> shift = 0 <NEW_LINE> index = offset <NEW_LINE> end = len(blob) <NEW_LINE> b = 0 <NEW_LINE> done = False <NEW_LINE> while not done and index < end: <NEW_LINE> <INDENT> b = struct.unpack_from("B", blob, index)[0] <NEW_LINE> result |= ((b & 0x7f) << shift) <NEW_LINE> shift += 7 <NEW_LINE> index += 1 <NEW_LINE> if b & 0x80 == 0: <NEW_LINE> <INDENT> done = True <NEW_LINE> <DEDENT> <DEDENT> if b & 0x40: <NEW_LINE> <INDENT> result -= (1 << shift) <NEW_LINE> <DEDENT> return (result, index - offset)
Read a number encoded as signed LEB128 (sleb128) from blob starting at offset; return a (value, bytes_consumed) tuple
625941b899cbb53fe6792a44
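A worked decoding example for the routine above (byte values are the standard LEB128 textbook cases):

# 0x7E is the single-byte SLEB128 encoding of -2: result = 0x7E, the
# continuation bit (0x80) is clear, the sign bit (0x40) is set, so
# result -= 1 << 7, giving 126 - 128 == -2.
assert read_sleb(b'\x7e', 0) == (-2, 1)
# 624485 encodes as E5 8E 26 (the classic LEB128 example); the final
# byte's sign bit is clear, so the value stays positive.
assert read_sleb(b'\xe5\x8e\x26', 0) == (624485, 3)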
@pytest.fixture <NEW_LINE> def tesults_flag(request): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return request.config.getoption("--tesults") <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print("Exception when trying to run test: %s"%__file__) <NEW_LINE> print("Python says:%s"%str(e))
pytest fixture for sending results to tesults
625941b830dc7b76659017c7
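The fixture only resolves if the `--tesults` option has been registered; a minimal `conftest.py` sketch (the default value and help text are assumptions):

def pytest_addoption(parser):
    # Registers the flag that the tesults_flag fixture reads.
    parser.addoption("--tesults", action="store", default="off",
                     help="enable reporting of results to tesults")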
def DoGetBestSize(self): <NEW_LINE> <INDENT> return wx.Size(*self._layouts[0].overall_size)
Gets the size which best suits the window: for a control, it would be the minimal size which doesn't truncate the control, for a panel - the same size as it would have after a call to `Fit()`. :return: An instance of :class:`Size`. :note: Overridden from :class:`Control`.
625941b84527f215b584c2b8
def getAtEnergy( self, energy ) : <NEW_LINE> <INDENT> return( self.getSpectrumAtEnergy( energy ) )
This method is deprecated, use getSpectrumAtEnergy.
625941b88c3a87329515821b
def load_data(self, all_data, *args, rowOffset=0, colOffset=0, **kwargs): <NEW_LINE> <INDENT> data = all_data.get_data() <NEW_LINE> columns = all_data.get_headers() <NEW_LINE> if rowOffset+data.shape[0] > self.rowCount()-10: <NEW_LINE> <INDENT> self.setRowCount(rowOffset+data.shape[0]+10) <NEW_LINE> <DEDENT> if colOffset+data.shape[1] > self.columnCount()-10: <NEW_LINE> <INDENT> self.setColumnCount(colOffset+data.shape[1]+10) <NEW_LINE> <DEDENT> for i, row in enumerate(data): <NEW_LINE> <INDENT> for j, elem in enumerate(row): <NEW_LINE> <INDENT> self.setItem(rowOffset+i, colOffset+j, self.CustomTableWidgetItem(str(elem))) <NEW_LINE> <DEDENT> <DEDENT> if columns is not None: <NEW_LINE> <INDENT> if len(set(columns)) < len(columns): <NEW_LINE> <INDENT> raise ValueError("Duplicate column name") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> newHeaders = [] <NEW_LINE> for i in range(0, colOffset): <NEW_LINE> <INDENT> newHeaders.append(str(i) if self.horizontalHeaderItem(i) is None else self.horizontalHeaderItem(i).text()) <NEW_LINE> <DEDENT> newHeaders = newHeaders + columns <NEW_LINE> self.setHorizontalHeaderLabels(newHeaders)
Load data into the table :param all_data: The data to load :type all_data: Data :param rowOffset: row index at which to start writing :type rowOffset: int :param colOffset: column index at which to start writing :type colOffset: int
625941b8925a0f43d2549cd0
@app.route('/results') <NEW_LINE> def results(): <NEW_LINE> <INDENT> city = request.args.get('city') <NEW_LINE> units = request.args.get('units') <NEW_LINE> params = { 'appid': API_KEY, 'q': city, 'units': units } <NEW_LINE> result_json = requests.get(API_URL, params=params).json() <NEW_LINE> pp.pprint(result_json) <NEW_LINE> context = { 'date': datetime.now(), 'city': city, 'description': result_json['weather'][0]['description'], 'temp': result_json['main']['temp'], 'humidity': result_json['main']['humidity'], 'wind_speed': result_json['wind']['speed'], 'sunrise': datetime.fromtimestamp(result_json['sys']['sunrise']), 'sunset': datetime.fromtimestamp(result_json['sys']['sunset']), 'units_letter': get_letter_for_units(units) } <NEW_LINE> return render_template('results.html', **context)
Displays results for current weather conditions.
625941b8d164cc6175782bab
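`get_letter_for_units` is called but not defined in the entry; a plausible sketch, assuming OpenWeatherMap's 'imperial'/'metric' unit names:

def get_letter_for_units(units):
    # Hypothetical helper: map an OpenWeatherMap units value to the
    # letter shown next to temperatures.
    return 'F' if units == 'imperial' else 'C' if units == 'metric' else 'K'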
def _generate_tokens(self, administrator_id, with_refresh_token=True): <NEW_LINE> <INDENT> now = datetime.utcnow() <NEW_LINE> expiry = now + timedelta(hours=current_app.config['JWT_EXPIRY_HOURS']) <NEW_LINE> token = generate_jwt({'administrator_id': administrator_id, 'refresh': False}, expiry) <NEW_LINE> refresh_token = None <NEW_LINE> if with_refresh_token: <NEW_LINE> <INDENT> refresh_expiry = now + timedelta(days=current_app.config['JWT_REFRESH_DAYS']) <NEW_LINE> refresh_token = generate_jwt({'administrator_id': administrator_id, 'refresh': True}, refresh_expiry) <NEW_LINE> <DEDENT> return token, refresh_token
Generate a token and a refresh_token :param administrator_id: administrator id :return: token, refresh_token
625941b821bff66bcd6847b2
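`generate_jwt` is imported from elsewhere; a hypothetical sketch of a compatible implementation using PyJWT (the secret-key config name is an assumption):

import jwt  # PyJWT
from flask import current_app

def generate_jwt(payload, expiry):
    # Hypothetical helper matching the calls above: sign the payload
    # with an exp claim using the app's secret key.
    claims = dict(payload, exp=expiry)
    return jwt.encode(claims, current_app.config['SECRET_KEY'],
                      algorithm='HS256')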
def placer_reports_with_partitionable_graph( report_folder, hostname, graph, graph_mapper, placements, machine): <NEW_LINE> <INDENT> placement_report_with_partitionable_graph_by_vertex( report_folder, hostname, graph, graph_mapper, placements) <NEW_LINE> placement_report_with_partitionable_graph_by_core( report_folder, hostname, placements, machine, graph_mapper) <NEW_LINE> sdram_usage_report_per_chip( report_folder, hostname, placements, machine)
Reports that can be produced from placements when a partitionable graph exists :param report_folder: the folder to which the reports are being written :param hostname: the hostname of the machine the placer worked on :param graph: the partitionable graph for which placements were built :param graph_mapper: the mapping between partitionable and partitioned graphs :param placements: the placements objects built by the placer. :param machine: the python machine object :return: None
625941b8627d3e7fe0d68cac
def test_get_attribute_request_GET_lang(self): <NEW_LINE> <INDENT> request = Request(self.factory.get('/', {'lang': 'lol'})) <NEW_LINE> assert request.GET['lang'] == 'lol' <NEW_LINE> mock_serializer = serializers.Serializer(context={'request': request}) <NEW_LINE> field = self.field_class() <NEW_LINE> self._test_expected_single_string(field, mock_serializer)
Pass a lang in the query string; expect a single string to be returned instead of an object.
625941b87047854f462a126a
def test_wikitext_revision_hash(revision_wikitext): <NEW_LINE> <INDENT> labeled = wikivision.label_version(revision_wikitext) <NEW_LINE> expected = revision_wikitext.wikitext.apply(wikivision.data._hash) <NEW_LINE> assert labeled.rev_sha1.tolist() == expected.tolist()
Labeling creates a column containing a hash of the wikitexts.
625941b8adb09d7d5db6c5f0
def isochrone(self, age=1.e8, metallicity=0.0): <NEW_LINE> <INDENT> z_defined = self.z_solar * (10.**metallicity) <NEW_LINE> log_age = math.log10(age) <NEW_LINE> if ((log_age < np.min(self.age_list)) or (log_age > np.max(self.age_list))): <NEW_LINE> <INDENT> logger.error('Requested age {0} is out of bounds.'.format(log_age)) <NEW_LINE> <DEDENT> if ((z_defined < np.min(self.z_list)) or (z_defined > np.max(self.z_list))): <NEW_LINE> <INDENT> logger.error('Requested metallicity {0} is out of bounds.'.format(z_defined)) <NEW_LINE> <DEDENT> if log_age != self.age_list[0]: <NEW_LINE> <INDENT> age_idx = searchsorted(self.age_list, log_age, side='right') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> age_idx = searchsorted(self.age_list, log_age, side='left') <NEW_LINE> <DEDENT> iso_file = 'iso_{0:.2f}.fits'.format(self.age_list[age_idx]) <NEW_LINE> z_idx = searchsorted(self.z_list, z_defined, side='left') <NEW_LINE> if z_idx == len(self.z_list): <NEW_LINE> <INDENT> z_idx = z_idx - 1 <NEW_LINE> <DEDENT> z_dir = self.z_file_map[self.z_list[z_idx]] <NEW_LINE> full_iso_file = self.model_dir + 'iso/' + z_dir + iso_file <NEW_LINE> iso = Table.read(full_iso_file, format='fits') <NEW_LINE> if self.version == 1.0: <NEW_LINE> <INDENT> iso.rename_column('col7', 'Z') <NEW_LINE> iso.rename_column('col2', 'logAge') <NEW_LINE> iso.rename_column('col3', 'mass') <NEW_LINE> iso.rename_column('col4', 'logT') <NEW_LINE> iso.rename_column('col5', 'logg') <NEW_LINE> iso.rename_column('col6', 'logL') <NEW_LINE> iso.rename_column('col65', 'phase') <NEW_LINE> <DEDENT> elif self.version == 1.2: <NEW_LINE> <INDENT> iso.rename_column('col2', 'logAge') <NEW_LINE> iso.rename_column('col3', 'mass') <NEW_LINE> iso.rename_column('col4', 'mass_current') <NEW_LINE> iso.rename_column('col9', 'logL') <NEW_LINE> iso.rename_column('col14', 'logT') <NEW_LINE> iso.rename_column('col17', 'logg') <NEW_LINE> iso.rename_column('col79', 'phase') <NEW_LINE> <DEDENT> isWD = np.where(iso['phase'] == 6)[0] <NEW_LINE> iso['phase'][isWD] = 101 <NEW_LINE> isWR = Column([False] * len(iso), name='isWR') <NEW_LINE> idx_WR = np.where(iso['phase'] == 9)[0] <NEW_LINE> isWR[idx_WR] = True <NEW_LINE> iso.add_column(isWR) <NEW_LINE> iso.meta['log_age'] = log_age <NEW_LINE> iso.meta['metallicity_in'] = metallicity <NEW_LINE> iso.meta['metallicity_act'] = np.log10(self.z_list[z_idx] / self.z_solar) <NEW_LINE> return iso
Extract an individual isochrone from the MISTv1 collection.
625941b88da39b475bd64dd4
@request_utils.cluster_api_exception_handler <NEW_LINE> def ovdc_update(data, operation_context: ctx.OperationContext): <NEW_LINE> <INDENT> ovdc_spec = common_models.Ovdc(**data[RequestKey.INPUT_SPEC]) <NEW_LINE> return ovdc_service.update_ovdc(operation_context, ovdc_id=data[RequestKey.OVDC_ID], ovdc_spec=ovdc_spec)
Request handler for ovdc enable, disable operations. Add or remove the respective cluster placement policies to enable or disable cluster deployment of a certain kind in the OVDC. Required data: k8s_runtime :return: Dictionary with org VDC update task href.
625941b8d6c5a10208143ea4
def on_canvas_configure(self, event): <NEW_LINE> <INDENT> self.canvas.itemconfigure("container", width=event.width) <NEW_LINE> self.canvas.configure(scrollregion=self.canvas.bbox("all"))
Resize the embedded container to match the canvas width and update the scroll region.
625941b81d351010ab85597b
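A wiring sketch for the handler, assuming the standard tkinter scrollable-frame pattern; the "container" tag must match the one the handler reconfigures:

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root)
inner = tk.Frame(canvas)
# The "container" tag is the item on_canvas_configure resizes.
canvas.create_window((0, 0), window=inner, anchor="nw", tags="container")
# `scrolled` stands in for the instance that owns the method above.
canvas.bind("<Configure>", scrolled.on_canvas_configure)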
def test_duplicated_key_omitted(self): <NEW_LINE> <INDENT> cif = "_exptl.method foo\n_exptl.method .\n" <NEW_LINE> for real_file in (True, False): <NEW_LINE> <INDENT> h = GenericHandler() <NEW_LINE> h.omitted = 'OMIT' <NEW_LINE> self._read_cif(cif, real_file, {'_exptl': h}) <NEW_LINE> self.assertEqual(h.data, [{'method': 'OMIT'}])
If a key is duplicated, we take the final (omitted) value
625941b830bbd722463cbc20
@cli.command("merge-tables", short_help="for a sample, merge its eggnog and refseq tables") <NEW_LINE> @click.argument("refseq", type=click.File("r")) <NEW_LINE> @click.argument("eggnog", type=click.File("r")) <NEW_LINE> @click.argument("output", type=click.File("w")) <NEW_LINE> def merge_tables(refseq, eggnog, output): <NEW_LINE> <INDENT> import pandas as pd <NEW_LINE> index_cols = ["contig", "orf"] <NEW_LINE> try: <NEW_LINE> <INDENT> ref_df = pd.read_table(refseq, index_col=index_cols) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> logging.critical("The expected headers ('contig', 'orf') are missing from %s" % refseq.name) <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> logging.info("%d contained in %s" % (len(ref_df), refseq.name)) <NEW_LINE> try: <NEW_LINE> <INDENT> egg_df = pd.read_table(eggnog, index_col=index_cols) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> logging.critical("The expected headers ('contig', 'orf') are missing from %s" % eggnog.name) <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> logging.info("%d contained in %s" % (len(egg_df), eggnog.name)) <NEW_LINE> merged = pd.merge(left=ref_df, right=egg_df, how="outer", left_index=True, right_index=True) <NEW_LINE> logging.info("%d total lines after merging" % len(merged)) <NEW_LINE> merged.to_csv(output, sep="\t", na_rep="NA")
Takes the output from `refseq` and `eggnog` and combines them into a single TSV table. Headers are required and should contain 'contig' and 'orf' column labels.
625941b81f5feb6acb0c49b2
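An invocation sketch using click's test runner (`cli` is the group the decorator attaches to; file names are placeholders):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ["merge-tables", "refseq.tsv", "eggnog.tsv",
                             "merged.tsv"])
assert result.exit_code == 0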
def get_quotes_for_words(conn, conn_i, words): <NEW_LINE> <INDENT> quotes = [] <NEW_LINE> for word in words: <NEW_LINE> <INDENT> candidates = db.get_quotes_i(conn_i, word) <NEW_LINE> if random.random() < IMAGE_PROBABILITY and len(candidates) > 0: <NEW_LINE> <INDENT> quotes.append(random.choice(candidates)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> quotes.append(random.choice(db.get_quotes(conn, word))) <NEW_LINE> <DEDENT> <DEDENT> return quotes
Return text or image quotes associated with the given words (one quote per word). Takes the image probability into account. :param conn: Connection to the text database :param conn_i: Connection to the image database :param words: Words to look up :return: A list with one quote per word
625941b863b5f9789fde6f42
def convert_classes_to_indexes(labels, classes): <NEW_LINE> <INDENT> if all([l in classes for l in labels]): <NEW_LINE> <INDENT> labels = [classes.index(label) for label in labels] <NEW_LINE> <DEDENT> return labels
Convert a list of labels representing classes to their corresponding indexes. More precisely, convert TripletDataset labels to the index of the class in the dataset, while keeping the current label for a FolderDataset dataset. :param labels: list of labels to convert. :param classes: list of all the classes in the dataset.
625941b863b5f9789fde6f43
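Two worked cases for the conversion above:

# All labels present in classes -> mapped to their indexes.
assert convert_classes_to_indexes(['dog', 'cat'], ['cat', 'dog']) == [1, 0]
# Any label missing from classes -> the list is returned unchanged.
assert convert_classes_to_indexes([3, 7], ['cat', 'dog']) == [3, 7]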
def recognize_package(location): <NEW_LINE> <INDENT> if not filetype.is_file(location): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> T = contenttype.get_type(location) <NEW_LINE> ftype = T.filetype_file.lower() <NEW_LINE> mtype = T.mimetype_file <NEW_LINE> for package_type in PACKAGE_TYPES: <NEW_LINE> <INDENT> metafiles = package_type.metafiles <NEW_LINE> if on_linux: <NEW_LINE> <INDENT> metafiles = (fsencode(m) for m in metafiles) <NEW_LINE> <DEDENT> if location.endswith(tuple(metafiles)): <NEW_LINE> <INDENT> logger_debug('metafile matching: package_type is of type:', package_type) <NEW_LINE> return package_type.recognize(location) <NEW_LINE> <DEDENT> if package_type.filetypes: <NEW_LINE> <INDENT> type_matched = any(t in ftype for t in package_type.filetypes) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> type_matched = False <NEW_LINE> <DEDENT> if package_type.mimetypes: <NEW_LINE> <INDENT> mime_matched = any(m in mtype for m in package_type.mimetypes) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mime_matched = False <NEW_LINE> <DEDENT> extensions = package_type.extensions <NEW_LINE> if extensions: <NEW_LINE> <INDENT> if on_linux: <NEW_LINE> <INDENT> extensions = tuple(fsencode(e) for e in extensions) <NEW_LINE> <DEDENT> extension_matched = location.lower().endswith(extensions) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> extension_matched = False <NEW_LINE> <DEDENT> if type_matched and mime_matched and extension_matched: <NEW_LINE> <INDENT> logger_debug('all matching: package is of type:', package_type) <NEW_LINE> recognized = package_type.recognize(location) <NEW_LINE> logger_debug('all matching: recognized as:', repr(recognized)) <NEW_LINE> return recognized <NEW_LINE> <DEDENT> logger_debug('no match: package is not of known type:', package_type)
Return a Package object if one was recognized or None for this `location`.
625941b8be7bc26dc91cd463
def rollback(): <NEW_LINE> <INDENT> with cd(_REMOTE_BASE_DIR): <NEW_LINE> <INDENT> r = run("ls -p -1") <NEW_LINE> files = [s[:-1] for s in RE_FILES.split(r) if s.startswith("www-") and s.endswith("/")] <NEW_LINE> files.sort(cmp=lambda s1, s2: 1 if s1 < s2 else -1) <NEW_LINE> r = run("ls -l www") <NEW_LINE> ss = r.split(" -> ") <NEW_LINE> if len(ss) != 2: <NEW_LINE> <INDENT> print("ERROR: 'www' is not a symbolic link.") <NEW_LINE> return <NEW_LINE> <DEDENT> current = ss[1] <NEW_LINE> print("Found current symbolic link pointing to: %s\n" % current) <NEW_LINE> try: <NEW_LINE> <INDENT> index = files.index(current) <NEW_LINE> <DEDENT> except ValueError as e: <NEW_LINE> <INDENT> print("ERROR: symbolic link is invalid") <NEW_LINE> raise e <NEW_LINE> <DEDENT> if len(files) == index + 1: <NEW_LINE> <INDENT> print("ERROR: already the oldest version.") <NEW_LINE> return <NEW_LINE> <DEDENT> old = files[index + 1] <NEW_LINE> print("=" * 80) <NEW_LINE> for f in files: <NEW_LINE> <INDENT> if f == current: <NEW_LINE> <INDENT> print(" Current ---> %s" % current) <NEW_LINE> <DEDENT> elif f == old: <NEW_LINE> <INDENT> print(" Rollback to ---> %s" % old) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print(" %s" % f) <NEW_LINE> <DEDENT> <DEDENT> print("=" * 80) <NEW_LINE> print("") <NEW_LINE> yn = raw_input("continue? y/N?") <NEW_LINE> if yn != 'y' and yn != 'Y': <NEW_LINE> <INDENT> print("Rollback cancelled.") <NEW_LINE> return <NEW_LINE> <DEDENT> print("Start rollback...") <NEW_LINE> sudo("rm -f www") <NEW_LINE> sudo("ln -s %s www" % old) <NEW_LINE> sudo("chown wxd:wxd www") <NEW_LINE> with settings(warn_only=True): <NEW_LINE> <INDENT> sudo("supervisor stop webapp") <NEW_LINE> sudo("supervisor start webapp") <NEW_LINE> sudo("/etc/init.d/nginx reload") <NEW_LINE> <DEDENT> print("ROLLBACK OK")
Roll back to the previous version
625941b8a8ecb033257d2f33
def read_into(self, buffer, *, alignment=1, write_offset=0) -> None: <NEW_LINE> <INDENT> if isinstance(buffer, Buffer): <NEW_LINE> <INDENT> buffer = buffer.mglo <NEW_LINE> <DEDENT> return self.mglo.read_into(buffer, alignment, write_offset)
Read the content of the texture into a buffer. Args: buffer (bytearray): The buffer that will receive the pixels. Keyword Args: alignment (int): The byte alignment of the pixels. write_offset (int): The write offset.
625941b876d4e153a657e98e
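A usage sketch, assuming the moderngl-style Texture this wrapper suggests; the 3-component, 1-byte-per-component sizing is an assumption:

buf = bytearray(texture.width * texture.height * 3)
texture.read_into(buf, alignment=1)  # pixels land at offset 0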
def save_flow(video_flows, flow_path, format="flow{}_{:05d}.{}", ext="jpg", separate=True): <NEW_LINE> <INDENT> if not os.path.exists(flow_path): <NEW_LINE> <INDENT> os.makedirs(flow_path) <NEW_LINE> <DEDENT> for i, flow in enumerate(video_flows): <NEW_LINE> <INDENT> if separate: <NEW_LINE> <INDENT> cv2.imwrite(os.path.join(flow_path, format.format("_x", i, ext)), flow[:, :, 0]) <NEW_LINE> cv2.imwrite(os.path.join(flow_path, format.format("_y", i, ext)), flow[:, :, 1]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_flows = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=flow.dtype) <NEW_LINE> new_flows[:, :, :2] = flow <NEW_LINE> cv2.imwrite(os.path.join(flow_path, format.format("_xy", i, ext)), new_flows)
Args: video_flows (list): the flow arrays (numpy.ndarray) to store flow_path (str): the directory in which to store the flow images format (str): filename template used for the flow images ext (str): image file extension separate (bool): if True, store the x and y channels as separate images Return: None
625941b85166f23b2e1a4fb7
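An end-to-end sketch feeding the saver above with dense Farneback flow from OpenCV; the uint8 quantization scheme is an assumption:

import cv2
import numpy as np

prev = cv2.cvtColor(cv2.imread('frame0.jpg'), cv2.COLOR_BGR2GRAY)
curr = cv2.cvtColor(cv2.imread('frame1.jpg'), cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev, curr, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
# Shift/scale into uint8 so the flow survives being written as an image.
flow_u8 = np.clip(flow * 16 + 128, 0, 255).astype(np.uint8)
save_flow([flow_u8], './flows')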
def groupAsDict(group): <NEW_LINE> <INDENT> groupDict = {'groupID': group.key.id(), 'userID': group.userID, 'creatorName': group.creatorName, 'groupName': group.groupName, 'description': group.description} <NEW_LINE> return groupDict
Return a dict representation of the group Extracts the detailed group data (groupID, userID, creatorName, groupName, description) from the input group entity
625941b8d6c5a10208143ea5