code: string, lengths 4 to 4.48k
docstring: string, lengths 1 to 6.45k
_id: string, length 24
def tearDown(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> os.remove(self.filepath) <NEW_LINE> <DEDENT> except OSError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if self.backed_up: <NEW_LINE> <INDENT> log.debug('Restoring original config.') <NEW_LINE> os.rename(self.backup_path, self.config_path)
Delete a tempfile if it exists. Otherwise carry on.
625941b755399d3f055884e0
def on_vsb(self, *args): <NEW_LINE> <INDENT> self.box_one.yview(*args) <NEW_LINE> self.box_two.yview(*args)
Vertical Scrolling event
625941b738b623060ff0ac1b
def Display(self, args, result): <NEW_LINE> <INDENT> self.context['api_adapter'].PrintNodePools([result])
This method is called to print the result of the Run() method. Args: args: The arguments that the command was run with. result: The value returned from the Run() method.
625941b71f037a2d8b94602b
def _load_identifiers(self, attrs, meta, model, resource_name): <NEW_LINE> <INDENT> for identifier in model.identifiers: <NEW_LINE> <INDENT> meta.identifiers.append(identifier.name) <NEW_LINE> attrs[identifier.name] = self._create_identifier( identifier, resource_name)
Populate required identifiers. These are arguments without which the resource cannot be used. Identifiers become arguments for operations on the resource.
625941b74527f215b584c288
def verify(self, hash, sig): <NEW_LINE> <INDENT> return _ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
Verify a DER signature
625941b70383005118ecf411
def predict(theta, X): <NEW_LINE> <INDENT> m = len(X) <NEW_LINE> p = np.zeros(m) <NEW_LINE> predictions = sigmoid(np.dot(X, theta)) <NEW_LINE> p[np.where(predictions >= 0.5)] = 1 <NEW_LINE> return p
predict() - Logistic Regression Prediction Function
625941b773bcbd0ca4b2beaa
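The predict entry above relies on a sigmoid helper that is not shown. A minimal runnable sketch, with a hypothetical sigmoid definition and toy data:

import numpy as np

def sigmoid(z):
    # logistic function: maps any real value into (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

def predict(theta, X):
    # label 1 wherever the predicted probability reaches 0.5
    p = np.zeros(len(X))
    p[np.where(sigmoid(np.dot(X, theta)) >= 0.5)] = 1
    return p

X = np.array([[1.0, -2.0], [1.0, 0.5], [1.0, 3.0]])  # bias column + one feature
theta = np.array([0.0, 1.0])
print(predict(theta, X))  # [0. 1. 1.]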
def content( userName = '', accessLevel = '', newUrl = '', command = '', queryString = '', postData = '', cookies = '', uploadFile = '' ): <NEW_LINE> <INDENT> cacheFile = cache.cacheFile( '_____', module ) <NEW_LINE> noCache = 0 <NEW_LINE> data = '' <NEW_LINE> if ENABLECACHING == 1: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cacheStream = open ( cacheFile, 'r') <NEW_LINE> data = pickle.load( cacheStream ) <NEW_LINE> cacheStream.close () <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> logger.error( 'failed to load cacheFile: \'%s\'' % cacheFile ) <NEW_LINE> noCache = 1 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> noCache = 1 <NEW_LINE> <DEDENT> if len(data) == 0: <NEW_LINE> <INDENT> noCache = 1 <NEW_LINE> <DEDENT> if noCache: <NEW_LINE> <INDENT> data = buildData(userName) <NEW_LINE> if ENABLECACHING == 1: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cacheStream = open ( cacheFile, 'w') <NEW_LINE> pickle.dump( data, cacheStream ) <NEW_LINE> cacheStream.close() <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> logger.error( 'failed to save cacheFile: \'%s\'' % cacheFile ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> returnStatus = "200 OK" <NEW_LINE> cnt = buildHtml(data) <NEW_LINE> responseHeaders = [ ( "Content-Type", CONTENT_HTML ), ( "Content-Length", str( len( cnt ) ) ) ] <NEW_LINE> return responseHeaders, returnStatus, cnt
Build the page content, serving it from a pickle cache when ENABLECACHING is set and rebuilding (and re-caching) it otherwise.
625941b7ff9c53063f47c02a
def p_comp_exp(p): <NEW_LINE> <INDENT> if len(p) == 4: <NEW_LINE> <INDENT> p[0] = ga.c_comp_exp__op_arithmetic(p[1], p[2], p[3]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p[0] = ga.c_comp_exp(p[1])
comp_exp : comp_exp comp_op op_arithmetic | op_arithmetic
625941b7c4546d3d9de7285d
def forward(self,example): <NEW_LINE> <INDENT> pillar_x = example[0] <NEW_LINE> pillar_y = example[1] <NEW_LINE> pillar_z = example[2] <NEW_LINE> pillar_i = example[3] <NEW_LINE> num_points = example[4] <NEW_LINE> x_sub_shaped = example[5] <NEW_LINE> y_sub_shaped = example[6] <NEW_LINE> mask = example[7] <NEW_LINE> voxel_features = self.pfn(pillar_x, pillar_y, pillar_z, pillar_i, num_points, x_sub_shaped, y_sub_shaped, mask) <NEW_LINE> voxel_features = voxel_features.squeeze() <NEW_LINE> voxel_features = voxel_features.permute(1, 0) <NEW_LINE> coors = example[8] <NEW_LINE> spatial_features = self.mfe(voxel_features, coors,example[9]) <NEW_LINE> preds_dict = self.rpn(spatial_features) <NEW_LINE> return self.predict(example, preds_dict)
nutonomy/second.pytorch
625941b7baa26c4b54cb0f50
def _read_from_memory(self): <NEW_LINE> <INDENT> return self.bin_data
Override for getting buffer for converter
625941b7d8ef3951e324336a
def add_task(self, task_name, task_status, task_project, task_description): <NEW_LINE> <INDENT> tasks_table = TasksTable(task_name, task_project) <NEW_LINE> self.database.create(tasks_table) <NEW_LINE> self.database.commit() <NEW_LINE> task_id = self.get_task_id_by_name(task_name) <NEW_LINE> self.set_start_date(task_id) <NEW_LINE> self.set_status(task_id, IN_PROGRESS) <NEW_LINE> self.set_task_assignee(task_id) <NEW_LINE> self.set_task_description(task_id, task_description)
Create a new task :param task_name: :param task_status: :param task_project: :param task_description: :return:
625941b767a9b606de4a7cea
def cross_reference(self, model): <NEW_LINE> <INDENT> msg = ' which is required by SPLINE2 sid=%s' % self.eid <NEW_LINE> self.caero = model.CAero(self.caero, msg=msg) <NEW_LINE> self.setg = model.Set(self.setg, msg=msg)
Cross links the card :param self: the SPLINE2 object pointer :param model: the BDF object :type model: BDF()
625941b726238365f5f0ec96
def input(self, args, key): <NEW_LINE> <INDENT> if self._container.process_user_input(key): <NEW_LINE> <INDENT> return InputState.PROCESSED <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if key == Prompt.CONTINUE: <NEW_LINE> <INDENT> for spoke in self._spokes.values(): <NEW_LINE> <INDENT> if not spoke.completed and spoke.mandatory: <NEW_LINE> <INDENT> print(_("Please complete all spokes before continuing")) <NEW_LINE> return InputState.DISCARDED <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif key == Prompt.HELP: <NEW_LINE> <INDENT> if self.has_help: <NEW_LINE> <INDENT> help_path = get_help_path(self.helpFile, True) <NEW_LINE> ScreenHandler.push_screen_modal(HelpScreen(help_path)) <NEW_LINE> return InputState.PROCESSED_AND_REDRAW <NEW_LINE> <DEDENT> <DEDENT> return key
Handle user input. Numbers are used to show a spoke, the rest is passed to the higher level for processing.
625941b7627d3e7fe0d68c7b
def finish_parse(self): <NEW_LINE> <INDENT> pass
Finalize parse details.
625941b738b623060ff0ac1c
def allocateSkillPoints(self, skill_points): <NEW_LINE> <INDENT> self.points = int(math.floor(skill_points)) <NEW_LINE> self.chooseSkills()
Allocate a number of points to be divided in some way between the skills
625941b7e1aae11d1e749ae0
def train(X, vectorizer, true_k=10, minibatch=False, showLabel=False): <NEW_LINE> <INDENT> if minibatch: <NEW_LINE> <INDENT> km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=False) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> km = KMeans(n_clusters=true_k, init='k-means++', max_iter=300, n_init=1, verbose=False) <NEW_LINE> <DEDENT> km.fit(X) <NEW_LINE> if showLabel: <NEW_LINE> <INDENT> print("Top terms per cluster:") <NEW_LINE> order_centroids = km.cluster_centers_.argsort()[:, ::-1] <NEW_LINE> terms = vectorizer.get_feature_names() <NEW_LINE> print(vectorizer.get_stop_words()) <NEW_LINE> for i in range(true_k): <NEW_LINE> <INDENT> print("Cluster %d:" % i, end='') <NEW_LINE> for ind in order_centroids[i, :10]: <NEW_LINE> <INDENT> print(' %s' % terms[ind], end='') <NEW_LINE> <DEDENT> print() <NEW_LINE> <DEDENT> <DEDENT> result = list(km.predict(X)) <NEW_LINE> print('Cluster distribution:') <NEW_LINE> print(dict([(i, result.count(i)) for i in result])) <NEW_LINE> return -km.score(X)
Input: the dataset's vectors and the number of clusters. Output: the K-cluster assignments.
625941b7711fe17d825421a9
def add_vel_to_base(self, frame_name, twist_base, vel): <NEW_LINE> <INDENT> if not frame_name in self.transforms: <NEW_LINE> <INDENT> raise SpeedLimitException("no transform exists for frame '{}'".format(frame_name)) <NEW_LINE> <DEDENT> ang_base = self.vect(twist_base.angular) <NEW_LINE> lin_base = self.vect(twist_base.linear) <NEW_LINE> trp = self.no_z(self.vect(self.transforms[frame_name].translation)) <NEW_LINE> rot_part = numpy.cross(trp, vel) / numpy.linalg.norm(trp) / numpy.linalg.norm(trp) <NEW_LINE> lin_part = vel - numpy.cross(rot_part, trp) <NEW_LINE> ang_base[2] = self.clamp(ang_base[2] + rot_part[2], 0.0, ang_base[2]) <NEW_LINE> lin_base[0] += lin_part[0] <NEW_LINE> out_twist = GM.Twist() <NEW_LINE> out_twist.linear = GM.Vector3(*lin_base) <NEW_LINE> out_twist.angular = GM.Vector3(*ang_base) <NEW_LINE> return out_twist
This method merges a velocity applied to one of the sensors into the mobile base velocity. It decomposes the sensor velocity into a rotational component about the base and a residual linear component, which gives an acceptable behavior.
625941b763f4b57ef0000f4f
def _pre_execute(self): <NEW_LINE> <INDENT> LOG.info("Initializing " + self.get_display_name()) <NEW_LINE> if not self.compute_model: <NEW_LINE> <INDENT> raise exception.ClusterStateNotDefined() <NEW_LINE> <DEDENT> if self.compute_model.stale: <NEW_LINE> <INDENT> raise exception.ClusterStateStale() <NEW_LINE> <DEDENT> LOG.debug(self.compute_model.to_string())
Base pre-execution phase. This performs the basic pre-execution operations most strategies should perform.
625941b71d351010ab85594b
def load_df(file, columns): <NEW_LINE> <INDENT> df = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load(file) <NEW_LINE> sel_df = df.select(columns) <NEW_LINE> return sel_df
Load a csv file as a dataframe. Read the csv file with its headers; the schema is inferred automatically by the databricks package. Args: file: string. Input filename with its path. columns: list. The column names to be selected.
625941b7a4f1c619b28afe6f
def isclose(bb1, bb2): <NEW_LINE> <INDENT> dist = distance(bb1,bb2) <NEW_LINE> dim2 = characteristic_dimension(bb2) <NEW_LINE> return dist < 2 * dim2
Returns True if the first object is close to the second. More precisely, returns True if the first bounding box is within a radius R (R = 2 X second bounding box dimension) of the second bounding box. Note that in general, isclose(bb1, bb2) != isclose(bb2, bb1)
625941b7cdde0d52a9e52e5c
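The isclose entry depends on distance and characteristic_dimension helpers that are not included here. A sketch under the assumption that a bounding box is a (min-corner, max-corner) array, with hypothetical implementations of both helpers, illustrating the asymmetry the docstring points out:

import numpy as np

def distance(bb1, bb2):
    # assumed: Euclidean distance between box centers
    return np.linalg.norm((bb1[0] + bb1[1]) / 2.0 - (bb2[0] + bb2[1]) / 2.0)

def characteristic_dimension(bb):
    # assumed: diagonal length of the box
    return np.linalg.norm(bb[1] - bb[0])

def isclose(bb1, bb2):
    return distance(bb1, bb2) < 2 * characteristic_dimension(bb2)

big = np.array([[0.0, 0.0], [10.0, 10.0]])
small = np.array([[20.0, 20.0], [21.0, 21.0]])
print(isclose(big, small))  # False: small's radius is tiny
print(isclose(small, big))  # True: big's radius covers the gap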
def clear(self): <NEW_LINE> <INDENT> super(OpenImagesChallengeEvaluator, self).clear() <NEW_LINE> self._evaluatable_labels.clear()
Clears stored data.
625941b73eb6a72ae02ec307
def transform(self, T): <NEW_LINE> <INDENT> T = assertion.ensure_tmatrix(T, dim=self.dim) <NEW_LINE> tcoords = transformation.transform(self.flattened, T) <NEW_LINE> return tcoords.reshape(self.shape).view(Coords)
Transform coordinates. Parameters ---------- T : array_like(Number, shape=(self.dim+1, self.dim+1)) Transformation matrix to apply. Returns ------- Coords(shape=self.shape) transformed coords. Examples -------- Transform structured coordinates. >>> coords = Coords( ... [[(2, 3), (2, 4), (3, 2)], [(0, 0), (3, 5), (9, 4)]]) >>> print_rounded(coords) [[[2 3] [2 4] [3 2]] <BLANKLINE> [[0 0] [3 5] [9 4]]] >>> T = transformation.matrix(t=[10, 20], s=[0.5, 1]) >>> tcoords = coords.transform(T) >>> print_rounded(tcoords) [[[ 11. 23. ] [ 11. 24. ] [ 11.5 22. ]] <BLANKLINE> [[ 10. 20. ] [ 11.5 25. ] [ 14.5 24. ]]]
625941b7a05bb46b383ec65a
def __ne__(self, other): <NEW_LINE> <INDENT> if isinstance(other, self.__class__): <NEW_LINE> <INDENT> return not self.__eq__(other) <NEW_LINE> <DEDENT> return NotImplemented
Test the non-equality of SingleScatteringData.
625941b791f36d47f21ac323
def restricted_loads(s): <NEW_LINE> <INDENT> return RestrictedUnpickler(io.BytesIO(s)).load()
Helper function analogous to pickle.loads()
625941b77047854f462a123a
def on_deploy(self, *args): <NEW_LINE> <INDENT> self.ui_creator.kivy_console.unbind(on_command_list_done=self.on_deploy) <NEW_LINE> self.project_watcher.resume_watching(delay=0) <NEW_LINE> self.profiler.dispatch('on_message', 'Installed on device', 5) <NEW_LINE> self.profiler.dispatch('on_deploy')
on_deploy event handler
625941b78e71fb1e9831d5da
def main(fileInput, everyStr, homology, kind): <NEW_LINE> <INDENT> allMotifs = readFasta(fileInput, everyStr=everyStr) <NEW_LINE> allMotifs = removeEqualentSeq(allMotifs, homology=homology) <NEW_LINE> results = [] <NEW_LINE> for n in range(len(allMotifs)): <NEW_LINE> <INDENT> subMotifs = list(allMotifs) <NEW_LINE> subMotifs.remove(allMotifs[n]) <NEW_LINE> background = backgroundFreq(subMotifs, kind=kind) <NEW_LINE> PCM = makePCM(subMotifs, kind=kind) <NEW_LINE> PWM = makePWMfromPCM(PCM, background=background, kind=kind) <NEW_LINE> treshold = score(allMotifs[n], PWM, kind=kind) <NEW_LINE> minS = minScore(PWM) <NEW_LINE> maxS = maxScore(PWM) <NEW_LINE> normTreshold = toNorm(treshold, minS, maxS) <NEW_LINE> FP = int() <NEW_LINE> totalLen = int() <NEW_LINE> while FP <= 150: <NEW_LINE> <INDENT> randomMotifs = [] <NEW_LINE> for i in subMotifs: <NEW_LINE> <INDENT> randomMotifs += randomSeq(i, 20) <NEW_LINE> <DEDENT> totalLen += len(randomMotifs) <NEW_LINE> FP += scanSequences(randomMotifs, PWM, treshold, kind=kind) <NEW_LINE> if totalLen >= 150000: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> if FP == 0: <NEW_LINE> <INDENT> FP = 1.0/(2*totalLen) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> FP = FP/totalLen <NEW_LINE> <DEDENT> results.append([normTreshold, FP]) <NEW_LINE> <DEDENT> results.sort(key=lambda x: x[1]) <NEW_LINE> results = results[::-1] <NEW_LINE> for i in range(len(results)): <NEW_LINE> <INDENT> results[i].append((i + 1) / len(allMotifs)) <NEW_LINE> <DEDENT> return(results)
Main function
625941b7460517430c393fbc
def compute_ratio(background, measured): <NEW_LINE> <INDENT> both = np.logical_and(background > 0, measured > 0) <NEW_LINE> background = background.astype(np.float32, copy=False) <NEW_LINE> measured = measured.astype(np.float32, copy=False) <NEW_LINE> ratio = background[both]/measured[both] <NEW_LINE> lower, upper = np.percentile(ratio, [5, 95]) <NEW_LINE> return np.mean(ratio[np.logical_and(ratio >= lower, ratio <= upper)])
Compute the mean ratio of non-extreme values between `background` and `measured` where both occurred.
625941b71f5feb6acb0c4982
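A quick usage sketch for compute_ratio, reusing the function above: the zero entry is skipped (both arrays must be positive) and the percentile trim drops the extreme ratio before averaging.

import numpy as np

background = np.array([10.0, 12.0, 0.0, 11.0, 500.0])
measured = np.array([5.0, 6.0, 3.0, 5.5, 1.0])
# ratios where both > 0: [2, 2, 2, 500]; the 5th-95th percentile
# window excludes 500, so the trimmed mean is 2.0
print(compute_ratio(background, measured))  # 2.0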
def update_when_older(self, days): <NEW_LINE> <INDENT> last_cache = self._get_last_cachefile_modification() <NEW_LINE> if last_cache is None: <NEW_LINE> <INDENT> return self.update() <NEW_LINE> <DEDENT> time_to_update = last_cache + timedelta(days=days) <NEW_LINE> if datetime.now() >= time_to_update: <NEW_LINE> <INDENT> return self.update() <NEW_LINE> <DEDENT> return True
Update TLD list cache file if the list is older than the number of days given in parameter `days` or if it does not exist. :param int days: number of days from last change :return: True if update was successful, False otherwise :rtype: bool
625941b796565a6dacc8f502
def test_entity_asset_status_by_id_get(self): <NEW_LINE> <INDENT> pass
Test case for entity_asset_status_by_id_get EntityAssetStatusById_GET # noqa: E501
625941b7090684286d50eb0d
def use(self, middleware, path = None): <NEW_LINE> <INDENT> print("[Router::use] Adding middleware", middleware) <NEW_LINE> self.middleware.append(middleware) <NEW_LINE> return self
Use the middleware (a callable with parameters res, req, next) on requests matching the provided path. A None path matches every request.
625941b7ad47b63b2c509db7
def commit(self): <NEW_LINE> <INDENT> pass
commit transaction
625941b78c0ade5d55d3e7ed
def __len__(self): <NEW_LINE> <INDENT> return len(self.verses)
Returns the length of the parallel text in number of verses.
625941b7ac7a0e7691ed3f07
def fit(self, S, A, R): <NEW_LINE> <INDENT> action_onehot = np_utils.to_categorical(A, num_classes=self.output_dim) <NEW_LINE> discounted_return = self.compute_discounted_R(R) <NEW_LINE> assert S.shape[1] == self.input_dim, "{} != {}".format(S.shape[1], self.input_dim) <NEW_LINE> assert action_onehot.shape[0] == S.shape[0], "{} != {}".format(action_onehot.shape[0], S.shape[0]) <NEW_LINE> assert action_onehot.shape[1] == self.output_dim, "{} != {}".format(action_onehot.shape[1], self.output_dim) <NEW_LINE> assert len(discounted_return.shape) == 1, "{} != 1".format(len(discounted_return.shape)) <NEW_LINE> self.train_fn([S, action_onehot, discounted_return])
Train a network Args: S (2-D Array): `state` array of shape (n_samples, state_dimension) A (1-D Array): `action` array of shape (n_samples,) It's simply a list of int that stores which actions the agent chose R (1-D Array): `reward` array of shape (n_samples,) A reward is given after each action.
625941b7d99f1b3c44c673c5
def get_name(self): <NEW_LINE> <INDENT> raise NotImplementedError()
:returns: the string name of the filesystem
625941b77047854f462a123b
def step(self, alive_components, thermals): <NEW_LINE> <INDENT> return self._update_agings(alive_components, thermals)
Increment a single timestep regarding the aging process of the simulation :param alive_components: 2D numpy boolean array indicating the position of alive components. :param thermals: 2D numpy float array with the current local thermals at this iteration. :return: Boolean - indicating if any new failures have occurred (which should be handled).
625941b70fa83653e4656dea
def convert_to_dot(self, show_probabilities=True): <NEW_LINE> <INDENT> s = 'digraph DT{\n' <NEW_LINE> s += 'node[fontname="Arial"];\n' <NEW_LINE> s += self._convert_node_to_dot(show_probabilities=show_probabilities) <NEW_LINE> s += '}' <NEW_LINE> return s
Converts a decision tree object to DOT code **Params** ---------- - `show_probabilities` (boolean) - if this is `True`, probabilities will be displayed in the leafs too **Returns** ----------- a string with the dot code for the decision tree
625941b7d10714528d5ffb0d
def Encrypt(self): <NEW_LINE> <INDENT> pass
Encrypt(self: FileInfo) Encrypts a file so that only the account used to encrypt the file can decrypt it.
625941b721a7993f00bc7b17
def test_empty_constructor(self): <NEW_LINE> <INDENT> self.assertEqual(self.f(), 0) <NEW_LINE> self.assertEqual(self.f.size(), 1) <NEW_LINE> self.assertEqual(self.f.emit(), b'\x00')
The no argument constructor is equivalent to 0.
625941b78e7ae83300e4adf9
def set_assertions(self, list_of_z3_assertions: List[BoolRef]) -> None: <NEW_LINE> <INDENT> if self.optional: <NEW_LINE> <INDENT> self.scheduled = Bool("%s_scheduled" % self.name) <NEW_LINE> point_in_past = -self.task_number <NEW_LINE> not_scheduled_assertion = And( self.start == point_in_past, self.end == point_in_past, self.duration == 0, ) <NEW_LINE> self.append_z3_assertion( If(self.scheduled, And(list_of_z3_assertions), not_scheduled_assertion) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.append_z3_assertion(And(list_of_z3_assertions))
Take a list of constraints to satisfy. For an optional task, create two cases: if the task is scheduled, the assertions must hold; if it is not scheduled, the task is moved to the past.
625941b7eab8aa0e5d26d98c
def gen_blocks(self, count=None): <NEW_LINE> <INDENT> if not count: <NEW_LINE> <INDENT> count = self._num_blk <NEW_LINE> <DEDENT> for x in range(0, count * 32, 32): <NEW_LINE> <INDENT> buf = self._read(x) <NEW_LINE> yield x, buf
generator that returns consecutive blocks of station memory
625941b75510c4643540f226
def reactivate_post_author(self, request, queryset): <NEW_LINE> <INDENT> for denuncia in queryset: <NEW_LINE> <INDENT> denuncia.post.activate_post_author()
Set the authors of the selected posts to an active state
625941b7a8370b77170526ce
def rSNP(self): <NEW_LINE> <INDENT> return self.rChrom().rSNP()
returns a random 'SNP' from the genome
625941b7656771135c3eb6a0
@must_be_logged_in <NEW_LINE> def dataverse_user_config_get(auth, **kwargs): <NEW_LINE> <INDENT> user_addon = auth.user.get_addon('dataverse') <NEW_LINE> user_has_auth = False <NEW_LINE> if user_addon: <NEW_LINE> <INDENT> user_has_auth = user_addon.has_auth <NEW_LINE> <DEDENT> return { 'result': { 'userHasAuth': user_has_auth, 'urls': { 'create': api_url_for('dataverse_add_user_account'), 'accounts': api_url_for('dataverse_account_list'), }, 'hosts': DEFAULT_HOSTS, }, }, http_status.HTTP_200_OK
View for getting a JSON representation of the logged-in user's Dataverse user settings.
625941b71b99ca400220a8de
def pelllucas(): <NEW_LINE> <INDENT> pelllucases = [2,2] <NEW_LINE> while pelllucases[-1] < 10000: <NEW_LINE> <INDENT> pelllucases.append(pelllucases[-1]*2+pelllucases[-2]) <NEW_LINE> <DEDENT> del pelllucases[-1] <NEW_LINE> return set(pelllucases)
The Pell-Lucas numbers are recursively defined. The series starts with 2 and 2. Each subsequent number is the sum of twice the previous number and the number before that: 2, 2, 2*2+2=6, 2*6+2=14, 2*14+6=34, and so on.
625941b723849d37ff7b2ec0
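A quick check of pelllucas() as defined above; note that the returned set collapses the two leading 2s into a single element, and every later term satisfies the stated recurrence.

pl = sorted(pelllucas())
print(pl[:6])  # [2, 6, 14, 34, 82, 198]
assert all(pl[i] == 2 * pl[i - 1] + pl[i - 2] for i in range(2, len(pl)))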
def generateData(self): <NEW_LINE> <INDENT> self.cohort.aggregateDataFromSQL(verbose=True) <NEW_LINE> dest = self.createDirectory(base=REPORTDATA) <NEW_LINE> self.cohort.saveDataToDisk(destination=dest) <NEW_LINE> self.freeData()
Generates and saves the cohort data. Calls the :meth:`.aggregateDataFromSQL` method from the :class:`.Cohort` instance passed as argument. The collected data matrices are stored in the :attr:`.Cohort.data` attribute. The data matrices are saved as txt files in the data destination directory.
625941b74d74a7450ccd3ff0
def main(): <NEW_LINE> <INDENT> base_path = 'D:/Transfer/Research/' <NEW_LINE> raw_data_paths = ['Data', 'Data_Ampt', 'Data_Sim'] <NEW_LINE> mix_data_paths = ['Data_Mix', 'Data_Ampt_Mix', 'Data_Sim_Mix'] <NEW_LINE> total_proton_shift = -1 <NEW_LINE> fix = True <NEW_LINE> fix_time = 1640293741 <NEW_LINE> n_fix_files = 0 <NEW_LINE> for data_type, paths in zip(['raw', 'mix'], [raw_data_paths, mix_data_paths]): <NEW_LINE> <INDENT> for data_path in paths: <NEW_LINE> <INDENT> data_full_path = f'{base_path}{data_path}/' <NEW_LINE> for set_dir in os.listdir(data_full_path): <NEW_LINE> <INDENT> if '_resample' in set_dir: <NEW_LINE> <INDENT> set_path = f'{data_full_path}{set_dir}/' <NEW_LINE> print(set_path) <NEW_LINE> for subset_dir in os.listdir(set_path): <NEW_LINE> <INDENT> subset_path = f'{set_path}{subset_dir}/' <NEW_LINE> for energy_dir in os.listdir(subset_path): <NEW_LINE> <INDENT> energy_path = f'{subset_path}{energy_dir}/' <NEW_LINE> for file_name in os.listdir(energy_path): <NEW_LINE> <INDENT> if 'ratios_divisions_' in file_name: <NEW_LINE> <INDENT> file_path = f'{energy_path}{file_name}' <NEW_LINE> if os.path.getmtime(file_path) < fix_time: <NEW_LINE> <INDENT> n_fix_files += 1 <NEW_LINE> if fix: <NEW_LINE> <INDENT> fix_file(file_path, data_type, total_proton_shift) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> print(f'Number of fix files: {n_fix_files}') <NEW_LINE> print('donzo')
Due to a bug, all total proton counts were shifted by 1 (except the default raw dists). Go through the resample files and correct them. :return:
625941b7b830903b967e9745
def set_children(self, child_nodes): <NEW_LINE> <INDENT> if not self.children: <NEW_LINE> <INDENT> self.children = [] <NEW_LINE> <DEDENT> for child in child_nodes: <NEW_LINE> <INDENT> self.children.append(child)
Set the children of this tree node
625941b73617ad0b5ed67d2d
def test_post_with_interdiff(self): <NEW_LINE> <INDENT> comment_text = "Test diff comment" <NEW_LINE> review_request, filediff = self._create_diff_review_request() <NEW_LINE> interdiffset = self.create_diffset(review_request) <NEW_LINE> interfilediff = self.create_filediff(interdiffset) <NEW_LINE> review = self.create_review(review_request, user=self.user) <NEW_LINE> rsp = self.api_post( get_review_diff_comment_list_url(review), { 'filediff_id': filediff.pk, 'interfilediff_id': interfilediff.pk, 'issue_opened': True, 'first_line': 1, 'num_lines': 5, 'text': comment_text, }, expected_mimetype=review_diff_comment_item_mimetype) <NEW_LINE> self.assertEqual(rsp['stat'], 'ok') <NEW_LINE> self.assertIn('diff_comment', rsp) <NEW_LINE> self.assertEqual(rsp['diff_comment']['text'], comment_text) <NEW_LINE> comment = Comment.objects.get(pk=rsp['diff_comment']['id']) <NEW_LINE> self.assertEqual(comment.filediff_id, filediff.pk) <NEW_LINE> self.assertEqual(comment.interfilediff_id, interfilediff.pk)
Testing the POST review-requests/<id>/reviews/<id>/diff-comments/ API with interdiff
625941b763b5f9789fde6f13
def __init__(self, board_area, markers, stability_level, reporter_id, callback_function): <NEW_LINE> <INDENT> super(FindMarkersReporter, self).__init__(reporter_id, callback_function) <NEW_LINE> self.board_area = board_area <NEW_LINE> self.markers = markers <NEW_LINE> self.stability_level = stability_level
:param board_area: Board area :param markers: Markers to search for :param stability_level: Minimum board area stability level before searching for markers
625941b7a79ad161976cbf73
def assign_crew(starship, crew): <NEW_LINE> <INDENT> for key,value in crew.items(): <NEW_LINE> <INDENT> starship[key] = value <NEW_LINE> <DEDENT> return starship
This function assigns crew members to a starship. Parameters: starship (dict): source entity. crew (dict): source entity. Returns: updated starship with one or more new crew member key-value pairs added to the caller.
625941b70a366e3fb873e645
def __init__(self, instance, prop_type, networked): <NEW_LINE> <INDENT> self._instance = instance <NEW_LINE> self._prop_type = prop_type <NEW_LINE> self._networked = networked
Store the base attributes on instantiation.
625941b755399d3f055884e1
def main(): <NEW_LINE> <INDENT> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pythonDjango.settings') <NEW_LINE> try: <NEW_LINE> <INDENT> from django.core.management import execute_from_command_line <NEW_LINE> <DEDENT> except ImportError as exc: <NEW_LINE> <INDENT> raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc <NEW_LINE> <DEDENT> execute_from_command_line(sys.argv)
Run administrative tasks.
625941b74d74a7450ccd3ff1
def BindCallbacks(self): <NEW_LINE> <INDENT> callbacks = [ (wx.EVT_CLOSE, [ (ID_MAIN_FRAME, self.OnClose) ]), (wx.EVT_MENU, [ (wx.ID_EXIT, self.OnExit), (wx.ID_OPEN, self.OnOpen), (wx.ID_SAVE, self.OnSave), (wx.ID_ABOUT, self.OnAbout), ]), (wx.EVT_BUTTON, [ (ID_EXPOSURE_ADD, self.OnAddExposures), (ID_EXPOSURE_DEL, self.OnDeleteSelected), ]), (wx.EVT_SPINCTRL, [ (ID_INDIVIDUAL_MIN, self.OnIndividualMin), (ID_INDIVIDUAL_MAX, self.OnIndividualMax), (ID_INDIVIDUAL_EXP, self.OnIndividualExp), (ID_CIRCLE_RADIUS, self.OnCircleRadius), ]), (wx.EVT_RADIOBOX, [ (ID_SELECT_MODE, self.OnSelectionMode), ]), (EVT_COORDS, [ (ID_EXPOSURE_VIEW, self.OnImageCoords), ]), ] <NEW_LINE> for event, bindings in callbacks: <NEW_LINE> <INDENT> for id, callback in bindings: <NEW_LINE> <INDENT> self.view.Bind(event, callback, id=id) <NEW_LINE> <DEDENT> <DEDENT> self.view.image_view.Bind(wx.EVT_KEY_DOWN, self.OnImageKeyDown)
Connect up event handlers
625941b7596a8972360898f8
def lnprob(self, tipmag, alphargb, alphaother, fracother): <NEW_LINE> <INDENT> return self._lnprob_func(self.magdata, tipmag, alphargb, alphaother, fracother)
This does *not* sum up the lnprobs - that goes in __call__. Instead it gives the lnprob per data point
625941b7462c4b4f79d1d4fe
def show_kernels_dense(weights, name=None): <NEW_LINE> <INDENT> fig, axs = plt.subplots(1, 1, figsize=FIG_SIZE) <NEW_LINE> axs.imshow(weights, cmap='gray') <NEW_LINE> axs.axis('off') <NEW_LINE> axs.set_yticks([]) <NEW_LINE> axs.set_xticks([]) <NEW_LINE> save_figure(fig, name)
2 dimensional images of dense weights.
625941b7e5267d203edcdacf
def test_long_line_include_rule(self): <NEW_LINE> <INDENT> text = ( 'x' * 90 + '\n' + '\tthis line has a tab' ) <NEW_LINE> m = linkwort.lint.MarkdownLint(include_rules=['hard-tabs']) <NEW_LINE> v = m.parse(text) <NEW_LINE> assert len(v) == 1 <NEW_LINE> assert v[0].ruleid == 'hard-tabs'
test that when we include a rule, only that rule is checked
625941b77cff6e4e811177b4
def get_scrambled_order(total_lines): <NEW_LINE> <INDENT> scrambled_order = list(range(total_lines)) <NEW_LINE> random.shuffle(scrambled_order) <NEW_LINE> return scrambled_order
(int) --> array Return an array containing the numbers 0 to total_lines - 1 in random order
625941b7287bf620b61d389d
@app.route('/restaurant/<int:restaurant_id>/<int:menu_id>/edit/', methods=['GET', 'POST']) <NEW_LINE> def editMenuItem(restaurant_id, menu_id): <NEW_LINE> <INDENT> restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one() <NEW_LINE> menu_item = session.query(MenuItem).filter_by(id = menu_id).one() <NEW_LINE> if request.method == 'POST': <NEW_LINE> <INDENT> data = ({'name': request.form['name'], 'description': request.form['description'], 'price': request.form['price']} ) <NEW_LINE> session.query(MenuItem).filter_by(id=menu_id).update(data) <NEW_LINE> session.commit() <NEW_LINE> flash('Congratulations! Menu Item Successfully Edited!') <NEW_LINE> return redirect(url_for('menu', restaurant_id = restaurant.id)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return render_template('edit_menu_item.html', menu_item = menu_item, restaurant = restaurant)
Form to edit a menu item, and logic to POST updates to db
625941b7de87d2750b85fbbc
def loglike(self, params): <NEW_LINE> <INDENT> return 0.5*(-np.sum((self.y - self.X@params)**2) - self.m*self.log2pi)
...
625941b7a4f1c619b28afe70
def attention(decoder_state, coverage=None): <NEW_LINE> <INDENT> with variable_scope.variable_scope("Attention"): <NEW_LINE> <INDENT> decoder_features = linear(decoder_state, attention_vec_size, True) <NEW_LINE> decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) <NEW_LINE> def masked_attention(e): <NEW_LINE> <INDENT> attn_dist = nn_ops.softmax(e) <NEW_LINE> attn_dist *= enc_padding_mask <NEW_LINE> masked_sums = tf.reduce_sum(attn_dist, axis=1) <NEW_LINE> return attn_dist / tf.reshape(masked_sums, [-1, 1]) <NEW_LINE> <DEDENT> if use_coverage and coverage is not None: <NEW_LINE> <INDENT> coverage_features = nn_ops.conv2d(coverage, w_c, [1, 1, 1, 1], "SAME") <NEW_LINE> e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) <NEW_LINE> attn_dist = masked_attention(e) <NEW_LINE> coverage += array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features), [2, 3]) <NEW_LINE> attn_dist = masked_attention(e) <NEW_LINE> if use_coverage: <NEW_LINE> <INDENT> coverage = tf.expand_dims(tf.expand_dims(attn_dist, 2), 2) <NEW_LINE> <DEDENT> <DEDENT> context_vector = math_ops.reduce_sum( array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) * encoder_states, [1, 2]) <NEW_LINE> context_vector = array_ops.reshape(context_vector, [-1, attn_size]) <NEW_LINE> <DEDENT> return context_vector, attn_dist, coverage
Calculate the context vector and attention distribution from the decoder state. Args: decoder_state: state of the decoder coverage: Optional. Previous timestep's coverage vector, shape (batch_size, attn_len, 1, 1). Returns: context_vector: weighted sum of encoder_states attn_dist: attention distribution coverage: new coverage vector. shape (batch_size, attn_len, 1, 1)
625941b792d797404e303fb8
@commandWrap <NEW_LINE> def AppendToHairCache(*args, **kwargs): <NEW_LINE> <INDENT> return cmds.AppendToHairCache(*args, **kwargs)
:rtype: list|str|DagNode|AttrObject|ArrayAttrObject|Components1Base
625941b7d53ae8145f87a0a5
def get_last_sync(self, device_id): <NEW_LINE> <INDENT> self.log.debug('get-last-sync', device_id=device_id) <NEW_LINE> try: <NEW_LINE> <INDENT> device_proxy = self._device_proxy(device_id) <NEW_LINE> data = device_proxy.get(depth=0) <NEW_LINE> return self._string_to_time(data.last_sync_time) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> self.log.exception('get-last-sync-exception', e=e) <NEW_LINE> raise
Get the Last Sync Time saved to the database for a device :param device_id: (str) ONU Device ID :return: (int) The Value or None if not found
625941b7d99f1b3c44c673c6
def get_hosts(self): <NEW_LINE> <INDENT> return [x for x in self.node if self.node[x]['type'] == 'host']
Returns all the nodes that are hosts
625941b724f1403a92600998
def __repr__(self) -> str: <NEW_LINE> <INDENT> return self.to_string("%s:%s" % (self.server, self.port))
String representation
625941b7d486a94d0b98df7d
def Vector_generate(prop): <NEW_LINE> <INDENT> return [[Vector(v) for v in obj] for obj in prop]
return a list of Vector() objects from a standard Sverchok data
625941b70383005118ecf413
def energy(self, mass = 100): <NEW_LINE> <INDENT> energy = [self.proteins, self.fats, self.carbs, self.calories] <NEW_LINE> energy = [x / 100 * mass for x in energy] <NEW_LINE> return energy
Return a list with the proteins, fats, carbs, and calories in the given amount (grams) of the product
625941b74527f215b584c28a
def twoSum(self, nums, target): <NEW_LINE> <INDENT> hash_map = {} <NEW_LINE> for idx, num in enumerate(nums): <NEW_LINE> <INDENT> if target - num in hash_map: <NEW_LINE> <INDENT> return [hash_map[target - num], idx] <NEW_LINE> <DEDENT> hash_map[num] = idx
Parameters ---------- nums: List[int] target: int Returns ------- List[int]: the indexes of the two numbers whose sum equals target.
625941b7d4950a0f3b08c189
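A standalone sketch of the same one-pass hash-map technique as the twoSum method above, for quick experimentation outside the class:

def two_sum(nums, target):
    # one pass: remember each value's index, look up the complement
    seen = {}
    for idx, num in enumerate(nums):
        if target - num in seen:
            return [seen[target - num], idx]
        seen[num] = idx

print(two_sum([2, 7, 11, 15], 9))  # [0, 1]
print(two_sum([3, 2, 4], 6))       # [1, 2]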
def __call__(self, trainer): <NEW_LINE> <INDENT> reporter = reporter_module.Reporter() <NEW_LINE> if hasattr(self, 'name'): <NEW_LINE> <INDENT> prefix = self.name + '/' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> prefix = '' <NEW_LINE> <DEDENT> for name, target in six.iteritems(self._targets): <NEW_LINE> <INDENT> reporter.add_observer(prefix + name, target) <NEW_LINE> reporter.add_observers(prefix + name, target.namedlinks(skipself=True)) <NEW_LINE> <DEDENT> with reporter: <NEW_LINE> <INDENT> self.patch_image_dir = os.path.join(trainer.out, self.layer_name) <NEW_LINE> if not os.path.exists(self.patch_image_dir): <NEW_LINE> <INDENT> os.makedirs(self.patch_image_dir) <NEW_LINE> <DEDENT> result, locs, bounds = self.evaluate() <NEW_LINE> outputdir = os.path.join(trainer.out, 'features') <NEW_LINE> if not os.path.exists(outputdir): <NEW_LINE> <INDENT> os.makedirs(outputdir) <NEW_LINE> <DEDENT> if locs: <NEW_LINE> <INDENT> self.save_tuple_list(os.path.join(outputdir, 'maxloc_' + self.layer_name + '.txt'), locs) <NEW_LINE> <DEDENT> if bounds: <NEW_LINE> <INDENT> self.save_tuple_list(os.path.join(outputdir, 'maxbounds_' + self.layer_name + '.txt'), bounds) <NEW_LINE> <DEDENT> <DEDENT> reporter_module.report(result) <NEW_LINE> return result
override method of extensions.Evaluator.
625941b7d8ef3951e324336c
def _get_connection(self): <NEW_LINE> <INDENT> if not hasattr(self, 'ldap'): <NEW_LINE> <INDENT> self.ldap = ldap <NEW_LINE> <DEDENT> if not hasattr(self, '_connection'): <NEW_LINE> <INDENT> self._connection = None <NEW_LINE> <DEDENT> if self._connection is None: <NEW_LINE> <INDENT> self._connection = self.ldap.initialize(settings.ldap_settings.AUTH_LDAP_SERVER_URI) <NEW_LINE> for opt, value in settings.ldap_settings.AUTH_LDAP_CONNECTION_OPTIONS.iteritems(): <NEW_LINE> <INDENT> self._connection.set_option(opt, value) <NEW_LINE> <DEDENT> if settings.ldap_settings.AUTH_LDAP_START_TLS: <NEW_LINE> <INDENT> self._connection.start_tls_s() <NEW_LINE> <DEDENT> <DEDENT> return self._connection
Returns our cached LDAPObject, which may or may not be bound.
625941b7b5575c28eb68de2c
def random_erasing(self, probability, rectangle_area): <NEW_LINE> <INDENT> if not 0 < probability <= 1: <NEW_LINE> <INDENT> raise ValueError(Pipeline._probability_error_text) <NEW_LINE> <DEDENT> elif not 0.1 < rectangle_area <= 1: <NEW_LINE> <INDENT> raise ValueError("The rectangle_area must be between 0.1 and 1.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.add_operation(RandomErasing(probability=probability, rectangle_area=rectangle_area))
Work in progress. This operation performs a Random Erasing operation, as described in `https://arxiv.org/abs/1708.04896 <https://arxiv.org/abs/1708.04896>`_ by Zhong et al. Its purpose is to make models robust to occlusion, by randomly replacing rectangular regions with random pixel values. For greyscale images the random pixels values will also be greyscale, and for RGB images the random pixels values will be in RGB. This operation is subject to change, the original work describes several ways of filling the random regions, including a random solid colour or greyscale value. Currently this operations uses the method which yielded the best results in the tests performed by Zhong et al. :param probability: A value between 0 and 1 representing the probability that the operation should be performed. :param rectangle_area: The percentage area of the image to occlude with the random rectangle, between 0.1 and 1. :return: None
625941b776e4537e8c3514a6
def __init__( self, *, enabled: bool, encryption_settings: Optional[List["EncryptionSettingsElement"]] = None, encryption_settings_version: Optional[str] = None, **kwargs ): <NEW_LINE> <INDENT> super(EncryptionSettingsCollection, self).__init__(**kwargs) <NEW_LINE> self.enabled = enabled <NEW_LINE> self.encryption_settings = encryption_settings <NEW_LINE> self.encryption_settings_version = encryption_settings_version
:keyword enabled: Required. Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. :paramtype enabled: bool :keyword encryption_settings: A collection of encryption settings, one for each disk volume. :paramtype encryption_settings: list[~azure.mgmt.compute.v2019_03_01.models.EncryptionSettingsElement] :keyword encryption_settings_version: Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption. :paramtype encryption_settings_version: str
625941b78e7ae83300e4adfa
def IsLicensedForProduct(self): <NEW_LINE> <INDENT> return super(IGPToolExtensionGen, self).IsLicensedForProduct()
Method IGPToolExtensionGen.IsLicensedForProduct OUTPUT IsLicensed : VARIANT_BOOL*
625941b7566aa707497f43a9
def cleanup(self) -> None: <NEW_LINE> <INDENT> if self._homedir is not None and not self._failed: <NEW_LINE> <INDENT> shutil.rmtree(str(self._homedir))
Clean up the temporary home directory for asciidoc.
625941b738b623060ff0ac1e
def add_arguments(self, parser): <NEW_LINE> <INDENT> parser.add_argument('schema_name', nargs='+', type=str)
Run manually in console: python manage.py hotfix_15 "london"
625941b7627d3e7fe0d68c7d
def numberOfBoomerangs3(self, points): <NEW_LINE> <INDENT> if len(points) < 3: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> res = 0 <NEW_LINE> for i in range(len(points)): <NEW_LINE> <INDENT> pDict = {} <NEW_LINE> for j in range(len(points)): <NEW_LINE> <INDENT> if j == i: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> dis = pow(points[i][0] - points[j][0], 2) + pow(points[i][1] - points[j][1], 2) <NEW_LINE> key = str(dis) <NEW_LINE> if key in pDict: <NEW_LINE> <INDENT> pDict[key] += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pDict[key] = 1 <NEW_LINE> <DEDENT> <DEDENT> for p in pDict: <NEW_LINE> <INDENT> if pDict[p] > 1: <NEW_LINE> <INDENT> res += pDict[p] * (pDict[p] - 1) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return res
:type points: List[List[int]] :rtype: int
625941b72ae34c7f2600cf61
def contains_prop(self, name): <NEW_LINE> <INDENT> new_list = [x for x in self._props if x.get_name() == name] <NEW_LINE> if(len(new_list) == 0): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return new_list[0]
Checks if the block contains a property with the given name. If it does, return that property; otherwise return None
625941b7dc8b845886cb5363
def _lineToPointList(self, polyline): <NEW_LINE> <INDENT> return [polyline[i:i + self.INTERPOLATION + 1] for i in range(0, len(polyline), self.INTERPOLATION)][:-1]
Convert a polyline to a pointList. pointList is the list of points between one anchor and the next. The number of elements in pointList is INTERPOLATION + 1 because adjacent anchors overlap.
625941b715baa723493c3da1
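A small demonstration of the slicing in _lineToPointList, with a hypothetical INTERPOLATION of 4: each 5-point segment shares its last point with the next segment's first, and the trailing [:-1] drops the stub slice that starts at the final anchor.

INTERPOLATION = 4
polyline = list(range(13))
segments = [polyline[i:i + INTERPOLATION + 1]
            for i in range(0, len(polyline), INTERPOLATION)][:-1]
print(segments)  # [[0, 1, 2, 3, 4], [4, 5, 6, 7, 8], [8, 9, 10, 11, 12]]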
def get( self, banana_id=None ): <NEW_LINE> <INDENT> if not banana_id: <NEW_LINE> <INDENT> self.write( { "error": "Missing argument: id", "errorCode": 400, } ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> conn = self.connect_db() <NEW_LINE> try: <NEW_LINE> <INDENT> with conn.cursor() as cursor: <NEW_LINE> <INDENT> sql = 'SELECT * FROM `bananas` WHERE `id` = %s' <NEW_LINE> qty = cursor.execute(sql, banana_id) <NEW_LINE> query = cursor.fetchone() <NEW_LINE> <DEDENT> if not qty: <NEW_LINE> <INDENT> raise tornado.web.HTTPError(404) <NEW_LINE> <DEDENT> <DEDENT> except tornado.web.HTTPError: <NEW_LINE> <INDENT> self.write( { "error": "This banana doesn't exist", "errorCode": 404 } ) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> self.write( { "error": "Couldn't query Bananas: " + str(e), "errorCode": 500 } ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.write( { "id": query[0], "color": query[1], "size": query[2], "price": query[3], } ) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> conn.close()
Get a single banana Get a single banana identified by the mandatory banana_id param Handle common errors such as banana_id arg missing, not existing or DB errors. Args: INT banana_id: mandatory to identify which banana to return Returns: A dict describing all data (id, color, size, price) related to the chosen banana. Example (banana_id = 1): { "id": 1, "color": "yellow", "size": 10.5, "price": 4.99, } If banana_id arg is missing, doesn't exist in the DB, or there is an issue to query it from the DB, it will return a dict describing an error instead. Example (doesn't exist in the DB): { "error": "This banana doesn't exist", "errorCode": 404 }
625941b791f36d47f21ac325
def bloated(self,material,spacing=None): <NEW_LINE> <INDENT> if spacing is None: <NEW_LINE> <INDENT> spacing = material_spacing[material] <NEW_LINE> <DEDENT> x = self.x - spacing <NEW_LINE> y = self.y - spacing <NEW_LINE> w = self.w + 2 * spacing <NEW_LINE> h = self.h + 2 * spacing <NEW_LINE> return Rect(x,y,w,h,material)
Returns bloated rectangle (with spacing)
625941b7c432627299f04a73
def dan_acf(x, axis=0, fast=False): <NEW_LINE> <INDENT> x = np.atleast_1d(x) <NEW_LINE> m = [slice(None), ] * len(x.shape) <NEW_LINE> if fast: <NEW_LINE> <INDENT> n = int(2**np.floor(np.log2(x.shape[axis]))) <NEW_LINE> m[axis] = slice(0, n) <NEW_LINE> x = x[tuple(m)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> n = x.shape[axis] <NEW_LINE> <DEDENT> f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis) <NEW_LINE> m[axis] = slice(0, n) <NEW_LINE> acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real <NEW_LINE> m[axis] = 0 <NEW_LINE> return acf / acf[tuple(m)]
Estimate the autocorrelation function of a time series using the FFT. :param x: The time series. If multidimensional, set the time axis using the ``axis`` keyword argument and the function will be computed for every other axis. :param axis: (optional) The time axis of ``x``. Assumed to be the first axis if not specified. :param fast: (optional) If ``True``, only use the largest ``2^n`` entries for efficiency. (default: False)
625941b7d7e4931a7ee9dd4b
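A usage sketch for dan_acf as defined above, assuming numpy is imported as np as in the function body: for an AR(1) series with coefficient 0.9, the estimated autocorrelation should decay roughly as 0.9**lag.

import numpy as np

rng = np.random.RandomState(0)
x = np.zeros(5000)
for t in range(1, len(x)):
    x[t] = 0.9 * x[t - 1] + rng.randn()  # AR(1) process
acf = dan_acf(x)
print(acf[:3])  # ~[1.0, 0.9, 0.81]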
def infer(self, patternNZ, actValueList): <NEW_LINE> <INDENT> if self.steps[0] == 0 or actValueList is None: <NEW_LINE> <INDENT> defaultValue = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> defaultValue = actValueList[0] <NEW_LINE> <DEDENT> actValues = [x if x is not None else defaultValue for x in self._actualValues] <NEW_LINE> retval = {"actualValues": actValues} <NEW_LINE> for nSteps in self.steps: <NEW_LINE> <INDENT> predictDist = self.inferSingleStep(patternNZ, self._weightMatrix[nSteps]) <NEW_LINE> retval[nSteps] = predictDist <NEW_LINE> <DEDENT> return retval
Return the inference value from one input sample. The actual learning happens in compute(). :param patternNZ: list of the active indices from the output below :param classification: dict of the classification information: bucketIdx: index of the encoder bucket actValue: actual value going into the encoder :return: dict containing inference results, one entry for each step in self.steps. The key is the number of steps, the value is an array containing the relative likelihood for each bucketIdx starting from bucketIdx 0. for example: .. code-block:: python {'actualValues': [0.0, 1.0, 2.0, 3.0] 1 : [0.1, 0.3, 0.2, 0.7] 4 : [0.2, 0.4, 0.3, 0.5]}
625941b7187af65679ca4f4c
def execute(self, GFDI, WindGust,Wind, RH, Curing, T, GridTimeRange,editArea, varDict): <NEW_LINE> <INDENT> myTimeRange=GridTimeRange <NEW_LINE> tc = 5.0/9.0*(T-32.0) <NEW_LINE> windSustained = self.getGrids("Fcst","Wind","SFC", myTimeRange, noDataError=0) <NEW_LINE> windGust = self.getGrids("Fcst","WindGust","SFC", myTimeRange, noDataError=0) <NEW_LINE> wspd_kph = (windSustained[0]+windGust)* 1.85325*0.5 <NEW_LINE> Curing = self.getGrids("Fcst", "Curing", "SFC", myTimeRange, noDataError=0) <NEW_LINE> rh = self.getGrids("Fcst", "RH", "SFC", myTimeRange, noDataError=0) <NEW_LINE> GFDI = pow(10,(0.009254 -0.004096*pow((100.0-Curing),1.536) +0.01201*tc +0.2789*pow(wspd_kph,0.5) -0.09577*pow(rh,0.5) )) <NEW_LINE> GFDI = around(GFDI,0) <NEW_LINE> GFDI = clip(GFDI,0.1,150.0) <NEW_LINE> return GFDI
Calculates the GFDI based on T, RH, Wind and Curing grids
625941b750485f2cf553cbc8
def create_augmentation_func(**kwargs): <NEW_LINE> <INDENT> def augmentation_func(X): <NEW_LINE> <INDENT> return random_transformations(X=X, **kwargs) <NEW_LINE> <DEDENT> return augmentation_func
Args: shadow: (tuple of two floats) (min, max) shadow intensity shadow_file: (str) Path to image file containing shadow pattern shadow_crop_range: (tuple of two floats)(default=(0.02, 0.25)) min and max proportion of shadow image to take crop from. rotate: (int)(default=180) Max angle to rotate in each direction crop: (float)(default=0.5) lr_flip: (bool)(default=True) tb_flip: (bool)(default=True) brightness: ()(default=) (std, min, max) contrast: ()(default=) (std, min, max) blur: ()(default=3) noise: ()(default=10) resampling: (default=PIL.Image.BICUBIC)
625941b7377c676e91271fda
def __init__(self, api_key, client_key, platform, *access_token, **kwargs): <NEW_LINE> <INDENT> self.headers = {'apikey':api_key,'clientkey':client_key,'platform':platform} <NEW_LINE> if access_token: <NEW_LINE> <INDENT> self.headers['token'] = access_token[0] <NEW_LINE> <DEDENT> if kwargs.get('serverRoot'): <NEW_LINE> <INDENT> self.serverRoot = kwargs.get('serverRoot') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.serverRoot = 'https://wordhopapi.herokuapp.com' <NEW_LINE> <DEDENT> if kwargs.get('socketServer'): <NEW_LINE> <INDENT> self.socketServer = kwargs.get('socketServer') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.socketServer = 'https://wordhop-socket-server.herokuapp.com' <NEW_LINE> <DEDENT> if kwargs.get('path'): <NEW_LINE> <INDENT> self.path = kwargs.get('path') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.path = '/api/v1/' <NEW_LINE> <DEDENT> self.apiUrl = self.serverRoot + self.path <NEW_LINE> self.platform = platform <NEW_LINE> self.clientkey = client_key <NEW_LINE> self.start()
@required: api_key client_key platform @optional: access_token kwargs
625941b7009cb60464c631ec
def load_data(img_dir, valid_split_size): <NEW_LINE> <INDENT> data_df = pd.read_csv(os.path.join(img_dir, 'driving_log.csv')) <NEW_LINE> X = data_df[['center', 'left', 'right']].values <NEW_LINE> y = data_df['steering'].values <NEW_LINE> X, y = shuffle(X, y) <NEW_LINE> X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=valid_split_size, random_state=0) <NEW_LINE> return X_train, X_valid, y_train, y_valid
Load training data and split it into training and validation set
625941b726068e7796caeb07
def random_element(self, degree=2, *args, **kwds): <NEW_LINE> <INDENT> if isinstance(degree, (list, tuple)): <NEW_LINE> <INDENT> if len(degree) != 2: <NEW_LINE> <INDENT> raise ValueError("degree argument must be an integer or a tuple of 2 integers (min_degree, max_degree)") <NEW_LINE> <DEDENT> if degree[0] > degree[1]: <NEW_LINE> <INDENT> raise ValueError("minimum degree must be less or equal than maximum degree") <NEW_LINE> <DEDENT> degree = randint(*degree) <NEW_LINE> <DEDENT> R = self.base_ring() <NEW_LINE> return self([R.random_element(*args, **kwds) for _ in xrange(degree+1)])
Return a random polynomial. INPUT: - ``degree`` - Integer with degree (default: 2) or a tuple of integers with minimum and maximum degrees - ``*args, **kwds`` - Passed on to the ``random_element`` method for the base ring OUTPUT: - Polynomial such that the coefficients of `x^i`, for `i` up to ``degree``, are random elements from the base ring, randomized subject to the arguments ``*args`` and ``**kwds`` EXAMPLES:: sage: R.<x> = ZZ[] sage: R.random_element(10, 5,10) 9*x^10 + 8*x^9 + 6*x^8 + 8*x^7 + 8*x^6 + 9*x^5 + 8*x^4 + 8*x^3 + 6*x^2 + 8*x + 8 sage: R.random_element(6) x^6 - 3*x^5 - x^4 + x^3 - x^2 + x + 1 sage: R.random_element(6) -2*x^5 + 2*x^4 - 3*x^3 + 1 sage: R.random_element(6) x^4 - x^3 + x - 2 If a tuple of two integers is given for the degree argument, a random integer will be chosen between the first and second element of the tuple as the degree:: sage: R.random_element(degree=(0,8)) 2*x^7 - x^5 + 4*x^4 - 5*x^3 + x^2 + 14*x - 1 sage: R.random_element(degree=(0,8)) -2*x^3 + x^2 + x + 4 TESTS:: sage: R.random_element(degree=[5]) Traceback (most recent call last): ... ValueError: degree argument must be an integer or a tuple of 2 integers (min_degree, max_degree) sage: R.random_element(degree=(5,4)) Traceback (most recent call last): ... ValueError: minimum degree must be less or equal than maximum degree
625941b78c3a8732951581ed
def _measurement_start_cb(self, sender): <NEW_LINE> <INDENT> pass
Things to do at the start of a measurement
625941b7ad47b63b2c509db9
def poke_watchdog(self): <NEW_LINE> <INDENT> logging.debug("Poking watchdog") <NEW_LINE> self.fd.write("\n") <NEW_LINE> self.fd.flush()
Write something to the watchdog file.
625941b74c3428357757c15a
def nLLeval(ldelta, Uy, S, REML=True): <NEW_LINE> <INDENT> n_s = Uy.shape[0] <NEW_LINE> delta = scipy.exp(ldelta) <NEW_LINE> Sd = S + delta <NEW_LINE> ldet = scipy.sum(scipy.log(Sd)) <NEW_LINE> Sdi = 1.0 / Sd <NEW_LINE> Uy = Uy.flatten() <NEW_LINE> ss = 1. / n_s * (Uy * Uy * Sdi).sum() <NEW_LINE> nLL = 0.5 * (n_s * scipy.log(2.0 * scipy.pi) + ldet + n_s + n_s * scipy.log(ss)) <NEW_LINE> if REML: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return nLL
evaluate the negative log likelihood of a random effects model: nLL = 1/2*(n_s*log(2pi) + logdet(K + delta*I) + 1/ss * y^T (K + delta*I)^{-1} y), where K = U S U^T. Uy: transformed outcome: n_s x 1 S: eigenvalues of K: n_s ldelta: log-transformed ratio sigma_gg/sigma_ee
625941b7d99f1b3c44c673c7
def set_application_config(self, release): <NEW_LINE> <INDENT> version = 'v{}'.format(release.version) <NEW_LINE> try: <NEW_LINE> <INDENT> labels = { 'version': version, 'type': 'env' } <NEW_LINE> secrets_env = {} <NEW_LINE> for key, value in self._build_env_vars(release).items(): <NEW_LINE> <INDENT> secrets_env[key.lower().replace('_', '-')] = str(value) <NEW_LINE> <DEDENT> secrets_env = OrderedDict(sorted(secrets_env.items(), key=lambda t: t[0])) <NEW_LINE> secret_name = "{}-{}-env".format(self.id, version) <NEW_LINE> self._scheduler.secret.get(self.id, secret_name) <NEW_LINE> <DEDENT> except KubeHTTPException: <NEW_LINE> <INDENT> self._scheduler.secret.create(self.id, secret_name, secrets_env, labels=labels) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._scheduler.secret.update(self.id, secret_name, secrets_env, labels=labels)
Creates the application config as a secret in Kubernetes and updates it if it already exists
625941b730c21e258bdfa2cc
def setHeliocentricTextFlag(self, flag): <NEW_LINE> <INDENT> self.showHeliocentricTextFlag = flag
Sets the flag that indicates that the planet heliocentric longitude movement should be measured, where retrograde movements count as negative. Arguments: flag - bool value for the enabled flag.
625941b77b25080760e3928a
def setContent(self, list, index): <NEW_LINE> <INDENT> self._list = list <NEW_LINE> self._index = index <NEW_LINE> self._displayItem(self._list[self._index])
Set the content and the displayed index Arguments: list -- a list containing the content; index -- the currently active index
625941b799fddb7c1c9de1c3
def __ne__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, NewSOROrder): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return self.to_dict() != other.to_dict()
Returns true if both objects are not equal
625941b74428ac0f6e5ba621
def led_off(self, id): <NEW_LINE> <INDENT> self.blinker_off(id)
| Turns *OFF* one of the 2 red blinkers that `GoPiGo3`_ has. | The same as :py:meth:`~easygopigo3.EasyGoPiGo3.blinker_off`. :param int|str id: **0** / **1** for the right / left led or string literals can be used : ``"right"`` and ``"left"``.
625941b7460517430c393fbf
def getRandom(self): <NEW_LINE> <INDENT> idx = random.randrange(self.len) <NEW_LINE> return self.l[idx]
Get a random element from the set. :rtype: int
625941b7b57a9660fec336af
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, FirewallStatus): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.__dict__ == other.__dict__
Returns true if both objects are equal
625941b7004d5f362079a167
def __init__(self): <NEW_LINE> <INDENT> self.swagger_types = { 'phy_identifier': 'int', 'is_valid': 'bool', 'current_speed': 'str', 'maximum_speed': 'str', 'error_counts': 'SasPhyErrorCounts' } <NEW_LINE> self.attribute_map = { 'phy_identifier': 'phyIdentifier', 'is_valid': 'isValid', 'current_speed': 'currentSpeed', 'maximum_speed': 'maximumSpeed', 'error_counts': 'errorCounts' } <NEW_LINE> self._phy_identifier = None <NEW_LINE> self._is_valid = None <NEW_LINE> self._current_speed = None <NEW_LINE> self._maximum_speed = None <NEW_LINE> self._error_counts = None
SasPhyData - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
625941b72c8b7c6e89b355f3
def inorder_print_tree(self): <NEW_LINE> <INDENT> if (self.left != None): <NEW_LINE> <INDENT> self.left.inorder_print_tree() <NEW_LINE> <DEDENT> self.print_node() <NEW_LINE> if (self.right != None): <NEW_LINE> <INDENT> self.right.inorder_print_tree()
Print tree content inorder
625941b7091ae35668666d95
def set(self, key: str, value: Any) -> None: <NEW_LINE> <INDENT> self._data[key] = value
Sets key and value in the data set. Args: key: The key of the data set value. value: The data set value.
625941b721bff66bcd684785
def prepare_network_operation(vca_client, operation): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> gateway = get_gateway( vca_client, ctx.target.node.properties['nat']['edge_gateway']) <NEW_LINE> public_ip = _obtain_public_ip(vca_client, ctx, gateway, operation) <NEW_LINE> private_ip = _create_ip_range(vca_client, gateway) <NEW_LINE> for rule in ctx.target.node.properties['rules']: <NEW_LINE> <INDENT> rule_type = rule['type'] <NEW_LINE> nat_network_operation( vca_client, gateway, operation, rule_type, public_ip, private_ip, "any", "any", "any") <NEW_LINE> <DEDENT> <DEDENT> except KeyError as e: <NEW_LINE> <INDENT> raise cfy_exc.NonRecoverableError( "Parameter not found: {0}".format(e) ) <NEW_LINE> <DEDENT> return _save_configuration(gateway, vca_client, operation, public_ip)
Create NAT rules from the rules defined on the network node
625941b74d74a7450ccd3ff2