| Column | Type | Range |
|---|---|---|
| `query` | string | 9 – 3.4k characters |
| `document` | string | 9 – 87.4k characters |
| `metadata` | dict | – |
| `negatives` | sequence | 4 – 101 items |
| `negative_scores` | sequence | 4 – 101 items |
| `document_score` | string | 3 – 10 characters |
| `document_rank` | string (categorical) | 102 distinct values |
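Each row pairs a natural-language query with a matching code document, a list of negative code snippets, and per-negative scores. To make the schema concrete, here is a minimal loading sketch using the `datasets` library; the repository id and the `train` split name are placeholders, not values taken from this card.

```python
from datasets import load_dataset

# Hypothetical repository id and split name -- substitute the real ones.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                  # natural-language task description
print(row["document"][:200])         # the matching code snippet
print(len(row["negatives"]))         # 4 to 101 non-matching snippets
print(row["metadata"]["objective"])  # which columns form the training triplet
```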
Example row:

query: Simulate printing each design, until there are none left. Move each design to completed_models after printing.

document:

```python
def print_models(unprinted_designs, completed_models):
    while unprinted_designs:
        current_design = unprinted_designs.pop()
        # Simulate creating a 3d print from the design.
        print("Printing model: " + current_design)
        completed_models.append(current_design)
```
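For context, a short usage sketch of the function above; the design names are illustrative, not part of the dataset row.

```python
unprinted_designs = ["phone case", "robot pendant", "dodecahedron"]
completed_models = []

print_models(unprinted_designs, completed_models)

# pop() drains the list from the end, so both lists are mutated in place:
print(unprinted_designs)   # []
print(completed_models)    # ['dodecahedron', 'robot pendant', 'phone case']
```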
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_models (unprinted_designs,completed_models):\n \n while unprinted_designs:\n current_design = unprinted_designs.pop()\n #Simulate creating a 3D print from the desig.\n print (\"printing model: \" + current_design)\n completed_models.append (current_design)", "def print_models(unprinted_designs, completed_models):\r\n while unprinted_designs:\r\n current_designs = unprinted_designs.pop()\r\n\r\n # simulate creating a 3d print from the design\r\n print(\"Printing model: \" + current_designs)\r\n completed_models.append(current_designs)", "def print_models(unprinted_design, completed_design):\n \n while unprinted_design:\n current_design = unprinted_design.pop()\n print(f\"Printing model: {current_design}\")\n completed_design.append(current_design)", "def print_models(unprinted_designs, completed_models):\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n \n # Simulate creating a 3D print from the design.\n print(\"Printing model: \" + current_design)\n completed_models.append(current_design)", "def print_models(unprinted_designs, completed_models):\n\twhile unprinted_designs:\n\t\tcurrent_deign = unprinted_designs.pop()\n\t\tprint(\"Printing Model: \" + current_deign)\n\t\tcompleted_models.append(current_deign)", "def print_modles (unprinted_designs,completed_models):\n\twhile unprinted_designs:\n\t\tcurrent_design = unprinted_designs.pop()\n\t\tprint(\"Printing the design {current_design}\")\n\t\tcompleted_models.append(current_design)", "def show_completed_design(completed_design):\n print(\"\\nThe following models have been printed:\")\n for completed_designs in completed_design:\n print(completed_designs)", "def show_completed_models(completed_models):\r\n print(\"\\nThe following models have been printed:\")\r\n for completed_model in completed_models:\r\n print(completed_model)", "def show_completed_models(completed_models):\n\tprint(\"\\n The following models has been printed \")\n\tfor completed_model in completed_models:\n\t\tprint(completed_model)", "def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for completed_model in completed_models:\n print(completed_model)", "def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for completed_model in completed_models:\n print(completed_model)", "def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for completed_model in completed_models:\n print(completed_model)", "def show_completed_models(completed_models):\n\tprint(\"\\nThe following models have been printed:\")\n\tfor completed_model in completed_models:\n\t\tprint(completed_model)", "def show_completed_models (completed_models):\n print (\"\\nThe followin models have been printed: \")\n for completed_model in completed_models:\n print (completed_model)", "def main_loop(num_models, output, interaction_dict, verbose=False, max_chains=100, dirty=False,\n stech_dict=False):\n out_models = []\n for i in range(1, num_models + 1):\n print(\"Macrocomplex \" + str(i) + \" ...\")\n macrocomplex = get_starting_model(interaction_dict, verbose).copy() # Selects a starting model\n model_stech = generate_model_profile(macrocomplex) # Generates the stechometry of the first two chains\n macrocomplex.id = \"Model_\" + str(i)\n run = True # WHile this variable is true, the program will keep trying to add chains to the macrocomplex\n num_of_chains = 2 # The model starts with 2 chains already\n num_empty_chains = 0 # 
NUmber of chains that have all their interactions depleted\n while run:\n for chain in macrocomplex: # Iterates the macrocomplex chains\n if num_of_chains < max_chains: # If the number of chains still hasn't reached the maximum allowed\n if chain.interactions: # If this chain still has pending interactions\n random.shuffle(chain.interactions) # Shuffle the interactions list (to avoid\n # repetitive behaviour)\n for inter_tple in chain.interactions:\n if stech_dict: # If there is stechometry input (either as stirng or template pdb)\n target_chain_id = interaction_dict[chain.id][inter_tple][1].id # chain to be added\n model_stech.setdefault(target_chain_id, 0)\n model_number_chain = model_stech[target_chain_id] # Get the number of repetitions\n stech_dict.setdefault(target_chain_id, 0)\n if stech_dict[target_chain_id] <= model_number_chain: # If the number of this target\n # chain would surpass the stechemestry given, don't add the chain and\n if verbose:\n print(\"(S) Chain NOT added: interaction \" + chain.id + \": \" +\n str(inter_tple[:1]) + \" ... \" + str(inter_tple[-1]) + \" to \" + target_chain_id)\n continue # jump to the next interaction tuple\n fix, to_move = interaction_dict[chain.id][inter_tple] # Get the interaction chain instances\n sup = Bio.PDB.Superimposer() # Generates a superimposer instance\n chain_atoms, fix_atoms = chain.get_common_atoms(fix) # Get common atoms between the\n # macrocomplex chain and the one in the interaction dictionary\n sup.set_atoms(chain_atoms, fix_atoms) # Generate the superposition\n move = to_move.copy() # Make a copy of the chain to move\n sup.apply(move) # Apply superposition matrix\n move_atoms = sorted(move.get_atoms())\n # Now it checks if the target chain has clashes with the model\n if not has_clashes(move_atoms, macrocomplex): # If it hasn't\n if verbose:\n print(\"Chain \" + str(num_of_chains) + \" added: interaction \" + chain.id + \": \" +\n str(inter_tple[0]) + \" ... \" + str(inter_tple[-1]) + \" to \" + move.id)\n move.parent = None # Sets the parent to none to evade biopython's strict id policy\n macrocomplex.add(move) # Adds the target chain to the model\n model_stech.setdefault(move.id, 0) # Updates stech dict\n model_stech[move.id] += 1\n num_of_chains += 1\n if dirty: # Generates a cif file for each step in the building of the model\n macrocomplex.save_to_mmCIF(output + str(i) + \"_tmp_\" + str(num_of_chains))\n elif verbose: # If it has don't add the target chain\n print(\"Chain NOT added: interaction \" + chain.id + \": \" +\n str(inter_tple[:1]) + \" ... 
\" + str(inter_tple[-1]) + \" to \" + move.id)\n chain.interactions = False # Set the interaction attribute to 0, this chain now will be ignored\n else:\n if verbose:\n print(\"Chain \" + chain.id + \" empty\")\n num_empty_chains += 1\n else:\n run = False # When the maximum chain treshold is reached stop running\n break\n if num_empty_chains >= len(macrocomplex): # If all chains are empty of interactions stop running\n run = False\n if verbose:\n stechometry_string = \"\" # Print the model's stechometry\n for key in sorted(model_stech.keys()):\n stechometry_string += key + \":\" + str(model_stech[key]) + \",\"\n stechometry_string = stechometry_string[:-1]\n print(\"Macrocomplex's\"+str(i)+\" Stoichiometry is: \"+stechometry_string)\n print(\"Macrocomplex \" + str(i) + \" finished\")\n out_models.append(macrocomplex) # Add model to the models list\n return out_models", "def print_fl_models(self,fl):\n for t_id in self.fl2t_ids[fl]:\n print t_id\n self.draw_grid(self.t_id2model[t_id])\n print \"\"", "def print_simulation_sequence(self):\n print('-----------------------------------------------')\n for msg_group in self.msg_group_list:\n msg_group.print()\n print('-----------------------------------------------')", "def show_pipline_infor(self):\r\n self.normalOutputWritten('--------Pipeline general info--------\\n')\r\n for eachround in range(int(len(self.RoundQueueDict)/2-1)):\r\n\r\n #--------------------------------------------------------------\r\n # show waveform settings\r\n waveformPackage = self.RoundQueueDict['RoundPackage_'+str(eachround+1)][0]\r\n camOperationPackage = self.RoundQueueDict['RoundPackage_'+str(eachround+1)][1]\r\n waveform_sequence = 1\r\n \r\n for eachwaveform in waveformPackage:\r\n\r\n try:\r\n if len(waveformPackage[eachwaveform][3]) != 0:\r\n self.normalOutputWritten('Round {}, sequence {}, recording channels:{}.\\n'.format(eachround+1, waveform_sequence, waveformPackage[eachwaveform][3]))\r\n print('Round {}, recording channels:{}.'.format(eachround+1, waveformPackage[eachwaveform][3]))#[1]['Sepcification']\r\n# else:\r\n# self.normalOutputWritten('Round {} No recording channel.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No recording channel.\\n')\r\n print('No recording channel.')\r\n try:\r\n self.normalOutputWritten('Round {}, Analog signals:{}.\\n'.format(eachround+1, waveformPackage[eachwaveform][1]['Sepcification']))\r\n print('Round {}, Analog signals:{}.'.format(eachround+1, waveformPackage[eachwaveform][1]['Sepcification']))#\r\n except:\r\n self.normalOutputWritten('No Analog signals.\\n')\r\n print('No Analog signals.')\r\n try:\r\n if len(waveformPackage[eachwaveform][2]['Sepcification']) != 0:\r\n self.normalOutputWritten('Round {}, Digital signals:{}.\\n'.format(eachround+1, waveformPackage[eachwaveform][2]['Sepcification']))\r\n self.normalOutputWritten('Lasting time:{} s.\\n'.format(len(waveformPackage[eachwaveform][2]['Waveform'][0])/waveformPackage[eachwaveform][0]))\r\n \r\n print('Lasting time:{} s.\\n'.format(len(waveformPackage[eachwaveform][2]['Waveform'][0])/waveformPackage[eachwaveform][0]))\r\n print('Round {}, Digital signals:{}.'.format(eachround+1, waveformPackage[eachwaveform][2]['Sepcification']))#\r\n# else:\r\n# self.normalOutputWritten('Round {} No Digital signals.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No Digital signals.\\n')\r\n print('No Digital signals.')\r\n waveform_sequence += 1\r\n self.normalOutputWritten('\\n')\r\n \r\n for eachcamoperation in 
camOperationPackage:\r\n #--------------------------------------------------------------\r\n # Show camera operations\r\n \r\n try:\r\n if len(camOperationPackage[eachcamoperation]) != 0:\r\n self.normalOutputWritten('Round {}, cam Buffer_number:{}.\\n'.format(eachround+1, camOperationPackage[eachcamoperation]['Buffer_number']))\r\n print('Round {}, cam Buffer_number:{}.\\n'.format(eachround+1, camOperationPackage[eachcamoperation]['Buffer_number']))#\r\n# else:\r\n# self.normalOutputWritten('Round {} No Digital signals.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No camera operations.\\n')\r\n print('No camera operations.') \r\n \r\n self.normalOutputWritten('-----------end of round-----------\\n')\r\n self.normalOutputWritten('----------------------------------------\\n')", "def write(self, output_stream=sys.stdout):\n for model in self.models:\n if len(model.chains) == 0:\n continue\n if len(self.models) > 1:\n print(\"MODEL %4d\" % (model.number), file=output_stream)\n model.write(output_stream)\n if len(self.models) > 1:\n print(\"ENDMDL\", file=output_stream)\n print(\"END\", file=output_stream)", "def do_cycle(self):\n c.check_running()\n online_models = self.get_online_models()\n if len(online_models) > 0:\n self.process_models(online_models)\n self.print_recording()", "async def print_processor(self) -> None:\n try:\n while True:\n while self.print_queue.empty() is not True:\n stub = await self.print_queue.get()\n if isinstance(stub, str):\n print(stub)\n elif isinstance(stub, tuple):\n if stub[0] == \"error\":\n print(f\"{r}{stub[1]}{reset}\")\n elif stub[0] == \"warning\":\n print(f\"{y}{stub[1]}{reset}\")\n elif stub[0] == \"success\":\n print(f\"{g}{stub[1]}{reset}\")\n elif stub[0] == \"bold\":\n print(f\"{bold}{stub[1]}{reset}\")\n else:\n print(f\"{stub[1]}\")\n self.print_queue.task_done()\n await asyncio.sleep(0.002)\n except asyncio.CancelledError:\n print('Closing the RedCisco application... 
Cleaning up running tasks...\\n')", "def run(self):\n counter = 0\n self.clear_screen()\n while self.env.remaining_boxes and counter < self.step_limit:\n if ProgramParameters.USE_GUI:\n self.clear_screen()\n print(self.env)\n counter += 1\n print('\\r', counter, len(self.env.delivered_boxes), self.find_overall_efficiency(), end='')\n if ProgramParameters.USE_GUI:\n self.wait()\n for agent in self.env.free_agents:\n agent.explore()\n for box in self.env.remaining_boxes:\n box.simulate()\n if ProgramParameters.EXCHANGE_GRAPHS:\n self.env.exchange_graphs()\n print('\\r', ' ' * 80, '\\rTime taken:', counter)\n print('Efficiency:', self.find_overall_efficiency())\n if not self.env.remaining_boxes:\n print('They did it!')\n # train on all examples of current experience\n training_losses = []\n print('Instantaneous learing')\n for network in self.networks.values():\n training_loss = network.train_from_database()\n training_losses.append(training_loss)\n # write it to file\n with open(HOME + 'data/losses', 'a+') as f:\n print(counter, file=f, end=' ')\n print(*training_losses, file=f, sep='\\t')\n if ProgramParameters.USE_EXPERIENCE_REPLAY:\n # read instances for experience replay from file\n replay_losses = []\n print('Experience replay')\n # add current experiences to pickled file of all experiences\n for network in self.networks.values():\n network.write_current_experience_to_file()\n try:\n replay_database = network.read_random_experience_file()\n replay_loss = network.experience_replay(replay_database)\n replay_losses.append(replay_loss)\n except IndexError:\n pass\n # write it to file\n with open(HOME + 'data/replay_losses', 'a+') as f:\n print(counter, file=f, end=' ')\n print(*replay_losses, file=f, sep='\\t')", "def display_all(self,current_object = None,already_done = [],depth = 0):\n #FIXME : infinite loops are possible\n if current_object in already_done:\n return\n else:\n if current_object== None : current_object = self.model\n already_done.append(current_object)\n print '-'*depth + '>' + str(current_object)\n if hasattr(current_object,'__dict__'):\n for truc in current_object.__dict__.items():\n self.display_all(current_object = truc,already_done = already_done,depth = depth + 1)\n if isinstance(current_object,list):\n for truc in current_object:\n self.display_all(current_object = truc,already_done = already_done,depth = depth + 1)\n if isinstance(current_object,tuple):\n for truc in current_object:\n self.display_all(current_object = truc,already_done = already_done,depth = depth + 1)", "def run(self):\n model = self.model\n self.summary_cards(model)\n self.hospitalizations_chart(model)\n self.available_beds_chart(model)\n self.write_population_info(model)\n self.write_age_distribution_chart(model)\n self.write_fatalities_chart(model)\n self.write_healthcare_parameters(model)\n self.write_epidemiological_parameters(model)\n self.write_footnotes(model)", "def printWaiting(self):\n\t\tfor wait in self.w:\n\t\t\tw_print=\"\"\n\t\t\tfor c in wait:\n\t\t\t\tif c:\n\t\t\t\t\tw_print += str(c[1])\n\t\t\t\telse:\n\t\t\t\t\tw_print += 'NO'\n\t\t\t\tw_print += \" \"\n\t\t\tprint w_print", "def _prepare_printing(self):\n # generate PDF for the recordset\n self._generate_attachment()\n\n providers = set(self.mapped('provider_id.id'))\n for provider_id in providers: # process by provider id\n records = self.filtered(lambda r: r.provider_id.id == provider_id)\n # call provider implementation\n provider_name = records[0].provider_id.provider\n if hasattr(records, '_%s_prepare_printing' % 
provider_name):\n getattr(records, '_%s_prepare_printing' % provider_name)()", "def printModels(cls, options):\n print \"Generating experiment requests...\"\n\n searchParams = _ClientJobUtils.makeSearchJobParamsDict(options=options)", "def print_all(jobs):\n\n if len(jobs) == 0:\n print('print_all() recieved empty input')\n return\n\n for job in jobs:\n if job.is_relevant:\n print(job)\n else:\n continue", "def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()", "def report_printing(cls):\n while True:\n print('Donor Name' + ' ' * 16 + '| Total Given | Num Gifts | Average Gift')\n print('-' * 66)\n print(donor_db.create_report())\n print('Returning to main menu...\\n')\n return", "def _validate_printing(self):\n # group the PO by res_model\n for model in set(self.mapped('res_model')):\n if hasattr(self.env[model], 'print_validate_sending'):\n objects = self.env[model].browse(self.filtered(lambda r: r.res_model == model).mapped('res_id'))\n objects.print_validate_sending()", "def onBuildModels(self):\n if self.refSeriesNumber != '-1':\n ref = self.refSeriesNumber\n refLongName = self.seriesMap[ref]['LongName']\n labelNodes = slicer.util.getNodes('*'+refLongName+'*-label*')\n\n numNodes = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelHierarchyNode\" )\n outHierarchy = None\n\n for n in xrange(numNodes):\n node = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelHierarchyNode\" )\n if node.GetName() == 'mpReview-'+refLongName:\n outHierarchy = node\n break\n\n # Remove the previous models\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n slicer.mrmlScene.RemoveNode(modelNode)\n\n # if models hierarchy does not exist, create it.\n else:\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\n outHierarchy.SetScene( slicer.mrmlScene )\n outHierarchy.SetName( 'mpReview-'+refLongName )\n slicer.mrmlScene.AddNode( outHierarchy )\n\n progress = self.makeProgressIndicator(len(labelNodes))\n step = 0\n for label in labelNodes.values():\n labelName = label.GetName().split(':')[1]\n structureName = labelName[labelName[:-6].rfind(\"-\")+1:-6]\n # Only save labels with known structure names\n if any(structureName in s for s in self.structureNames):\n parameters = {}\n parameters[\"InputVolume\"] = label.GetID()\n parameters['FilterType'] = \"Sinc\"\n parameters['GenerateAll'] = True\n\n parameters[\"JointSmoothing\"] = False\n parameters[\"SplitNormals\"] = True\n parameters[\"PointNormals\"] = True\n parameters[\"SkipUnNamed\"] = True\n\n # create models for all labels\n parameters[\"StartLabel\"] = -1\n parameters[\"EndLabel\"] = -1\n\n parameters[\"Decimate\"] = 0\n parameters[\"Smooth\"] = 0\n\n parameters[\"ModelSceneFile\"] = outHierarchy\n\n progress.labelText = '\\nMaking Model for %s' % structureName\n progress.setValue(step)\n if progress.wasCanceled:\n break\n\n try:\n modelMaker = slicer.modules.modelmaker\n self.CLINode = slicer.cli.run(modelMaker, self.CLINode,\n parameters, wait_for_completion=True)\n except AttributeError:\n qt.QMessageBox.critical(slicer.util.mainWindow(),'Editor', 'The ModelMaker module is not available<p>Perhaps it was disabled in the 
application settings or did not load correctly.')\n step += 1\n progress.close()\n #\n\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n displayNode = modelNode.GetDisplayNode()\n displayNode.SetSliceIntersectionVisibility(1)\n displayNode.SetSliceIntersectionThickness(2)\n self.modelsVisibilityButton.checked = False\n self.updateViewRenderer()", "def print(self):\n\n if self._delayed_mode:\n self._nevonecut = list(dask.compute(*self._nevonecut))\n self._nevcutflow = list(dask.compute(*self._nevcutflow))\n nevonecut = self._nevonecut\n nevcutflow = self._nevcutflow\n print(\"Cutflow stats:\")\n for i, name in enumerate(self._names):\n print(\n f\"Cut {name:<20}: pass = {nevonecut[i+1]:<20}\\\n cumulative pass = {nevcutflow[i+1]:<20}\\\n all = {nevonecut[0]:<20}\\\n -- eff = {nevonecut[i+1]*100/nevonecut[0]:.1f} %\\\n -- cumulative eff = {nevcutflow[i+1]*100/nevcutflow[0]:.1f} %\"\n )", "def print_all_contents(self, *args, **kwargs):\n while self.has_to_print():\n # Try to print the first element in the queue.\n tar_to_print: str = self.print_queue[0].tar\n self.print_monitor.wait_turn(self, tar_to_print, *args, **kwargs)\n\n # Print all applicable values in the print_queue.\n while self.print_queue and (self.print_queue[0].tar == tar_to_print):\n msg: str = self.print_queue.popleft().msg\n print(msg, end=\"\", flush=True)\n\n # If True, then all of the output for extracting tar_to_print was in the queue.\n # Since we just finished printing all of it, we can move onto the next one.\n if self.is_output_done_enqueuing[tar_to_print]:\n # Let all of the other workers know that this worker is done.\n self.print_monitor.done_dequeuing_output_for_tar(self, tar_to_print)", "def run(self):\n self._display_sims(self._compute_sims())", "def full_screen(self,ploton=False):\n self._screener_init() # initialise the model with a single expensive test\n\n for i in range(self.nthreads): # at the start, give the workers a job to do each\n self._select_and_run_experiment(i)\n\n while self.model.b >= self.cy: # spend budget till cant afford any more expensive tests\n i = self._record_experiment(final=False)\n self._select_and_run_experiment(i)\n if ploton:\n self.model.plot(self.model.x,self.y,self.z)\n\n for i in range(self.nthreads): # finish up any remaining jobs and record their results\n self._record_experiment(final=True)\n\n return self.history", "def printModel(self, model):\n print(\"[L DIAG] startLoop =\", model.evaluate(self.startLoop))\n print(\"[L DIAG] endLoop =\", model.evaluate(self.endLoop))\n print(\"[L DIAG] projStartStateFaulty =\", model.evaluate(self.projStartStateFaulty))\n print(\"[L DIAG] projEndStateFaulty =\", model.evaluate(self.projEndStateFaulty))\n print(\"[L DIAG] projStartStateNormal =\", model.evaluate(self.projStartStateNormal))\n print(\"[L DIAG] projEndStateNormal =\", model.evaluate(self.projEndStateNormal))\n\n print(\"[L DIAG] stateFaultyPath: \")\n self.printOneIntArray(model, self.stateFaultyPath)\n print(\"[L DIAG] stateNormalPath: \")\n self.printOneIntArray(model, self.stateNormalPath)\n\n print()\n super().printModel(model)", "def display_collected():\n os.system('clear') # clearscreen\n print('BS4 widget generator')\n print('-' * 20)\n print('options selected:')\n for col in collected:\n print(col)\n\n print('-' * 20)\n\n return", "def print_summary(self, print_level = 0):\n\n 
print(\"==========================\")\n print(\"= FUNtoFEM model summary =\")\n print(\"==========================\")\n print(\"Model name:\", self.name)\n print(\"Number of bodies:\", len(self.bodies))\n print(\"Number of scenarios:\", len(self.scenarios))\n print(\" \")\n print(\"------------------\")\n print(\"| Bodies summary |\")\n print(\"------------------\")\n for body in self.bodies:\n print(\"Body:\", body.id, body.name)\n print(\" coupling group:\", body.group)\n print(\" transfer scheme:\", type(body.transfer))\n print(\" shape parameteration:\", type(body.shape))\n for vartype in body.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(body.variables[vartype]))\n if print_level >= 0:\n for var in body.variables[vartype]:\n print(' variable:', var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)\n\n print(\" \")\n print(\"--------------------\")\n print(\"| Scenario summary |\")\n print(\"--------------------\")\n for scenario in self.scenarios:\n print(\"scenario:\", scenario.id, scenario.name)\n print(\" coupling group:\", scenario.group)\n print(\" steps:\", scenario.steps)\n print(\" steady?:\", scenario.steady)\n for func in scenario.functions:\n print(' function:', func.name, ', analysis_type:', func.analysis_type)\n print(' adjoint?', func.adjoint)\n if not scenario.steady:\n print(' time range', func.start, ',', func.stop)\n print(' averaging', func.averaging)\n\n\n for vartype in scenario.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(scenario.variables[vartype]))\n if print_level >= 0:\n for var in scenario.variables[vartype]:\n print(' variable:', var.id, var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)", "def print_queue(self):\n for i in self.Obs:\n print(i)", "def run_all_models(self):\n #self.process_nitrate()\n try:\n sur_df = self.store.get('/said/{}/iv'.format(self.site['id']))\n con_df = self.store.get('/said/{}/qwdata'.format(self.site['id']))\n\n except KeyError:\n print('site {} not found'.format(site['name']))\n\n\n #determine start and end for plots\n start_date, end_date = get_time_limit(sur_df, con_df)\n\n #update start and end according to user\n user_start = self.site.get('start')\n user_end = self.site.get('end')\n\n if user_start:\n start_date = pd.to_datetime(user_start)\n\n if user_end:\n end_date = pd.to_datetime(user_end)\n\n\n #plot_ssc(ssc_model, filename='plots/{}_ssc.png'.format(site['name']),\n # start_date=start_date, end_date=end_date)\n\n #append the model results to summary\n #summary_table= summary_table.append(model_row_summary(ssc_model))\n\n for directory in ['model_data','report']:\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n\n #pp_model_list = import pdb; pdb.set_trace()[\n # ['log(PP)',['log(Turb_HACH)']],\n # ['log(PP)',['log(Turb_YSI)']]\n #]\n\n #self.run_model(pp_model_list, 'PP')\n\n no3_model_list = [\n ['Nitrate',['NitrateSurr']],\n ]\n self.run_model(no3_model_list, 'Nitrate')\n\n ssc_model_list = [\n ['log(SSC)',['log(Turb_HACH)']],\n ['log(SSC)',['log(Turb_YSI)']]\n ]\n self.run_model(ssc_model_list, 'SSC')\n\n tp_model_list = [\n ['log(TP)',['log(OrthoP)','log(Turb_HACH)']],\n ['log(TP)',['log(OrthoP)','log(Turb_YSI)']],\n ['log(TP)',['log(Turb_HACH)']],\n ['log(TP)',['log(Turb_YSI)']]\n ]\n self.run_model(tp_model_list, 'TP')\n\n #write ssc model report\n 
#reportfile = 'report/{}_ssc_report.txt'.format(site['name'])\n #with open(reportfile, 'w') as f:\n # f.write(ssc_model.get_model_report().as_text())\n #summary_table= summary_table.append(model_row_summary(p_model1))\n #summary_table= summary_table.append(model_row_summary(p_model2))\n #plot_model(ssc_model, filename='plots/{}_ssc_model.png'.format(site['name']))\n #plot_phos(p_model1, p_model2, filename='plots/{}_tp.png'.format(site['name']),\n # start_date=start_date, end_date=end_date)\n #plot_model(p_model1, filename='plots/{}_orthoP_model.png'.format(site['name']))\n #\n ## try to plot phosphate\n #try:\n # phos_plot(con_data, sur_data, filename='plots/{}_p.png'.format(site['name']), title=site['name'],\n # return_model=True)\n #except:\n # print('phospate plot didnt work')\n #\n self.summary_table.to_csv('report/{}_model_summary.csv'.format(self.site['name']),\n index=False)", "def sequential_print_statements():\n pass", "def run(self):\n\n self.preprocess()\n self.restore_ratings()\n self.prepare_UI()\n self.loop_through_units()\n self.cleanup()\n\n print('\\nAll Done - results are available in:\\n\\t{}'.format(self.out_dir))", "def print_processor(print_que):\n print(termcolor.colored(\"!--DO NOT CLOSE--!\", \"red\"))\n print(len(print_que))\n ID_LIMIT = 40\n run = True\n jobs_ran = 0\n while run:\n Q_Jobs = 0\n if len(print_que) > 0:\n if \"10.56.54.162\" in print_que[0]:\n Q_Jobs = print_status(\"10.56.54.162\")\n else:\n Q_Jobs = print_status(\"10.56.54.156\")\n if Q_Jobs >= ID_LIMIT:\n print(\"Printed so Far: \", str(jobs_ran))\n print(\"Waiting For Jobs to Clear Up\")\n # input(\n # \"Please Confirm Printers Will Support 40 More Job IDS before pressing enter: \")\n jobs_ran = 0\n time.sleep(100)\n continue\n if len(print_que) > 0:\n if(\"banner\" not in print_que[0]):\n os.system(print_que[0])\n print((str(print_que[0]).replace(\n \"C:/Windows/System32/lpr.exe -S 10.56.54.\", \"\").replace(\n '-P PS \"C:/S/SO/', \"\").split(\"-J\")[0]))\n print_que.pop(0)\n jobs_ran += 1\n else:\n print(termcolor.colored(\"\\n!--PROCESSING CAUGHT UP--!: \", \"green\"))\n run = False\n jobs_ran += 1", "def print_people_strategies():\n\t\tfor person in sorted(Simulation.community):\n\t\t\tSimulation.community[person].print_info()\n\t\tPerson.person_progression.write(\"--------------- END OF WEEK ---------------\" + \"\\n\")", "def start(self):\n for circuit in self.circuits:\n self.modes[self.print_mode](circuit)", "def iterate(self):\n for i in range(self.generations):\n sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print(\n [item.to_string() for item in sorted_polulation[:8]],\n [round(item.fitness_function(item),2) for item in sorted_polulation]\n )\n\n # print([item.to_string() for item in self.data])\n\n self.step()\n print(\"result\")\n sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print([str(item) for item in sorted_polulation])", "def test_slow_printing(self):\n response = support.create_project(self, 'duluth')\n self.assertFalse(\n response.failed,\n Message('should have created project', response=response)\n )\n\n code = '\\n'.join([\n 'import time',\n 'for letter in \"BCFHMNOPRSTUVWX\":',\n ' print(\"{}AT\".format(letter))',\n ' time.sleep(0.5)'\n ])\n\n support.add_step(self, contents=code)\n response = commander.execute('run', '-f')\n response.thread.join(2)\n\n step = cauldron.project.get_internal_project().steps[1]\n dom = step.dumps()\n self.assertEqual(dom.count('BAT'), 1, 'first check 
failed')\n\n response.thread.join(1)\n dom = step.dumps()\n self.assertEqual(dom.count('BAT'), 1, 'second check failed')\n\n response.thread.join()\n dom = step.dumps()\n self.assertEqual(dom.count('BAT'), 1, 'third check failed')\n self.assertLess(dom.count('SAT'), 2, 'fourth check failed')", "def report(self):\n for c in self._call_chain:\n print c.title\n print '=' * len(c.title)\n c.report()\n print", "def drawall(self):\r\n for x in self.objectlist:\r\n if x.model:\r\n x.model.draw()", "def status_print(optim_result):\n \n # Get all the models tested so far in DataFrame format\n all_models = pd.DataFrame(bayes_cv_tuner.cv_results_) \n \n # Get current parameters and the best parameters \n best_params = pd.Series(bayes_cv_tuner.best_params_)\n print('Model #{}\\nBest mse: {}\\nBest params: {}\\n'.format(\n len(all_models),\n np.round(bayes_cv_tuner.best_score_, 4),\n bayes_cv_tuner.best_params_\n ))\n \n # Save all model results\n clf_name = bayes_cv_tuner.estimator.__class__.__name__\n all_models.to_csv(clf_name+\"_cv_results.csv\")", "def main():\n dataset_idx = [11]\n network_idx = [0]\n reshape_input = [False]\n output_idxs = [0, 1]\n lrs = [0, 1, 2]\n dataset_ft_idx = [0,1,2,3]\n counter_exp = 0\n freeze = [0]\n percentages = [12]\n for dts in range(len(dataset_idx)):\n for nt in range(len(network_idx)):\n for opt in output_idxs:\n for dft in dataset_ft_idx:\n for pr in percentages:\n for rsi in range(len(reshape_input)):\n for fr in freeze:\n for lr in lrs:\n config = configuration(dataset_idx=dataset_idx[dts],\n network_idx=network_idx[nt],\n output_idx=opt,\n usage_modus_idx=5,\n dataset_fine_tuning_idx=dft,\n reshape_input=reshape_input[rsi],\n learning_rates_idx=lr,\n name_counter=counter_exp,\n freeze=fr,\n percentage_idx=pr,\n fully_convolutional=False)\n\n setup_experiment_logger(logging_level=logging.DEBUG,\n filename=config['folder_exp'] + \"logger.txt\")\n\n logging.info('Finished')\n\n modus = Modus_Selecter(config)\n\n # Starting process\n modus.net_modus()\n counter_exp += 1\n\n\n return", "def process_models(self, models):\n for model in models:\n # already recording it, ignore\n if self.is_recording(model) is True:\n continue\n self.logger.info(\"Model \" + model + \" is chaturbating\")\n info = self.get_model_info(model)\n # if the embed info was scrapped\n if len(info) > 0:\n # check if the show is private\n if self.is_private(info) is False:\n self.capture(info)\n else:\n self.logger.warning(\"But the show is private\")", "def l_print(*args):\n for rank in range(0, comm.size):\n comm.Barrier()\n if rank == comm.rank:\n l_print_no_barrier(*args)\n comm.Barrier()", "def print_model_quality_report(pred_path: str, ground_path: str):\n predictions = np.load(pred_path).argmax(axis=1)\n groundtruth = pd.read_csv(ground_path).open_channels.values\n groups = pd.read_csv(ground_path).group.values\n\n print(\"Macro F1 score, F1 scores and confusion matrix per group:\")\n for group in range(6):\n pred = predictions[groups == group]\n true = groundtruth[groups == group]\n print(f\"Group {group} macro F1 score, F1 scores and confusion matrix:\")\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Batch 5 macro F1 score, F1 scores and confusion matrix:\")\n pred = predictions[2_000_000:2_500_000]\n true = groundtruth[2_000_000:2_500_000]\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n 
print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Batch 9 macro F1 score, F1 scores and confusion matrix:\")\n pred = predictions[4_500_000:5_000_000]\n true = groundtruth[4_500_000:5_000_000]\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Overall OOF macro F1 score, F1 scores and confusion matrix:\")\n print(f1_score(groundtruth[:5_000_000], predictions[:5_000_000], average='macro'))\n print(f1_score(groundtruth[:5_000_000], predictions[:5_000_000], average=None))\n print(confusion_matrix(groundtruth[:5_000_000], predictions[:5_000_000], normalize='true').round(3))\n print()", "def display(self):\n count = 0\n self.displays[0].start() # call only once to support shift chain\n for d in self.displays:\n d.output(self.data[count])\n count += 1\n self.displays[0].latch() # call only once to support shift chain", "def print_contents(self):\n try:\n # We only wait for 0.001 seconds.\n self.print_all_contents(indef_wait=False)\n except NotYourTurnError:\n # It's not our turn, so try again the next time this function is called.\n pass", "def loading(delay):\r\n\r\n for i in range(3):\r\n\r\n print \".\",\r\n sys.stdout.flush()\r\n time.sleep(delay)\r\n\r\n print(\"\")", "def display(self):\n ob = self._convert_state(self._env.reset())\n done = False\n while not done:\n ac, _ = self._act(ob, stochastic=False)\n ob, rew, done, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n self._env.render()\n self._env.close()", "def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')", "def print(self):\n\n if self._delayed_mode:\n self._nev = list(dask.compute(*self._nev))\n nev = self._nev\n print(\"N-1 selection stats:\")\n for i, name in enumerate(self._names):\n print(\n f\"Ignoring {name:<20}: pass = {nev[i+1]:<20}\\\n all = {nev[0]:<20}\\\n -- eff = {nev[i+1]*100/nev[0]:.1f} %\"\n )\n\n if True:\n print(\n f\"All cuts {'':<20}: pass = {nev[-1]:<20}\\\n all = {nev[0]:<20}\\\n -- eff = {nev[-1]*100/nev[0]:.1f} %\"\n )", "def main(self):\n if self.mode==0: #drawing\n self.draw()\n self.graph_drawing=self.cleanGraph(self.graph_drawing)\n #if len(self.graph_drawing)>1:\n # self.function_interpolation=self.polynomialInterpolation2D(self.graph_drawing,1)\n # self.graph_interpolation=self.sample(self.function_interpolation,len(self.graph_drawing))\n elif self.mode==1: #construction\n self.step+=1\n self.time=self.step/self.max_step\n if self.step>self.max_step:\n self.mode=2\n #self.graph_construction=self.discreteComplexComposeGraph(self.coefficients,self.time) #complex now\n self.graph_construction=self.numpyComposeConstructionGraph(self.coefficients,t=self.time)\n self.vectors=self.getVectors([(0,0)]+self.graph_construction)\n self.graph_display.append(self.graph_construction[-1])\n\n elif self.mode==2:\n self.draw()", "def run_main():\n\n #Check for sold bikes\n checkSold(auto=True) #Change Auto to True to prevent user input\n\n #Find all available URLs split by Make & Model - Find Make\n print(\"Getting Makes...\")\n makes = getMakes()\n \n #Find all Models for each Make\n print(\"Getting Models...\")\n models = []\n for make in tqdm(makes, desc=\"Makes\"):\n models += getModels(make)\n\n\n #Find all URLs for each Model - Scrape bikes on each model\n errlog = \"\"\n print(\"Scraping Bikes...\")\n for model in 
tqdm(models, desc=\"Models\"):\n #Get urls for each model\n urlsTemp = getURLs(model)\n\n #Remove duplicates\n urlsTemp = removeDups(urlsTemp)\n\n #Remove listings already found\n urlsTemp = newURLs(\"motorcycles\", urlsTemp)\n\n #Get model description\n try:\n modelDesc = model.split(\"/\")[2].replace(\"model-\", \"\")\n except:\n modelDesc = \"Listings\"\n\n #Find motorbike details on all urls for this model\n #Split by model to prevent large datasets changing during code runtime\n for url in tqdm(urlsTemp, desc=modelDesc, leave=False):\n temp = Motorcycle(url)\n if not temp.na:\n temp.dbInsert()\n else:\n errlog += url + \"|\"\n \n #Finish\n if not errlog:\n print(\"Errors Found: \", errlog)\n if not printlog:\n print(printlog)\n \n print(\"Done!\")", "def main_loop(self, max_steps, max_gen, display_on, collect_data,\n garden_mode, i):\n if display_on:\n self.make_background()\n self.load_sprites()\n if display_on:\n pygame.display.flip()\n font = pygame.font.Font(None, 18)\n if collect_data or self.export_all:\n dead_tako = deque()\n while 1:\n #see if ending conditions have been met\n if max_steps > 0:\n if self.stepid > max_steps:\n if collect_data or self.export_all:\n for env in self.env_list:\n for tak in env.tako_list:\n dead_tako.append([tak, self.stepid,\n env.env_id])\n if self.export_all:\n export(dead_tako, self.filename)\n if collect_data:\n write_csv(self.filename, i, dead_tako)\n return\n end = False\n if max_gen > 0:\n for env in self.env_list:\n if env.highest_gen > max_gen:\n end = True\n for en in self.env_list:\n for tak in en.tako_list:\n dead_tako.append([tak, self.stepid,\n en.env_id])\n #if we have not met the predefined end conditions, begin main loop\n if display_on:\n for event in pygame.event.get():\n if event.type == QUIT:\n return\n elif event.type == KEYDOWN:\n if self.two_envs:\n if event.key == K_s:\n if self.current_env == 0:\n self.current_env = 1\n else:\n self.current_env = 0\n self.load_sprites()\n if self.scroll:\n if event.key == K_LEFT:\n if self.cam_pos[0] > 0:\n self.cam_pos[0] -= 1\n for spr in self.all_sprites:\n spr.move_rect(1, 0)\n elif event.key == K_RIGHT:\n if self.cam_pos[0] < (self.env_list[0].size -\n self.spr_width):\n self.cam_pos[0] += 1\n for spr in self.all_sprites:\n spr.move_rect(-1, 0)\n elif event.key == K_UP:\n if self.cam_pos[1] > 0:\n self.cam_pos[1] -= 1\n for spr in self.all_sprites:\n spr.move_rect(0, 1)\n elif event.key == K_DOWN:\n if self.cam_pos[1] < (self.env_list[0].size -\n self.spr_height):\n self.cam_pos[1] += 1\n for spr in self.all_sprites:\n spr.move_rect(0, -1)\n #see if all are dead\n for env in self.env_list:\n if len(env.tako_list) == 0:\n end = True\n print(\"Tako are dead :(\")\n if end == True:\n for env in self.env_list:\n if self.export_all:\n export(dead_tako, self.filename)\n if collect_data:\n if len(dead_tako) > 0:\n write_csv(self.filename, i, dead_tako)\n return\n #let experiment go a step\n for task in self.task_list:\n task.interact_and_learn()\n if garden_mode == \"Changing\":\n if self.stepid > 0 and self.stepid % 100000 == 0:\n for env in self.env_list:\n env.switch_grasses()\n elif garden_mode == \"Nutrition\":\n if self.stepid > 0 and self.stepid % 40000 == 0:\n for env in self.env_list:\n env.switch_nutrition()\n #see if any are dead\n for env in self.env_list:\n for tak in env.tako_list:\n if tak.dead == True:\n env.garden_map[tak.y][tak.x] = Dirt(display_on,\n tak.x, tak.y)\n env.tako_list.remove(tak)\n if collect_data or self.export_all:\n dead_tako.append([tak, self.stepid, 
env.env_id])\n tak.kill()\n #then check for migration\n if self.two_envs:\n if self.migration_rate > 0 and self.stepid > 0:\n if self.stepid % 50000 == 0:\n self.migrate(display_on)\n #check if we are doing data collection output this tick\n if self.stepid % 3000 == 0:\n if self.export_all:\n export(dead_tako, self.filename)\n if collect_data:\n write_csv(self.filename, i, dead_tako)\n #now, update sprites, then draw them if using graphics\n for env in self.env_list:\n if env.new_sprites != []:\n self.get_new(env)\n self.widget_sprites.update()\n for env in self.env_list:\n for tak in env.tako_list:\n tak.update()\n if display_on:\n self.graphics_loop(font)\n self.stepid += 1", "def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)", "def wait_empty(cls):\n for recorder in cls.recorders:\n recorder._wait_empty()", "def run(self, simulation):\n sys.stdout.write(\"Post processing ... \")\n sys.stdout.flush()\n\n particle_list = simulation.particle_list\n layer_system = simulation.layer_system\n initial_field = simulation.initial_field\n for item in self.tasks:\n if item['task'] == 'evaluate far field':\n outputdir = simulation.output_dir + '/far_field'\n if item.get('angle units', 'polar') == 'degree':\n ang_fac = np.pi / 180\n else:\n ang_fac = 1\n\n if type(initial_field).__name__ == 'PlaneWave':\n self.scattering_cross_section, self.extinction_cross_section = evaluate_cross_section(\n initial_field=initial_field, particle_list=particle_list, layer_system=layer_system,\n outputdir=outputdir, show_plots=item.get('show plots', False),\n save_plots=item.get('save plots', False),\n save_data=item.get('save data', False), length_unit=simulation.length_unit)\n elif type(initial_field).__name__ == 'GaussianBeam':\n self.total_far_field, self.initial_far_field, self.scattered_far_field = farf.total_far_field(\n initial_field=initial_field, particle_list=particle_list, layer_system=layer_system)\n\n go.show_far_field(far_field=self.total_far_field, save_plots=item.get('save plots', False),\n show_plots=item.get('show plots', False), save_data=item.get('save data', False),\n tag='total_far_field', outputdir=outputdir, flip_downward=True, split=True)\n go.show_far_field(far_field=self.initial_far_field, save_plots=item.get('save plots', False),\n show_plots=item.get('show plots', False), save_data=item.get('save data', False),\n tag='initial_far_field', outputdir=outputdir, flip_downward=True, split=True)\n go.show_far_field(far_field=self.scattered_far_field, save_plots=item.get('save plots', False),\n show_plots=item.get('show plots', False), save_data=item.get('save data', False),\n tag='scattered_far_field', outputdir=outputdir, flip_downward=True, split=True)\n\n in_pow = sum(initial_field.initial_intensity(layer_system).integral()).real\n if self.total_far_field.top() is not None:\n top_pow = sum(self.total_far_field.top().integral()).real\n else:\n top_pow = 0\n if self.total_far_field.bottom() is not None:\n bottom_pow = sum(self.total_far_field.bottom().integral()).real\n else:\n bottom_pow = 0\n\n print()\n print('-------------------------------------------------------------------------')\n print('Far field:')\n print('Initial power: ', in_pow)\n if initial_field.polar_angle < np.pi / 2:\n if bottom_pow:\n print('Radiation into bottom layer (total reflection): ', bottom_pow,\n ' or ', round(bottom_pow / in_pow * 100, 2), '%')\n if top_pow:\n print('Radiation into top layer (total transmission): ', top_pow,\n ' or 
', round(top_pow / in_pow * 100, 2), '%')\n else:\n if bottom_pow:\n print('Radiation into bottom layer (total transmission): ', bottom_pow,\n ' or ', round(bottom_pow / in_pow * 100, 2), '%')\n if top_pow:\n print('Radiation into top layer (total reflection): ', top_pow,\n ' or ', round(top_pow / in_pow * 100, 2), '%')\n print('Absorption and incoupling into waveguide modes: ', in_pow - top_pow - bottom_pow,\n ' or ', round((in_pow - top_pow - bottom_pow) / in_pow * 100, 2), '%')\n print('-------------------------------------------------------------------------')\n elif (type(initial_field).__name__ == 'DipoleSource'\n or type(initial_field).__name__ == 'DipoleCollection'):\n self.total_far_field, self.initial_far_field, self.scattered_far_field = farf.total_far_field(\n initial_field=initial_field, particle_list=particle_list, layer_system=layer_system)\n\n go.show_far_field(far_field=self.total_far_field, save_plots=item.get('save plots', False),\n show_plots=item.get('show plots', False), save_data=item.get('save data', False),\n tag='total_far_field', outputdir=outputdir, flip_downward=True, split=True)\n go.show_far_field(far_field=self.initial_far_field, save_plots=item.get('save plots', False),\n show_plots=item.get('show plots', False), save_data=item.get('save data', False),\n tag='initial_far_field', outputdir=outputdir, flip_downward=True, split=True)\n go.show_far_field(far_field=self.scattered_far_field, save_plots=item.get('save plots', False),\n show_plots=item.get('show plots', False), save_data=item.get('save data', False),\n tag='scattered_far_field', outputdir=outputdir, flip_downward=True, split=True)\n\n if type(initial_field).__name__ == 'DipoleSource':\n diss_pow = initial_field.dissipated_power(particle_list, layer_system)\n else:\n diss_pow = sum(initial_field.dissipated_power(particle_list, layer_system))\n\n assert abs(diss_pow.imag / diss_pow) < 1e-8\n diss_pow = diss_pow.real\n\n if self.total_far_field.top() is not None:\n top_pow = sum(self.total_far_field.top().integral()).real\n else:\n top_pow = 0\n if self.total_far_field.bottom() is not None:\n bottom_pow = sum(self.total_far_field.bottom().integral()).real\n else:\n bottom_pow = 0\n\n print()\n print('-------------------------------------------------------------------------')\n print('Dissipated power: ', diss_pow)\n print()\n print('Far field:')\n if bottom_pow:\n print('Radiation into bottom layer (bottom outcoupling): ', bottom_pow,\n ' or ', round(bottom_pow / diss_pow * 100, 2), '%')\n if top_pow:\n print('Radiation into top layer (top outcoupling): ', top_pow,\n ' or ', round(top_pow / diss_pow * 100, 2), '%')\n print('Absorption and incoupling into waveguide modes: ', diss_pow - top_pow - bottom_pow,\n ' or ', round((diss_pow - top_pow - bottom_pow) / diss_pow * 100, 2), '%')\n print('-------------------------------------------------------------------------')\n\n elif item['task'] == 'evaluate near field':\n sys.stdout.write(\"\\nEvaluate near fields ... 
\")\n sys.stdout.flush()\n\n quantities_to_plot = item['quantities to plot']\n\n if simulation.output_dir:\n outputdir = simulation.output_dir + '/near_field'\n else:\n outputdir = '.'\n\n go.show_near_field(quantities_to_plot=quantities_to_plot, show_plots=item.get('show plots', False),\n save_plots=item.get('save plots', False), save_data=item.get('save data', False),\n save_animations=item.get('save animations', False), outputdir=outputdir,\n xmin=item.get('xmin', 0), xmax=item.get('xmax', 0), ymin=item.get('ymin', 0),\n ymax=item.get('ymax', 0), zmin=item.get('zmin', 0), zmax=item.get('zmax', 0),\n simulation=simulation, max_field=item.get('maximal field strength'),\n resolution_step=item.get('spatial resolution', 25),\n max_particle_distance=item.get('maximal particle distance', float('inf')),\n interpolate_step=item.get('interpolation spatial resolution'))\n\n sys.stdout.write(\"done. \\n\")\n sys.stdout.flush()", "def simulate_trajectories(kav):\n print \"Simulating \"+str(kav)\n wt_trajectories = []\n avp_trajectories = []\n vip_trajectories = []\n for tn in range(100):\n # get random initial condition\n # initial phases\n init_conditions_AV = [single_osc.lc(wt_T*np.random.rand()) \n for i in range(AVPcells+VIPcells)]\n init_conditions_NAV = [single_osc.lc(wt_T*np.random.rand())[:-1]\n for i in range(NAVcells)]\n y0_random = np.hstack(init_conditions_AV+init_conditions_NAV)\n\n # do the simulation\n model = GonzeModelManyCells(param, kav=kav, \n initial_values=y0_random)\n wt_trajectories.append(model.run(show_labels=False, seed=0))\n\n # avp bmalko\n avp_model = GonzeModelManyCells(param, bmalko='AVP', kav=kav, \n initial_values=y0_random)\n avp_trajectories.append(avp_model.run(show_labels=False, seed=0))\n\n # vip bmalko\n vip_model = GonzeModelManyCells(param, bmalko='VIP', kav=kav, \n initial_values=y0_random)\n vip_trajectories.append(vip_model.run(show_labels=False, seed=0))\n\n # save results\n with open(\"Data/params/wt_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(wt_trajectories, output_file)\n with open(\"Data/params/avp_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(avp_trajectories, output_file)\n with open(\"Data/params/vip_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(vip_trajectories, output_file)\n\n return {'wt': wt_trajectories,\n 'avp': avp_trajectories,\n 'vip': vip_trajectories}", "def main():\n init()\n separator_len = 40\n for s in stage_instances:\n print('='*separator_len)\n print(s.name)\n print('-'*separator_len)\n\n s.add_tasks() # Add tasks from previous stage\n s.revive_or_archive() # Revive killed tasks or move them to failed\n s.schedule_jobs() # Schedule new jobs if needed\n s.print_status()\n print('='*separator_len + '\\n')\n render(stage_instances)", "def main(self):\n if self.mode == 0: # drawing\n pass\n elif self.mode == 1: # construction\n if self.step > self.max_step:\n self.mode = 2\n else:\n self.construction = Fourier.build(self.coefficients, self.time)\n self.display.append(self.construction[-1])\n if not self.pause:\n self.step += 1\n elif self.mode == 2: # display\n pass", "def finishBuild(self, graphviz=False):\n if graphviz is True:\n self_loops = set(self.G.selfloop_edges())\n for edge in self.G.edges():\n if edge not in self_loops and removeLabel(edge[0]) == removeLabel(edge[1]):\n # sequence edge\n l = removeLabel(edge[0]) + \"\\\\n\" + \" - \".join([\": \".join([y, \", \".join([str(x) for x in self.G.edge[edge[0]][edge[1]]['positions'][y]])]) for y in 
self.G.edge[edge[0]][edge[1]]['positions']]) + \"\\\\ncount: \" + str(self.G.edge[edge[0]][edge[1]][\"count\"])\n self.G.edge[edge[0]][edge[1]][\"label\"] = l\n else:\n self.G.edge[edge[0]][edge[1]]['penwidth'] = 2\n\n self.paralogs = sorted(self.paralogs, key=lambda x: x[0])\n\n self.weights = {x : 2.0 for x in self.kmers}\n\n assert len(self.kmers.intersection(self.normalizingKmers)) == 0", "def play(self):\r\n legal_solution_found = False\r\n self.simulation_stats.add_stats(self.round, self.solutions)\r\n if self.simulation_viz is SimulationViz.FRONT:\r\n self.report_progress()\r\n while not legal_solution_found:\r\n legal_solution = self.find_solution() # might exist already in the first round\r\n if legal_solution:\r\n legal_solution_found = True\r\n else:\r\n self.play_one_round()\r\n if self.simulation_viz is SimulationViz.FRONT:\r\n self.draw_final_screen()\r\n else:\r\n output_lines = self.report_stats()\r\n return output_lines", "def step6(self):\n if len(self.mrs) == 3:\n count = 3\n elif len(self.mrs) == 2:\n count = 2\n else:\n count = 1\n for mr in self.mrs[0:count]:\n self.log.info(\"Display boot drive on controller:%d\"\n % (mr.ctrl_id))\n vd_id = mr.cli.bootdrive_vd_get()\n if (int(vd_id) == -1): # -1 : No boot VD.\n self.log.info(\"No boot VD found on controller: %d\"\n % (mr.ctrl_id))\n else:\n self.log.info(\"VD ID of the boot VD: %d\"\n % int((vd_id)))", "def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return", "def main(self):\r\n\r\n #Train the GEN and DISC\r\n self.modelTrain.main()\r\n self.disp.show()", "def visualizar(self):\n print(self.queue)", "def run():\n\n for simulation in range(0, N_SIMULATIONS):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n # TODO: Change later enforce_deadline=True\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=N_TRIALS) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n if simulation == N_SIMULATIONS - 1:\n\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['alpha', 'gamma', 'epsilon', 'success_rate', 'last_failure']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for index in range(0,len(simulation_rates)):\n writer.writerow({\n 'alpha': get_simulation_params(0)[0],\n 'gamma': get_simulation_params(0)[1],\n 'epsilon': get_simulation_params(0)[2],\n 'success_rate': simulation_rates[index],\n 'last_failure': last_errors[index]})\n\n\n if N_SIMULATIONS > 1: #multiple simulation AND last simulation\n\n plt.figure(1)\n\n plt.subplot(211)\n plt.plot(simulation_rates)\n plt.title('Success Rate/Simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Success Rate')\n\n plt.subplot(212)\n plt.plot(last_errors)\n plt.title('Last failed trial per simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Last failed trial')\n\n plt.show()", "def render_all(self):\n for name,card in self._name2database.items():\n 
card.render()\n print 'Finished rendering {}!'.format(name)\n return", "def process_list(self):\n for p in self._queue:\n print \"%-5d %-10s %-10s %2d %10s %10s\" % (p.id, p.name,\n p.status['type'], p.priority, \n self.print_cr_tree(p.creation_tree['parent']), \n self.print_cr_tree(p.creation_tree['child']))", "def save(self):\n if len(self.queue):\n #logging.info('Saving %d pages' % len(self.queue))\n for q in self.queue:\n if q and q != '':\n if self.output:\n print >> self.output, q\n else:\n print q\n self.queue = []", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def print_model_generation(model):\n print('g1 = {} MW'.format(model.g[1].value))\n print('g2 = {} MW'.format(model.g[2].value))", "def _prepare_models(self):\n if self.freeze_layers is not None:\n self._set_freeze_layers()\n self._load_weight_if_possible()\n print(self.keras_model.summary())\n self.show_configuration()", "def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()", "def on_screen(self):\n ########################################################################\n print ' '\n print ' '\n print '====================================================='\n print ' Simulation Results '\n print '====================================================='\n print 'Simulation name: ', self.simulation_name\n print 'Start Date / Time : ', self.start_time\n print 'End Date / Time : ', self.finish_time\n print 'Total Simulation Time (minutes): ', (self.finish - self.start)/60.0\n if self.archive_simulation.lower() == 'yes':\n print 'Archive Status: Active'\n print 'Archive Name : ', self.simulation_name\n else:\n print 'Archive Status: Inactive'\n print ' '\n print 'Number of time steps in the simulation: ', self.stop\n print ' '\n print '-----------------------------------'\n print ' Initial Cohort Information'\n print '-----------------------------------'\n print 'Outputs of Initial Cohort Distribution:'\n if self.initialize['Initial_Cohort_Distribution_Figure'].lower() != 'yes':\n print ' No outputs generated.'\n else:\n if self.initialize['Meadow_WT_Y_Figure'].lower() == 'yes':\n print ' Inital Meadow, Wetland Tundra, Young age Figure [Output/Barrow/Meadow_WT_Y]'\n if self.initialize['Meadow_WT_M_Figure'].lower() == 'yes':\n print ' Initial Meadow, Wetland Tundra, Medium age Figure [Output/Barrow/Meadow_WT_M]'\n if self.initialize['Meadow_WT_O_Figure'].lower() == 'yes':\n print ' Initial Meadow, Wetland Tundra, Old age Figure [Output/Barrow/Meadow_WT_O]'\n if self.initialize['LCP_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Young age Figure [Output/Barrow/LCP_WT_Y]'\n if self.initialize['LCP_WT_M_Figure'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Medium age Figure [Output/Barrow/LCP_WT_M]'\n if self.initialize['LCP_WT_O_Figure'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Old age Figure [Output/Barrow/LCP_WT_O]'\n if self.initialize['CLC_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Coalescent Low 
Center Polygon, Wetland Tundra, Young age Figure [Output/Barrow/CLC_WT_Y]'\n if self.initialize['CLC_WT_M_Figure'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Medium age Figure [Output/Barrow/CLC_WT_M]'\n if self.initialize['CLC_WT_O_Figure'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Old age Figure [Output/Barrow/CLC_WT_O]'\n if self.initialize['FCP_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Young age Figure [Output/Barrow/FCP_WT_Y]'\n if self.initialize['FCP_WT_M_Figure'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Medium age Figure [Output/Barrow/FCP_WT_M]'\n if self.initialize['FCP_WT_O_Figure'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Old age Figure [Output/Barrow/FCP_WT_O]'\n if self.initialize['HCP_WT_Y_Figure'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Young age Figure [Output/Barrow/HCP_WT_Y]'\n if self.initialize['HCP_WT_M_Figure'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Medium age Figure [Output/Barrow/HCP_WT_M]'\n if self.initialize['HCP_WT_O_Figure'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Old age Figure [Output/Barrow/HCP_WT_O]'\n if self.initialize['LargeLakes_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Young age Figure [Output/Barrow/LargeLakes_WT_Y]'\n if self.initialize['LargeLakes_WT_M_Figure'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Medium age Figure [Output/Barrow/LargeLakes_WT_M]'\n if self.initialize['LargeLakes_WT_O_Figure'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Old age Figure [Output/Barrow/LargeLakes_WT_O]'\n if self.initialize['MediumLakes_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Young age Figure [Output/Barrow/MediumLakes_WT_Y]'\n if self.initialize['MediumLakes_WT_M_Figure'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Medium age Figure [Output/Barrow/MediumLakes_WT_M]'\n if self.initialize['MediumLakes_WT_O_Figure'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Old age Figure [Output/Barrow/MediumLakes_WT_O]'\n if self.initialize['SmallLakes_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Young age Figure [Output/Barrow/SmallLakes_WT_Y]'\n if self.initialize['SmallLakes_WT_M_Figure'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Medium age Figure [Output/Barrow/SmallLakes_WT_M]'\n if self.initialize['SmallLakes_WT_O_Figure'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Old age Figure [Output/Barrow/SmallLakes_WT_O]'\n if self.initialize['Ponds_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Young age Figure [Output/Barrow/Ponds_WT_Y]'\n if self.initialize['Ponds_WT_M_Figure'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Medium age Figure [Output/Barrow/Ponds_WT_M]'\n if self.initialize['Ponds_WT_O_Figure'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Old age Figure [Output/Barrow/Ponds_WT_O]'\n if self.initialize['CoastalWaters_WT_O_Figure'].lower() == 'yes':\n print ' Initial Coastal Waters, Wetland Tundra, Old age Figure [Output/Barrow/CoastalWaters_WT_O]'\n if self.initialize['DrainedSlope_WT_Y_Figure'].lower() == 'yes':\n print ' 
Initial Drained Slope, Wetland Tundra, Young age Figure [Output/Barrow/DrainedSlope_WT_Y]'\n if self.initialize['DrainedSlope_WT_M_Figure'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Medium age Figure [Output/Barrow/DrainedSlope_WT_M]'\n if self.initialize['DrainedSlope_WT_O_Figure'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Old age Figure [Output/Barrow/DrainedSlope_WT_O]'\n if self.initialize['SandDunes_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Young age Figure [Output/Barrow/SandDunes/WT_Y]'\n if self.initialize['SandDunes_WT_M_Figure'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Medium age Figure [Ouput/Barrow/SandDunes_WT_M]'\n if self.initialize['SandDunes_WT_O_Figure'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Old age Figure [Output/Barrow/SandDunes_WT_O]'\n if self.initialize['SaturatedBarrens_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Young age Figure [Output/Barrow/SaturatedBarrens_WT_Y]'\n if self.initialize['SaturatedBarrens_WT_M_Figure'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Medium age Figure [Output/Barrow/SaturatedBarrens_WT_M]'\n if self.initialize['SaturatedBarrens_WT_O_Figure'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Old age Figure [Output/Barrow/SaturatedBarrens_WT_O]'\n if self.initialize['Shrubs_WT_O_Figure'].lower() == 'yes':\n print ' Initial Shrubs, Wetland Tundra, Old age Figure [Output/Barrow/Shrubs_WT_O]'\n if self.initialize['Urban_WT_Figure'].lower() == 'yes':\n print ' Initial Urban area, Wetland Tundra, Figure [Output/Barrow/Urban_WT]'\n if self.initialize['Rivers_WT_Y_Figure'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tundra, Young age Figure [Output/Barrow/Rivers_WT_Y]'\n if self.initialize['Rivers_WT_M_Figure'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tundra, Medium age Figure [Output/Barrow/Rivers_WT_M]'\n if self.initialize['Rivers_WT_O_Figure'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tunrda, old age Figure [Output/Barrow/Rivers_WT_O]'\n \n# if self.initialize['WetNPG_Figure'].lower() == 'yes':\n# print ' Initial Wetland Non-polygonal Ground Figure [Output/Wet_NPG]'\n# if self.initialize['WetLCP_Figure'].lower() == 'yes':\n# print ' Initial Wetland Low Center Polygon Figure [Output/Wet_LCP]'\n# if self.initialize['WetCLC_Figure'].lower() == 'yes':\n# print ' Initial Wetland Coalescent Low Center Polygon Figure [Output/Wet_CLC]'\n# if self.initialize['WetFCP_Figure'].lower() == 'yes':\n# print ' Initial Wetland Flat Center Polygon Figure [Output/Wet_FCP]'\n# if self.initialize['WetHCP_Figure'].lower() == 'yes':\n# print ' Initial Wetland High Center Polygon Figure [Output/Wet_HCP]'\n# if self.initialize['Lakes_Figure'].lower() == 'yes':\n# print ' Initial Lakes Figure [Output/Lakes]'\n# if self.initialize['Ponds_Figure'].lower() == 'yes':\n# print ' Initial Ponds Figure [Output/Ponds]'\n# if self.initialize['Rivers_Figure'].lower() == 'yes':\n# print ' Initial Rivers Figure [Output/Other_Cohorts]'\n# if self.initialize['Urban_Figure'].lower() == 'yes':\n# print ' Initial Ubran Figure [Output/Other_Cohorts]'\n if self.initialize['All_Cohorts_Figure'].lower() == 'yes':\n print ' Total Cohorts Figure [Output/Barrow/All_Cohorts]'\n print ' '\n\n print 'Outputs of Normalized Cohort Distribution:'\n if self.initialize['Normalized_Cohort_Distribution_Figure'].lower() != 'yes':\n print ' No outputs 
generated.'\n else:\n if self.initialize['Meadow_WT_Y_Normal'].lower() == 'yes':\n print ' Inital Meadow, Wetland Tundra, Young age Normalized [Output/Barrow/Meadow_WT_Y]'\n if self.initialize['Meadow_WT_M_Normal'].lower() == 'yes':\n print ' Initial Meadow, Wetland Tundra, Medium age Normalized [Output/Barrow/Meadow_WT_M]'\n if self.initialize['Meadow_WT_O_Normal'].lower() == 'yes':\n print ' Initial Meadow, Wetland Tundra, Old age Normalized [Output/Barrow/Meadow_WT_O]'\n if self.initialize['LCP_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Young age Normalized [Output/Barrow/LCP_WT_Y]'\n if self.initialize['LCP_WT_M_Normal'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Medium age Normalized [Output/Barrow/LCP_WT_M]'\n if self.initialize['LCP_WT_O_Normal'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Old age Normalized [Output/Barrow/LCP_WT_O]'\n if self.initialize['CLC_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Young age Normalized [Output/Barrow/CLC_WT_Y]'\n if self.initialize['CLC_WT_M_Normal'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Medium age Normalized [Output/Barrow/CLC_WT_M]'\n if self.initialize['CLC_WT_O_Normal'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Old age Normalized [Output/Barrow/CLC_WT_O]'\n if self.initialize['FCP_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Young age Normalized [Output/Barrow/FCP_WT_Y]'\n if self.initialize['FCP_WT_M_Normal'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Medium age Normalized [Output/Barrow/FCP_WT_M]'\n if self.initialize['FCP_WT_O_Normal'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Old age Normalized [Output/Barrow/FCP_WT_O]'\n if self.initialize['HCP_WT_Y_Normal'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Young age Normalized [Output/Barrow/HCP_WT_Y]'\n if self.initialize['HCP_WT_M_Normal'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Medium age Normalized [Output/Barrow/HCP_WT_M]'\n if self.initialize['HCP_WT_O_Normal'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Old age Normalized [Output/Barrow/HCP_WT_O]'\n if self.initialize['LargeLakes_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Young age Normalized [Output/Barrow/LargeLakes_WT_Y]'\n if self.initialize['LargeLakes_WT_M_Normal'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Medium age Normalized [Output/Barrow/LargeLakes_WT_M]'\n if self.initialize['LargeLakes_WT_O_Normal'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Old age Normalized [Output/Barrow/LargeLakes_WT_O]'\n if self.initialize['MediumLakes_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Young age Normalized [Output/Barrow/MediumLakes_WT_Y]'\n if self.initialize['MediumLakes_WT_M_Normal'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Medium age Normalized [Output/Barrow/MediumLakes_WT_M]'\n if self.initialize['MediumLakes_WT_O_Normal'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Old age Normalized [Output/Barrow/MediumLakes_WT_O]'\n if self.initialize['SmallLakes_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Small 
(size) Lakes, Wetland Tundra, Young age Normalized [Output/Barrow/SmallLakes_WT_Y]'\n if self.initialize['SmallLakes_WT_M_Normal'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Medium age Normalized [Output/Barrow/SmallLakes_WT_M]'\n if self.initialize['SmallLakes_WT_O_Normal'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Old age Normalized [Output/Barrow/SmallLakes_WT_O]'\n if self.initialize['Ponds_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Young age Normalized [Output/Barrow/Ponds_WT_Y]'\n if self.initialize['Ponds_WT_M_Normal'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Medium age Normalized [Output/Barrow/Ponds_WT_M]'\n if self.initialize['Ponds_WT_O_Normal'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Old age Normalized [Output/Barrow/Ponds_WT_O]'\n if self.initialize['CoastalWaters_WT_O_Normal'].lower() == 'yes':\n print ' Initial Coastal Waters, Wetland Tundra, Old age Normalized [Output/Barrow/CoastalWaters_WT_O]'\n if self.initialize['DrainedSlope_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Young age Normalized [Output/Barrow/DrainedSlope_WT_Y]'\n if self.initialize['DrainedSlope_WT_M_Normal'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Medium age Normalized [Output/Barrow/DrainedSlope_WT_M]'\n if self.initialize['DrainedSlope_WT_O_Normal'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Old age Normalized [Output/Barrow/DrainedSlope_WT_O]'\n if self.initialize['SandDunes_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Young age Normalized [Output/Barrow/SandDunes/WT_Y]'\n if self.initialize['SandDunes_WT_M_Normal'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Medium age Normalized [Ouput/Barrow/SandDunes_WT_M]'\n if self.initialize['SandDunes_WT_O_Normal'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Old age Normalized [Output/Barrow/SandDunes_WT_O]'\n if self.initialize['SaturatedBarrens_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Young age Normalized [Output/Barrow/SaturatedBarrens_WT_Y]'\n if self.initialize['SaturatedBarrens_WT_M_Normal'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Medium age Normalized [Output/Barrow/SaturatedBarrens_WT_M]'\n if self.initialize['SaturatedBarrens_WT_O_Normal'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Old age Normalized [Output/Barrow/SaturatedBarrens_WT_O]'\n if self.initialize['Shrubs_WT_O_Normal'].lower() == 'yes':\n print ' Initial Shrubs, Wetland Tundra, Old age Normalized [Output/Barrow/Shrubs_WT_O]'\n if self.initialize['Urban_WT_Normal'].lower() == 'yes':\n print ' Initial Urban area, Wetland Tundra, Normal [Output/Barrow/Urban_WT]'\n if self.initialize['Rivers_WT_Y_Normal'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tundra, Young age Normalized [Output/Barrow/Rivers_WT_Y]'\n if self.initialize['Rivers_WT_M_Normal'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tundra, Medium age Normalized [Output/Barrow/Rivers_WT_M]'\n if self.initialize['Rivers_WT_O_Normal'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tunrda, old age Normalized [Output/Barrow/Rivers_WT_O]'\n \n \n# if self.initialize['WetNPG_Normal'].lower() == 'yes':\n# print ' Normalized Wetland Non-polygonal Ground Figure [Output/Wet_NPG]'\n# if self.initialize['WetLCP_Normal'].lower() == 'yes':\n# print 
' Normalized Wetland Low Center Polygon Figure [Output/Wet_LCP]'\n# if self.initialize['WetCLC_Normal'].lower() == 'yes':\n# print ' Normalized Wetland Coalescent Low Center Polygon Figure [Output/Wet_CLC]'\n# if self.initialize['WetFCP_Normal'].lower() == 'yes':\n# print ' Normalized Wetland Flat Center Polygon Figure [Output/Wet_FCP]'\n# if self.initialize['WetHCP_Normal'].lower() == 'yes':\n# print ' Normalized Wetland High Center Polygon Figure [Output/Wet_HCP]'\n# if self.initialize['Lakes_Normal'].lower() == 'yes':\n# print ' Normalized Lakes Figure [Output/Lakes]'\n# if self.initialize['Ponds_Normal'].lower() == 'yes':\n# print ' Normalized Ponds Figure [Output/Ponds]'\n# if self.initialize['Rivers_Normal'].lower() == 'yes':\n# print ' Normalized Rivers Figure [Output/Other_Cohorts]'\n# if self.initialize['Urban_Normal'].lower() == 'yes':\n# print ' Normalized Ubran Figure [Output/Other_Cohorts]'\n if self.initialize['Total_Cohorts_Normal'].lower() == 'yes':\n print ' Normalize Total Cohorts [Output/All_Cohorts]'\n print ' '\n\n print 'Outputs of Cohort Ages:'\n if self.initialize['Initial_Cohort_Age_Figure'].lower() != 'yes':\n print ' No outputs generated.'\n else:\n if self.initialize['Meadow_WT_Y_Age'].lower() == 'yes':\n print ' Inital Meadow, Wetland Tundra, Young age distribution [Output/Barrow/Meadow_WT_Y]'\n if self.initialize['Meadow_WT_M_Age'].lower() == 'yes':\n print ' Initial Meadow, Wetland Tundra, Medium age distribution [Output/Barrow/Meadow_WT_M]'\n if self.initialize['Meadow_WT_O_Age'].lower() == 'yes':\n print ' Initial Meadow, Wetland Tundra, Old age distribution [Output/Barrow/Meadow_WT_O]'\n if self.initialize['LCP_WT_Y_Age'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Young age distribution [Output/Barrow/LCP_WT_Y]'\n if self.initialize['LCP_WT_M_Age'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Medium age distribution [Output/Barrow/LCP_WT_M]'\n if self.initialize['LCP_WT_O_Age'].lower() == 'yes':\n print ' Initial Low Center Polygon, Wetland Tundra, Old age distribution [Output/Barrow/LCP_WT_O]'\n if self.initialize['CLC_WT_Y_Age'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Young age distribution [Output/Barrow/CLC_WT_Y]'\n if self.initialize['CLC_WT_M_Age'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Medium age distribution [Output/Barrow/CLC_WT_M]'\n if self.initialize['CLC_WT_O_Age'].lower() == 'yes':\n print ' Initial Coalescent Low Center Polygon, Wetland Tundra, Old age distribution [Output/Barrow/CLC_WT_O]'\n if self.initialize['FCP_WT_Y_Age'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Young age distribution [Output/Barrow/FCP_WT_Y]'\n if self.initialize['FCP_WT_M_Age'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Medium age distribution [Output/Barrow/FCP_WT_M]'\n if self.initialize['FCP_WT_O_Age'].lower() == 'yes':\n print ' Initial Flat Center Polygon, Wetland Tundra, Old age distribution [Output/Barrow/FCP_WT_O]'\n if self.initialize['HCP_WT_Y_Age'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Young age distribution [Output/Barrow/HCP_WT_Y]'\n if self.initialize['HCP_WT_M_Age'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Medium age distribution [Output/Barrow/HCP_WT_M]'\n if self.initialize['HCP_WT_O_Age'].lower() == 'yes':\n print ' Initial High Center Polygon, Wetland Tundra, Old age distribution 
[Output/Barrow/HCP_WT_O]'\n if self.initialize['LargeLakes_WT_Y_Age'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Young age distribution [Output/Barrow/LargeLakes_WT_Y]'\n if self.initialize['LargeLakes_WT_M_Age'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Medium age distribution [Output/Barrow/LargeLakes_WT_M]'\n if self.initialize['LargeLakes_WT_O_Age'].lower() == 'yes':\n print ' Initial Large (size) Lakes, Wetland Tundra, Old age distribution [Output/Barrow/LargeLakes_WT_O]'\n if self.initialize['MediumLakes_WT_Y_Age'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Young age distribution [Output/Barrow/MediumLakes_WT_Y]'\n if self.initialize['MediumLakes_WT_M_Age'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Medium age distribution [Output/Barrow/MediumLakes_WT_M]'\n if self.initialize['MediumLakes_WT_O_Age'].lower() == 'yes':\n print ' Initial Medium (size) Lakes, Wetland Tundra, Old age distribution [Output/Barrow/MediumLakes_WT_O]'\n if self.initialize['SmallLakes_WT_Y_Age'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Young age distribution [Output/Barrow/SmallLakes_WT_Y]'\n if self.initialize['SmallLakes_WT_M_Age'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Medium age distribution [Output/Barrow/SmallLakes_WT_M]'\n if self.initialize['SmallLakes_WT_O_Age'].lower() == 'yes':\n print ' Initial Small (size) Lakes, Wetland Tundra, Old age distribution [Output/Barrow/SmallLakes_WT_O]'\n if self.initialize['Ponds_WT_Y_Age'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Young age distribution [Output/Barrow/Ponds_WT_Y]'\n if self.initialize['Ponds_WT_M_Age'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Medium age distribution [Output/Barrow/Ponds_WT_M]'\n if self.initialize['Ponds_WT_O_Age'].lower() == 'yes':\n print ' Initial Ponds, Wetland Tundra, Old age distribution [Output/Barrow/Ponds_WT_O]'\n if self.initialize['CoastalWaters_WT_O_Age'].lower() == 'yes':\n print ' Initial Coastal Waters, Wetland Tundra, Old age distribution [Output/Barrow/CoastalWaters_WT_O]'\n if self.initialize['DrainedSlope_WT_Y_Age'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Young age distribution [Output/Barrow/DrainedSlope_WT_Y]'\n if self.initialize['DrainedSlope_WT_M_Age'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Medium age distribution [Output/Barrow/DrainedSlope_WT_M]'\n if self.initialize['DrainedSlope_WT_O_Age'].lower() == 'yes':\n print ' Initial Drained Slope, Wetland Tundra, Old age distribution [Output/Barrow/DrainedSlope_WT_O]'\n if self.initialize['SandDunes_WT_Y_Age'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Young age distribution [Output/Barrow/SandDunes/WT_Y]'\n if self.initialize['SandDunes_WT_M_Age'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Medium age distribution [Ouput/Barrow/SandDunes_WT_M]'\n if self.initialize['SandDunes_WT_O_Age'].lower() == 'yes':\n print ' Initial Sand Dunes, Wetland Tundra, Old age distribution [Output/Barrow/SandDunes_WT_O]'\n if self.initialize['SaturatedBarrens_WT_Y_Age'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Young age distribution [Output/Barrow/SaturatedBarrens_WT_Y]'\n if self.initialize['SaturatedBarrens_WT_M_Age'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Medium age distribution 
[Output/Barrow/SaturatedBarrens_WT_M]'\n if self.initialize['SaturatedBarrens_WT_O_Age'].lower() == 'yes':\n print ' Initial Saturated Barrens, Wetland Tundra, Old age distribution [Output/Barrow/SaturatedBarrens_WT_O]'\n if self.initialize['Shrubs_WT_O_Age'].lower() == 'yes':\n print ' Initial Shrubs, Wetland Tundra, Old age distribution [Output/Barrow/Shrubs_WT_O]'\n if self.initialize['Urban_WT_Age'].lower() == 'yes':\n print ' Initial Urban area, Wetland Tundra, Normal [Output/Barrow/Urban_WT]'\n if self.initialize['Rivers_WT_Y_Age'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tundra, Young age distribution [Output/Barrow/Rivers_WT_Y]'\n if self.initialize['Rivers_WT_M_Age'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tundra, Medium age distribution [Output/Barrow/Rivers_WT_M]'\n if self.initialize['Rivers_WT_O_Age'].lower() == 'yes':\n print ' Initial Rivers, Wetland Tunrda, old age distribution [Output/Barrow/Rivers_WT_O]'\n\n# if self.initialize['WetNPG_Age'].lower() == 'yes':\n# print ' Wetland Non-polygonal Ground Age [Output/Wet_NPG]'\n# if self.initialize['WetLCP_Age'].lower() == 'yes':\n# print ' Wetland Low Center Polygon Age [Output/Wet_LCP]'\n# if self.initialize['WetCLC_Age'].lower() == 'yes':\n# print ' Wetland Coalescent Low Center Polygon Age [Output/Wet_CLC]'\n# if self.initialize['WetFCP_Age'].lower() == 'yes':\n# print ' Wetland Flat Center Polygon Age [Output/Wet_FCP]'\n# if self.initialize['WetHCP_Age'].lower() == 'yes':\n# print ' Wetland High Center Polygon Age [Output/Wet_HCP]'\n# if self.initialize['Lakes_Age'].lower() == 'yes':\n# print ' Lakes Age [Output/Lakes]'\n# if self.initialize['Ponds_Age'].lower() == 'yes':\n# print ' Normalized Ponds Age [Output/Ponds]'\n print ' '\n ##########################################################################\n print '-----------------------------------'\n print ' Meteorologic Data Information '\n print '-----------------------------------'\n if self.Met['met_distribution'].lower() == 'point':\n print 'Point meteorologic data is used.'\n else:\n print 'Meterologic data is distributed.'\n print 'Meteorologic Data File: ', self.met_file\n if self.Met['degree_day_method'].lower() == 'read':\n print 'Degree Days read from files: ',self.Met['TDD_file'] +' and '+self.Met['FDD_file']\n else:\n print 'Degree Days calculated during simulation.'\n print ' '\n\n print 'Outputs:'\n if self.Met['Degree_Day_Output'].lower() == 'yes':\n print ' Degree-Days are output.'\n \n # Note: Might want to add climatic event probability and block size here\n ############################################################################\n print '------------------------------------'\n print ' General Terrestrial Information '\n print '------------------------------------'\n print 'Ground Ice Distribution: ', self.Terrestrial['Ice_Distribution']\n print 'Drainage Efficiency Distribution: ', self.Terrestrial['Drainage_Efficiency_Distribution']\n print 'Initial Active Layer Depth Distribution: ',self.Terrestrial['ALD_Distribution']\n print ' '\n ## #________________________________________________________\n ## # Setting Protective Layer Factor Shorthand for results\n ## #________________________________________________________\n ## WNPG = self.Terrestrial['Wet_NPG_PLF']\n ## WLCP = self.Terrestrial['Wet_LCP_PLF']\n ## WCLC = self.Terrestrial['Wet_CLC_PLF']\n ## WFCP = self.Terrestrial['Wet_FCP_PLF']\n ## WHCP = self.Terrestrial['Wet_HCP_PLF']\n ## GNPG = self.Terrestrial['Gra_NPG_PLF']\n ## GLCP = self.Terrestrial['Gra_LCP_PLF']\n ## 
GFCP = self.Terrestrial['Gra_FCP_PLF']\n ## GHCP = self.Terrestrial['Gra_HCP_PLF']\n ## SNPG = self.Terrestrial['Shr_NPG_PLF']\n ## SLCP = self.Terrestrial['Shr_LCP_PLF']\n ## SFCP = self.Terrestrial['Shr_FCP_PLF']\n ## SHCP = self.Terrestrial['Shr_HCP_PLF']\n ## LPLF = self.Terrestrial['Lakes_PLF']\n ## PPLF = self.Terrestrial['Ponds_PLF']\n ## #_________________________________________________________\n ## print '__________________________________________________'\n ## print ' Protective Layer Factors '\n ## print '__________________________________________________'\n ## print ' | Wetland | Graminoid | Shrub | Lake | Pond |'\n ## print '__________________________________________________'\n ## print ' NPG | '+str(WNPG)+' | '+str(GNPG)+' | '+str(SNPG)+' | '+\\\n ## str(LPLF)+' | '+str(PPLF)+' |'\n ## print ' LCP | '+str(WLCP)+' | '+str(GLCP)+' | '+str(SLCP)+' | '+\\\n ## '-- | -- |'\n ## print ' CLC | '+str(WCLC)+' | -- | -- | '+\\\n ## '-- | -- |'\n ## print ' FCP | '+str(WFCP)+' | '+str(GFCP)+' | '+str(SFCP)+' | '+\\\n ## '-- | -- |'\n ## print ' HCP | '+str(WHCP)+' | '+str(GHCP)+' | '+str(SHCP)+' | '+\\\n ## '-- | -- |'\n ## print '__________________________________________________'\n ## print ' '\n ############################################################################\n print '==========================================================='\n print '----------------------------------- '\n print ' Meadows, Wetland Tundra, All ages'\n print '-----------------------------------'\n init_total = self.Init_Meadow_WT_Y + self.Init_Meadow_WT_M + self.Init_Meadow_WT_O\n final_total = self.Final_Meadow_WT_Y + self.Final_Meadow_WT_M + self.Final_Meadow_WT_O\n print 'Initial Fractional Area (km2): ', init_total \n print 'Final Fractional Area (km2): ', final_total\n print 'Total Fractional Change (km2): ', final_total - init_total\n print 'Percent difference: ', ((final_total - init_total)/init_total)*100.\n print ' ' \n print '----------------------------------- '\n print ' Meadows, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_Meadow_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_Meadow_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_Meadow_WT_Y - self.Init_Meadow_WT_Y\n print 'Percent difference: ', ((self.Final_Meadow_WT_Y - self.Init_Meadow_WT_Y)/self.Init_Meadow_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Meadows, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_Meadow_WT_M\n print 'Final Fractional Area (km2): ', self.Final_Meadow_WT_M\n print 'Total Fractional Change (km2): ', self.Final_Meadow_WT_M - self.Init_Meadow_WT_M\n print 'Percent difference: ', ((self.Final_Meadow_WT_M - self.Init_Meadow_WT_M)/self.Init_Meadow_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' Meadows, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_Meadow_WT_O\n print 'Final Fractional Area (km2): ', self.Final_Meadow_WT_O\n print 'Total Fractional Change (km2): ', self.Final_Meadow_WT_O - self.Init_Meadow_WT_O\n print 'Percent difference: ', ((self.Final_Meadow_WT_O - self.Init_Meadow_WT_O)/self.Init_Meadow_WT_O)*100.\n print ' '\n print '==========================================================='\n print '----------------------------------- '\n print ' Low Center Polygons, Wetland Tundra, All 
ages'\n print '-----------------------------------'\n init_total = self.Init_LCP_WT_Y + self.Init_LCP_WT_M + self.Init_LCP_WT_O\n final_total = self.Final_LCP_WT_Y + self.Final_LCP_WT_M + self.Final_LCP_WT_O\n print 'Initial Fractional Area (km2): ', init_total \n print 'Final Fractional Area (km2): ', final_total\n print 'Total Fractional Change (km2): ', final_total - init_total\n print 'Percent difference: ', ((final_total - init_total)/init_total)*100.\n print ' ' \n print '----------------------------------- '\n print ' Low Center Polygons, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_LCP_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_LCP_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_LCP_WT_Y - self.Init_LCP_WT_Y\n print 'Percent difference: ', ((self.Final_LCP_WT_Y - self.Init_LCP_WT_Y)/self.Init_LCP_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Low Center Polygons, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_LCP_WT_M\n print 'Final Fractional Area (km2): ', self.Final_LCP_WT_M\n print 'Total Fractional Change (km2): ', self.Final_LCP_WT_M - self.Init_LCP_WT_M\n print 'Percent difference: ', ((self.Final_LCP_WT_M - self.Init_LCP_WT_M)/self.Init_LCP_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' Low Center Polygons, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_LCP_WT_O\n print 'Final Fractional Area (km2): ', self.Final_LCP_WT_O\n print 'Total Fractional Change (km2): ', self.Final_LCP_WT_O - self.Init_LCP_WT_O\n print 'Percent difference: ', ((self.Final_LCP_WT_O - self.Init_LCP_WT_O)/self.Init_LCP_WT_O)*100.\n print ' '\n print '===========================================================' \n print '----------------------------------- '\n print ' Coalescent Low Center Polygons, Wetland Tundra, All ages'\n print '-----------------------------------'\n init_total = self.Init_CLC_WT_Y + self.Init_CLC_WT_M + self.Init_CLC_WT_O\n final_total = self.Final_CLC_WT_Y + self.Final_CLC_WT_M + self.Final_CLC_WT_O\n print 'Initial Fractional Area (km2): ', init_total \n print 'Final Fractional Area (km2): ', final_total\n print 'Total Fractional Change (km2): ', final_total - init_total\n print 'Percent difference: ', ((final_total - init_total)/init_total)*100.\n print ' ' \n print '----------------------------------- '\n print ' Coalescent Low Center Polygons, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_CLC_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_CLC_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_CLC_WT_Y - self.Init_CLC_WT_Y\n print 'Percent difference: ', ((self.Final_CLC_WT_Y - self.Init_CLC_WT_Y)/self.Init_CLC_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Coalescent Low Center Polygons, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_CLC_WT_M\n print 'Final Fractional Area (km2): ', self.Final_CLC_WT_M\n print 'Total Fractional Change (km2): ', self.Final_CLC_WT_M - self.Init_CLC_WT_M\n print 'Percent difference: ', ((self.Final_CLC_WT_M - self.Init_CLC_WT_M)/self.Init_CLC_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print 
' Coalescent Low Center Polygons, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_CLC_WT_O\n print 'Final Fractional Area (km2): ', self.Final_CLC_WT_O\n print 'Total Fractional Change (km2): ', self.Final_CLC_WT_O - self.Init_CLC_WT_O\n print 'Percent difference: ', ((self.Final_CLC_WT_O - self.Init_CLC_WT_O)/self.Init_CLC_WT_O)*100.\n print ' '\n print '==========================================================='\n print '----------------------------------- '\n print ' Flat Center Polygons, Wetland Tundra, All ages'\n print '-----------------------------------'\n init_total = self.Init_FCP_WT_Y + self.Init_FCP_WT_M + self.Init_FCP_WT_O\n final_total = self.Final_FCP_WT_Y + self.Final_FCP_WT_M + self.Final_FCP_WT_O\n print 'Initial Fractional Area (km2): ', init_total \n print 'Final Fractional Area (km2): ', final_total\n print 'Total Fractional Change (km2): ', final_total - init_total\n print 'Percent difference: ', ((final_total - init_total)/init_total)*100.\n print ' ' \n print '----------------------------------- '\n print ' Flat Center Polygons, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_FCP_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_FCP_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_FCP_WT_Y - self.Init_FCP_WT_Y\n print 'Percent difference: ', ((self.Final_FCP_WT_Y - self.Init_FCP_WT_Y)/self.Init_FCP_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Flat Center Polygons, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_FCP_WT_M\n print 'Final Fractional Area (km2): ', self.Final_FCP_WT_M\n print 'Total Fractional Change (km2): ', self.Final_FCP_WT_M - self.Init_FCP_WT_M\n print 'Percent difference: ', ((self.Final_FCP_WT_M - self.Init_FCP_WT_M)/self.Init_FCP_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' Flat Center Polygons, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_FCP_WT_O\n print 'Final Fractional Area (km2): ', self.Final_FCP_WT_O\n print 'Total Fractional Change (km2): ', self.Final_FCP_WT_O - self.Init_FCP_WT_O\n print 'Percent difference: ', ((self.Final_FCP_WT_O - self.Init_FCP_WT_O)/self.Init_FCP_WT_O)*100.\n print ' '\n print '===========================================================' \n print '----------------------------------- '\n print ' High Center Polygons, Wetland Tundra, All ages'\n print '-----------------------------------'\n init_total = self.Init_HCP_WT_Y + self.Init_HCP_WT_M + self.Init_HCP_WT_O\n final_total = self.Final_HCP_WT_Y + self.Final_HCP_WT_M + self.Final_HCP_WT_O\n print 'Initial Fractional Area (km2): ', init_total \n print 'Final Fractional Area (km2): ', final_total\n print 'Total Fractional Change (km2): ', final_total - init_total\n print 'Percent difference: ', ((final_total - init_total)/init_total)*100.\n print ' ' \n print '----------------------------------- '\n print ' High Center Polygons, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_HCP_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_HCP_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_HCP_WT_Y - self.Init_HCP_WT_Y\n print 'Percent difference: ', ((self.Final_HCP_WT_Y - 
self.Init_HCP_WT_Y)/self.Init_HCP_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' High Center Polygons, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_HCP_WT_M\n print 'Final Fractional Area (km2): ', self.Final_HCP_WT_M\n print 'Total Fractional Change (km2): ', self.Final_HCP_WT_M - self.Init_HCP_WT_M\n print 'Percent difference: ', ((self.Final_HCP_WT_M - self.Init_HCP_WT_M)/self.Init_HCP_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' High Center Polygons, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_HCP_WT_O\n print 'Final Fractional Area (km2): ', self.Final_HCP_WT_O\n print 'Total Fractional Change (km2): ', self.Final_HCP_WT_O - self.Init_HCP_WT_O\n print 'Percent difference: ', ((self.Final_HCP_WT_O - self.Init_HCP_WT_O)/self.Init_HCP_WT_O)*100.\n print ' '\n print '===========================================================' \n print '------------------------------------------ '\n print ' Lakes, Wetland Tundra, All sizes and ages'\n print '-------------------------------------------'\n init_total = self.Init_LargeLakes_WT_Y + self.Init_LargeLakes_WT_M + self.Init_LargeLakes_WT_O + \\\n self.Init_MediumLakes_WT_Y + self.Init_MediumLakes_WT_M + self.Init_MediumLakes_WT_O + \\\n self.Init_SmallLakes_WT_Y + self.Init_SmallLakes_WT_M + self.Init_SmallLakes_WT_O\n final_total = self.Final_LargeLakes_WT_Y + self.Final_LargeLakes_WT_M + self.Final_LargeLakes_WT_O + \\\n self.Final_MediumLakes_WT_Y + self.Final_MediumLakes_WT_M + self.Final_MediumLakes_WT_O + \\\n self.Final_SmallLakes_WT_Y + self.Final_SmallLakes_WT_M + self.Final_SmallLakes_WT_O\n print 'Initial Fractional Area (km2): ', init_total \n print 'Final Fractional Area (km2): ', final_total\n print 'Total Fractional Change (km2): ', final_total - init_total\n print 'Percent difference: ', ((final_total - init_total)/init_total)*100.\n print ' ' \n print '----------------------------------- '\n print ' Large Lakes, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_LargeLakes_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_LargeLakes_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_LargeLakes_WT_Y - self.Init_LargeLakes_WT_Y\n print 'Percent difference: ', ((self.Final_LargeLakes_WT_Y - self.Init_LargeLakes_WT_Y)/self.Init_LargeLakes_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Large lakes, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_LargeLakes_WT_M\n print 'Final Fractional Area (km2): ', self.Final_LargeLakes_WT_M\n print 'Total Fractional Change (km2): ', self.Final_LargeLakes_WT_M - self.Init_LargeLakes_WT_M\n print 'Percent difference: ', ((self.Final_LargeLakes_WT_M - self.Init_LargeLakes_WT_M)/self.Init_LargeLakes_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' Large Lakes, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_LargeLakes_WT_O\n print 'Final Fractional Area (km2): ', self.Final_LargeLakes_WT_O\n print 'Total Fractional Change (km2): ', self.Final_LargeLakes_WT_O - self.Init_LargeLakes_WT_O\n print 'Percent difference: ', ((self.Final_LargeLakes_WT_O - 
self.Init_LargeLakes_WT_O)/self.Init_LargeLakes_WT_O)*100.\n print ' '\n print '----------------------------------- '\n print ' Medium Lakes, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_MediumLakes_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_MediumLakes_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_MediumLakes_WT_Y - self.Init_MediumLakes_WT_Y\n print 'Percent difference: ', ((self.Final_MediumLakes_WT_Y - self.Init_MediumLakes_WT_Y)/self.Init_MediumLakes_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Medium lakes, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_MediumLakes_WT_M\n print 'Final Fractional Area (km2): ', self.Final_MediumLakes_WT_M\n print 'Total Fractional Change (km2): ', self.Final_MediumLakes_WT_M - self.Init_MediumLakes_WT_M\n print 'Percent difference: ', ((self.Final_MediumLakes_WT_M - self.Init_MediumLakes_WT_M)/self.Init_MediumLakes_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' Medium Lakes, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_MediumLakes_WT_O\n print 'Final Fractional Area (km2): ', self.Final_MediumLakes_WT_O\n print 'Total Fractional Change (km2): ', self.Final_MediumLakes_WT_O - self.Init_MediumLakes_WT_O\n print 'Percent difference: ', ((self.Final_MediumLakes_WT_O - self.Init_MediumLakes_WT_O)/self.Init_MediumLakes_WT_O)*100.\n print ' '\n print '----------------------------------- '\n print ' Small Lakes, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_SmallLakes_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_SmallLakes_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_SmallLakes_WT_Y - self.Init_SmallLakes_WT_Y\n print 'Percent difference: ', ((self.Final_SmallLakes_WT_Y - self.Init_SmallLakes_WT_Y)/self.Init_SmallLakes_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Small lakes, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_SmallLakes_WT_M\n print 'Final Fractional Area (km2): ', self.Final_SmallLakes_WT_M\n print 'Total Fractional Change (km2): ', self.Final_SmallLakes_WT_M - self.Init_SmallLakes_WT_M\n print 'Percent difference: ', ((self.Final_SmallLakes_WT_M - self.Init_SmallLakes_WT_M)/self.Init_SmallLakes_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' Small Lakes, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_SmallLakes_WT_O\n print 'Final Fractional Area (km2): ', self.Final_SmallLakes_WT_O\n print 'Total Fractional Change (km2): ', self.Final_SmallLakes_WT_O - self.Init_SmallLakes_WT_O\n print 'Percent difference: ', ((self.Final_SmallLakes_WT_O - self.Init_SmallLakes_WT_O)/self.Init_SmallLakes_WT_O)*100.\n print ' '\n print '===========================================================' \n print '------------------------------------------ '\n print ' Ponds, Wetland Tundra, All ages'\n print '-------------------------------------------'\n init_total = self.Init_Ponds_WT_Y + self.Init_Ponds_WT_M + self.Init_Ponds_WT_O \n final_total = self.Final_Ponds_WT_Y + self.Final_Ponds_WT_M + self.Final_Ponds_WT_O \n 
print 'Initial Fractional Area (km2): ', init_total \n print 'Final Fractional Area (km2): ', final_total\n print 'Total Fractional Change (km2): ', final_total - init_total\n print 'Percent difference: ', ((final_total - init_total)/init_total)*100.\n print ' ' \n print '----------------------------------- '\n print ' Ponds, Wetland Tundra Young age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_Ponds_WT_Y\n print 'Final Fractional Area (km2): ', self.Final_Ponds_WT_Y\n print 'Total Fractional Change (km2): ', self.Final_Ponds_WT_Y - self.Init_Ponds_WT_Y\n print 'Percent difference: ', ((self.Final_Ponds_WT_Y - self.Init_Ponds_WT_Y)/self.Init_Ponds_WT_Y)*100.\n print ' '\n print '----------------------------------- '\n print ' Ponds, Wetland Tundra Medium age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_Ponds_WT_M\n print 'Final Fractional Area (km2): ', self.Final_Ponds_WT_M\n print 'Total Fractional Change (km2): ', self.Final_Ponds_WT_M - self.Init_Ponds_WT_M\n print 'Percent difference: ', ((self.Final_Ponds_WT_M - self.Init_Ponds_WT_M)/self.Init_Ponds_WT_M)*100.\n print ' '\n print '----------------------------------- '\n print ' Ponds, Wetland Tundra Old age'\n print '-----------------------------------'\n print 'Initial Fractional Area (km2): ', self.Init_Ponds_WT_O\n print 'Final Fractional Area (km2): ', self.Final_Ponds_WT_O\n print 'Total Fractional Change (km2): ', self.Final_Ponds_WT_O - self.Init_Ponds_WT_O\n print 'Percent difference: ', ((self.Final_Ponds_WT_O - self.Init_Ponds_WT_O)/self.Init_Ponds_WT_O)*100.\n print '==========================================================='", "def loop_through_units(self):\n\n if self.screenshot_only:\n self.UI.remove_UI()\n\n self.num_units_to_review = len(self.incomplete_list)\n for counter, unit_id in enumerate(self.incomplete_list):\n\n self.current_unit_id = unit_id\n self.identify_unit(unit_id, counter)\n self.add_alerts()\n\n skip_subject = self.load_unit(unit_id)\n\n if skip_subject:\n print('Skipping current subject ..')\n continue\n\n self.display_unit()\n\n # checking if batch generation of screenshots is requested\n if not self.screenshot_only:\n\n print('\\nReviewing {}'.format(unit_id))\n timer_start = timer()\n\n # this is where all the reviewing/rating/notes happen\n self.show_fig_and_wait()\n\n # capturing time elapsed by ID, in seconds\n self.timer[unit_id] = timedelta(seconds=timer() - timer_start).seconds\n\n # TODO save each rating to disk to avoid loss of work due to crach etc\n self.print_rating(unit_id)\n\n if self.quit_now:\n print('\\nUser chosen to quit..')\n break\n else:\n self.export_screenshot()\n # annot text is unit specific\n self.UI.annot_text.remove()", "def draw(self):\n for tree_idx, tree in enumerate(self.trees):\n print(\"==========================================\\nTree\",\n tree_idx)\n self._print_tree(tree)", "def drawAll(self):\r\n for x in range(len(self.model)):\r\n self.model[x].draw()", "def director(self):\n if self.stop_count:\n return\n\n printer()", "def print_design(x, D):\n\n N = round(x[0])\n ds = x[1]\n ws = x[2]\n wc = x[3]\n lc = x[4]\n g = x[5]\n\n # compute mass\n M = 2.0*(2.0*wc+ws+ds)*lc*wc*D.rowmc + \\\n (2*lc+2*wc+np.pi*ds)*ds*ws*D.kpf*D.rowwc\n # compute loss at rated current\n Prt = (2*lc+2*wc+np.pi*ds)*(N*D.irt) ** 2/(ds*ws*D.kpf*D.sigmawc)\n # compute inductance\n L = D.mu0*lc*wc*N ** 2/(2*g)\n # compute the flux density\n Brt = D.mu0*N*D.irt/(2*g)\n # current 
density\n Jrt = N*D.irt/(ws*ds*D.kpf)\n print('Design Data')\n print(f'Turns = {N}')\n print(f'Slot depth (m) = {ds}')\n print(f'Slot width (m) = {ws}')\n print(f'Core width (m) = {wc}')\n print(f'Core length (m) = {lc}')\n print(f'Air gap (m) = {g}')\n print(' ')\n print('Design Metrics')\n print(f'Mass (kg) = {M}')\n print(f'Loss at rated current (W) = {Prt}')\n print(' ')\n print('Constrained Quantities')\n print(f'Inductance (H) = {L}')\n print(f'Flux Density at Rated Current (T) = {Brt}')\n print(f'Current Density Rated Current (A/m**2) = {Jrt}')", "def __write_all_persons(self):\n person_nbr = 1\n print(len(self.person_handles))\n\n with self._user.progress(_(\"Person Report\"), \n _(\"Generating report\"), \n len(self.person_handles)) as step:\n \n for handle in self.person_handles:\n self.__write_person(handle, person_nbr)\n person_nbr += 1\n # increment progress bar\n step()\n \n family_nbr = 1\n print(len(self.family_handles))\n with self._user.progress(_(\"Family Report\"), \n _(\"Generating report\"), \n len(self.family_handles)) as step:\n \n for handle in self.family_handles:\n self.__write_family(handle, family_nbr)\n family_nbr += 1\n # increment progress bar\n step() \n\n source_nbr = 1\n print(len(self.source_handles))\n with self._user.progress(_(\"Source Report\"), \n _(\"Generating report\"), \n len(self.source_handles)) as step:\n \n for handle in self.source_handles:\n self.__write_source(handle, source_nbr)\n source_nbr += 1\n # increment progress bar\n step()\n\n citation_nbr = 1\n print(len(self.citation_handles)) \n with self._user.progress(_(\"Citation Report\"), \n _(\"Generating report\"), \n len(self.citation_handles)) as step:\n \n for handle in self.citation_handles:\n self.__write_citation(handle, citation_nbr)\n citation_nbr += 1\n # increment progress bar\n step() \n\n \n place_nbr = 1\n print(len(self.place_handles))\n with self._user.progress(_(\"Place Report\"), \n _(\"Generating report\"), \n len(self.place_handles)) as step:\n \n for handle in self.place_handles:\n self.__write_place(handle, place_nbr)\n place_nbr += 1\n # increment progress bar\n step()\n\n repository_nbr = 1\n print(len(self.repository_handles))\n with self._user.progress(_(\"Repository Report\"), \n _(\"Generating report\"), \n len(self.repository_handles)) as step:\n \n for handle in self.repository_handles:\n self.__write_repository(handle, repository_nbr)\n repository_nbr += 1\n # increment progress bar\n step() \n\n \n media_nbr = 1\n print(len(self.media_handles))\n with self._user.progress(_(\"Media Report\"), \n _(\"Generating report\"), \n len(self.media_handles)) as step:\n \n for handle in self.media_handles:\n self.__write_media(handle, media_nbr)\n media_nbr += 1\n # increment progress bar\n step()\n\n event_nbr = 1\n print(len(self.event_handles))\n with self._user.progress(_(\"Event Report\"), \n _(\"Generating report\"), \n len(self.event_handles)) as step:\n \n for handle in self.event_handles:\n self.__write_event(handle, event_nbr)\n event_nbr += 1\n # increment progress bar\n step()\n\n note_nbr = 1\n print(len(self.note_handles)) \n with self._user.progress(_(\"Note Report\"), \n _(\"Generating report\"), \n len(self.note_handles)) as step:\n \n for handle in self.note_handles:\n self.__write_note(handle, note_nbr)\n note_nbr += 1\n # increment progress bar\n step()", "def performSimulation(self):\n \n if self.parameters['verbose']:\n print(\"=====================\\nStarting simulation with parameters\\n\",self.parameters)\n 
print(\"=====================\\nInitial Graph\\n\")\n self.showState()\n print(\"=====================\")\n\n while self.parameters['steps'] > 0:\n if self.parameters['verbose']: print(\"Performing step\")\n self.performStep()\n if self.parameters['verbose']: self.showState()\n\n if self.parameters['verbose']:\n print(\"=====================\\nFinished Simulation\\n\\nResult graph:\")\n self.showState()\n #self.showGraph(self.parameters['file_name'])\n #self.showState()\n #self.showStats()", "def _print(self):\n self.i += 1\n np.set_printoptions(precision=3, suppress=True)\n if self.i%40 == 0:\n self.i = 0\n print self.ekf.current_state_estimate[4:7]", "def main():\n \"\"\"get and format the data\"\"\"\n observation = weather.get_observation()\n forecast = weather.get_forecast()\n draw_list = build_draw_list(observation, forecast)\n\n try:\n while 1:\n for drawable in draw_list:\n print(drawable[0])\n print(drawable[1], \"\\n\")\n time.sleep(6.5)\n weather.update()\n observation = weather.get_observation()\n forecast = weather.get_forecast()\n draw_list = build_draw_list(observation, forecast)\n\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupt detected, exiting...\")", "def step4(self):\n for mr in self.mrs:\n self.log.info(\"Boot drive of controller: %d is %d\"\n % (mr.ctrl_id, mr.cli.bootdrive_vd_get()))", "def print_production_processes(sp, chemistry, model_params, rates_frame, wall_fluxes_frame, n=None, unit='m-3/s'):\n # build the production/consumption rates in volume and on surfaces\n volumetric_rates = \\\n ResultsParser.get_volumetric_rates(sp=sp, chemistry=chemistry, rates_frame=rates_frame, annotate=True)\n surface_rates = \\\n ResultsParser.get_surface_rates(sp=sp, chemistry=chemistry, model_params=model_params,\n wall_fluxes_frame=wall_fluxes_frame, annotate=True)\n all_rates = volumetric_rates.append(surface_rates, verify_integrity=True)\n production_rates = all_rates[all_rates > 0]\n # capping\n if n is None:\n n = len(production_rates)\n # print the stuff:\n print('\\nProcesses and rates of production for {} [{}]:'.format(sp, unit))\n series_to_print = \\\n production_rates.sort_values(ascending=False).iloc[:n] * ResultsParser.unit_conversion_factors[unit]\n print(series_to_print.to_string())\n if n < len(production_rates):\n print('{: ^50}'.format('...'))\n print()", "def print_progress(self, i, current_params):\n for split in range(1,11):\n if i == (round(self.iterations/10*split)-1):\n post = -self.full_neg_posterior(current_params)\n approx = self.create_normal_logq(current_params)\n diff = post - approx\n if not self.quiet_progress:\n print(str(split) + \"0% done : ELBO is \" + str(diff) + \", p(y,z) is \" + str(post) + \", q(z) is \" + str(approx))", "def _deliver_printing(self):\n providers = set(self.mapped('provider_id.provider'))\n for provider_name in providers: # process by provider type\n if hasattr(self, '_%s_deliver_printing' % provider_name):\n records = self.filtered(lambda r: r.provider_id.provider == provider_name)\n getattr(records, '_%s_deliver_printing' % provider_name)()", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"\\nTest Definition ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-associated test case ID:\", self.test_case_ID, sep='')\n test_case = get_indexed_item_from_list(self.test_case_ID, AutoResilGlobal.test_case_list)\n if test_case != None:\n test_case.printout_all(indent_level+1)\n\n print(indent, \"|-test code ID:\", 
self.test_code_ID, sep='')\n\n print(indent, \"|-associated challenge def ID:\", self.challenge_def_ID, sep='')\n challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)\n if challenge_def != None:\n challenge_def.printout_all(indent_level+1)\n\n if self.VNF_ID_list != None:\n if len(self.VNF_ID_list) >0:\n print(indent, \"|-associated VNFs:\", sep='')\n for VNF_ID in self.VNF_ID_list:\n VNF_item = get_indexed_item_from_list(VNF_ID, AutoResilGlobal.VNF_Service_list)\n if VNF_item != None:\n VNF_item.printout_all(indent_level+1)\n\n if self.associated_metrics_ID_list != None:\n if len(self.associated_metrics_ID_list) >0:\n print(indent, \"|-associated metrics:\", sep='')\n for Metric_ID in self.associated_metrics_ID_list:\n Metric_item = get_indexed_item_from_list(Metric_ID, AutoResilGlobal.metric_definition_list)\n if Metric_item != None:\n Metric_item.printout_all(indent_level+1)\n\n if self.recipient_ID_list != None:\n if len(self.recipient_ID_list) >0:\n print(indent, \"|-associated recipients:\", sep='')\n for recipient_ID in self.recipient_ID_list:\n recipient_item = get_indexed_item_from_list(recipient_ID, AutoResilGlobal.recipient_list)\n if recipient_item != None:\n recipient_item.printout_all(indent_level+1)\n\n if self.test_CLI_command_sent_list != None:\n if len(self.test_CLI_command_sent_list) >0:\n print(indent, \"|-associated CLI commands:\", sep='')\n for CLI_command in self.test_CLI_command_sent_list:\n print(\" \"*INDENTATION_MULTIPLIER, \"|- \", CLI_command, sep='')\n\n # TODO: self.test_API_command_sent_list (depends how API commands are stored: likely a list of strings)" ]
[ "0.83719826", "0.83719486", "0.82855046", "0.8230399", "0.80913585", "0.79925144", "0.74432874", "0.690036", "0.68810475", "0.68694586", "0.68694586", "0.68694586", "0.67919457", "0.6589304", "0.61238146", "0.58934677", "0.57532626", "0.57419646", "0.57194996", "0.5676091", "0.56760615", "0.55732596", "0.5449557", "0.5448058", "0.5438508", "0.54270333", "0.539563", "0.5386803", "0.538398", "0.53490824", "0.53403974", "0.5312334", "0.5305012", "0.52998495", "0.5299505", "0.5298387", "0.5293786", "0.5283293", "0.5275948", "0.527345", "0.52689284", "0.52610904", "0.5260069", "0.52553904", "0.5252747", "0.5224877", "0.5203857", "0.519876", "0.5198296", "0.5197341", "0.5188975", "0.5185432", "0.51808506", "0.51806515", "0.5174869", "0.51713085", "0.5169498", "0.5160795", "0.515359", "0.51516175", "0.5143343", "0.5139588", "0.5138986", "0.51296794", "0.5099523", "0.509904", "0.50973296", "0.50956696", "0.5094827", "0.5093457", "0.5086539", "0.5084237", "0.50817794", "0.5077309", "0.5075823", "0.50721985", "0.50721693", "0.5067513", "0.5054121", "0.504991", "0.5048246", "0.50479674", "0.5046293", "0.50455457", "0.5042801", "0.50401103", "0.5038393", "0.5035474", "0.5032158", "0.5025069", "0.5019418", "0.50185925", "0.5015169", "0.50104827", "0.5006678", "0.50029624", "0.49962553", "0.49960667", "0.49959376" ]
0.8240983
4
Show all the models that were printed.
def show_completed_models(completed_models):
    print("\nThe following models have been printed:")
    for completed_model in completed_models:
        print(completed_model)
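A minimal usage sketch for the function above; the model names are hypothetical sample data, not values taken from this dataset:

# Hypothetical input list; any iterable of printed model names works.
finished_models = ["phone case", "robot pendant", "dodecahedron"]
show_completed_models(finished_models)
# Expected output: a blank line, the header line, then one model name per line.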
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_completed_models(completed_models):\r\n print(\"\\nThe following models have been printed:\")\r\n for completed_model in completed_models:\r\n print(completed_model)", "def show_completed_models(completed_models):\n\tprint(\"\\n The following models has been printed \")\n\tfor completed_model in completed_models:\n\t\tprint(completed_model)", "def show_completed_models(completed_models):\n\tprint(\"\\nThe following models have been printed:\")\n\tfor completed_model in completed_models:\n\t\tprint(completed_model)", "def printModel(self):\n print(self.model)", "def show_completed_models (completed_models):\n print (\"\\nThe followin models have been printed: \")\n for completed_model in completed_models:\n print (completed_model)", "def print_models (unprinted_designs,completed_models):\n \n while unprinted_designs:\n current_design = unprinted_designs.pop()\n #Simulate creating a 3D print from the desig.\n print (\"printing model: \" + current_design)\n completed_models.append (current_design)", "def print_models(unprinted_designs, completed_models):\n\twhile unprinted_designs:\n\t\tcurrent_deign = unprinted_designs.pop()\n\t\tprint(\"Printing Model: \" + current_deign)\n\t\tcompleted_models.append(current_deign)", "def printModels(cls, options):\n print \"Generating experiment requests...\"\n\n searchParams = _ClientJobUtils.makeSearchJobParamsDict(options=options)", "def printall():\n print listAll()", "def print_models(unprinted_designs, completed_models):\r\n while unprinted_designs:\r\n current_designs = unprinted_designs.pop()\r\n\r\n # simulate creating a 3d print from the design\r\n print(\"Printing model: \" + current_designs)\r\n completed_models.append(current_designs)", "def show_completed_design(completed_design):\n print(\"\\nThe following models have been printed:\")\n for completed_designs in completed_design:\n print(completed_designs)", "def print_models(unprinted_designs, completed_models):\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n \n # Simulate creating a 3D print from the design.\n print(\"Printing model: \" + current_design)\n completed_models.append(current_design)", "def print_models(unprinted_designs, completed_models):\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n\n # Simulate creating a 3d print from the design.\n print(\"Printing model: \" + current_design)\n completed_models.append(current_design)", "def print_models(unprinted_designs, completed_models):\n while unprinted_designs:\n current_design = unprinted_designs.pop()\n\n # Simulate creating a 3d print from the design.\n print(\"Printing model: \" + current_design)\n completed_models.append(current_design)", "def print_fl_models(self,fl):\n for t_id in self.fl2t_ids[fl]:\n print t_id\n self.draw_grid(self.t_id2model[t_id])\n print \"\"", "def print_models(unprinted_design, completed_design):\n \n while unprinted_design:\n current_design = unprinted_design.pop()\n print(f\"Printing model: {current_design}\")\n completed_design.append(current_design)", "def print_modles (unprinted_designs,completed_models):\n\twhile unprinted_designs:\n\t\tcurrent_design = unprinted_designs.pop()\n\t\tprint(\"Printing the design {current_design}\")\n\t\tcompleted_models.append(current_design)", "def print_summary(self):\n self.model.summary()", "def show_database_structure(self):\n self.analyze()\n items = []\n for model in get_models():\n names = []\n # for f, m in model._meta.get_fields_with_model():\n for f in model._meta.concrete_fields:\n 
names.append(f.name)\n items.append(\n \"{0} : {1}\".format(fmn(model), ', '.join(names)))\n\n items = sorted(items)\n return rstgen.ul(items)", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def print_me(self):\n\n print(\"----- Model:\",self.name,\" -----\")\n print(\"Mass (in M_sun): %.5f\" % (self.glb[imass]/constants.solar_mass))\n print(\"Radius (in R_sun): %.5f\" % (self.glb[iradius]/constants.solar_radius))\n print(\"Reference frequency (in uHz): %.3f\" % self.glb[ifreq_ref])\n print(\"Temperature (in K): %.1f\" % self.glb[itemperature])\n print(\"Luminosity (in L_sun): %.3g\" % (self.glb[iluminosity]/constants.solar_luminosity))\n print(\"Age (in Myrs): %.2f\" % self.glb[iage])\n print(\"Z: %.4f\" % self.glb[iz0])\n print(\"X: %.4f\" % self.glb[ix0])\n for (name, latex_name) in config.user_params:\n print(\"{0:29} {1:.5e}\".format(name,self.glb[user_params_index[name]]))\n print(\"Modes (in muHz):\")\n size = self.modes.shape[0]\n for i in range(size):\n print(\" (n,l,freq,IK) = (%d, %d, %.15f, %.5e)\" % \\\n (self.modes['n'][i], self.modes['l'][i], \\\n self.modes['freq'][i]*self.glb[ifreq_ref],\\\n self.modes['inertia'][i]))", "def show_model_summary(self):\n\t\treturn self.model.summary()", "def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()", "def print_model_generation(model):\n print('g1 = {} MW'.format(model.g[1].value))\n print('g2 = {} MW'.format(model.g[2].value))", "def visualize_model(self):\n if self.model is None:\n print(\"%s.visualize: implement me\" % (self.__class__.__name__))", "def print(self):\r\n self.print_avec_separateur()", "def print_objects(self):\n print(\"Spaces: {}\".format([s.name for s in self.spaces]))\n print(\"Characters: {}\".format([c.name for c in self.characters]))\n print(\"Items: {}\".format([i.name for i in self.items]))", "def print_list(self):\r\n pass", "def printModelAndTime(self):\n import time\n self._reporter.writeOutput(\"Model name = \" + self.modelName + '\\n' +\n \"Output directory = \" + self._outputDir_ + '\\n' +\n \"Time = \" + time.asctime() + '\\n')\n return", "def print_details(self):\n self.view.print_details()", "def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) 
{1}\".format(cnt, x[0]))", "def print_model_definitions(self):\n sys.stdout.write(\"Model Dimensions\\n\")\n sys.stdout.write(\"----------------\\n\")\n for key, val in self.dimensions.iteritems():\n sys.stdout.write(\"{key}: {val}\\n\".format(key=key, val=val))", "def print_all(self):\r\n for e in self.channels:\r\n e.print()", "def print_model_functions(self):\n # TODO replace print statements with stdout.write\n # TODO get derivatives recursively\n self.functions = self.definitions.get(\"functions\", [])\n for func in self.functions:\n print \"type: \", func[\"type\"]\n print \"args: \", func[\"args\"]\n print \"derivatives: \"\n for deriv in func.get(\"deriv\", []):\n for key, val in deriv.iteritems():\n print \" \", key, val\n print \"\"", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))", "def show_db_overview(self):\n\n models_list = sorted_models_list()\n apps = [p.app_label for p in settings.SITE.installed_plugins]\n s = \"%d apps: %s.\" % (len(apps), \", \".join(apps))\n s += \"\\n%d models:\\n\" % len(models_list)\n i = 0\n headers = [\n #~ \"No.\",\n \"Name\",\n \"Default table\",\n #~ \"M\",\n \"#fields\",\n \"#rows\",\n #~ ,\"first\",\"last\"\n ]\n rows = []\n for model in models_list:\n if True: # model._meta.managed:\n i += 1\n cells = []\n #~ cells.append(str(i))\n cells.append(fmn(model))\n cells.append(model.get_default_table())\n #~ cells.append(str(model))\n #~ if model._meta.managed:\n #~ cells.append('X')\n #~ else:\n #~ cells.append('')\n cells.append(str(len(model._meta.concrete_fields)))\n qs = model.objects.all()\n n = qs.count()\n cells.append(str(n))\n #~ if n:\n #~ cells.append(obj2str(qs[0]))\n #~ cells.append(obj2str(qs[n-1]))\n #~ else:\n #~ cells.append('')\n #~ cells.append('')\n\n rows.append(cells)\n s += rstgen.table(headers, rows)\n return s", "def print_results(self):\n pass", "def summary(self):\n print(self.model.summary())", "def drawall(self):\r\n for x in self.objectlist:\r\n if x.model:\r\n x.model.draw()", "def summary(self):\r\n print(self.model.summary())", "def print_entries(self):\n self.print_selected_entries(self.entries)", "def __show_all(self):\n print(\"\\nEvents:\\n\")\n self.__show_all_events()\n print(\"\\nMetrics:\\n\")\n self.__show_all_metrics()", "def print_networks(self, verbose):\n print('---------- Networks initialized -------------')\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n if verbose:\n print(net)\n print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n print('-----------------------------------------------')", "def __repr__(self):\r\n printer = 'text 
model name: ' + str(self.name) + '\\n'\r\n printer += ' number of words: ' + str(len(self.words)) +'\\n'\r\n printer += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\r\n printer += ' number of stems: ' + str(len(self.stems)) + '\\n'\r\n printer += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\r\n printer += ' number of different punctuations: ' + str(len(self.punctuation)) \r\n return printer", "def show_instances():\n return get_instances()", "def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]", "def printCars(self):\n for car in self.cars:\n self.logger.debug(car)", "def print(self):\n for fiction in self.fictions:\n print(fiction.__dict__)", "def display_all(self):\n print(\"Price: \" + str(self.price))\n print(\"Speed: \" + str(self.speed) + \"mph\")\n print(\"Fuel: \" + self.fuel)\n print(\"Mileage: \" + str(self.mileage) + \"mpg\")\n print(\"Tax: \" + str(self.tax))\n return self", "def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def print_model(self, model):\n return \"null\"", "def _debug_pyomo_print(self, m):\n print('/' + '='*80)\n print('DEBUGG model pieces:')\n print(' -> objective:')\n print(' ', m.obj.pprint())\n print(' -> variables:')\n for var in m.component_objects(pyo.Var):\n print(' ', var.pprint())\n print(' -> constraints:')\n for constr in m.component_objects(pyo.Constraint):\n print(' ', constr.pprint())\n print('\\\\' + '='*80)\n print('')", "def print_list(self):\n self.print_avec_separateur(\" \")", "def display(self):\r\n os.system('cls')\r\n index = 0\r\n for i in self.list:\r\n print(str(index) + \" \" + i.showRule())\r\n index += 1", "def showtopologies():\n middleware.protocolObj.showTopologies()", "def printTree(self):\n print self.storeTree.movies", "def show_classes():\n for obj in Classes.get_all_obj_list():\n print('\\033[33;1m[%s] [%s]校区 [%s]班级 学费[%s]\\033[0m'.center(60, '-') \\\n % (obj.school_nid.get_obj_by_uuid().name, obj.school_nid.get_obj_by_uuid().addr, \\\n obj.name, obj.tuition))", "def showModels(style_id):\n style = 
session.query(Style).filter_by(id=style_id).one()\n models = session.query(Model).filter_by(style_id=style_id).all()\n return render_template('models.html', models=models, style=style)", "def display(self):\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def print_elements(self):\n for element in self.elements:\n element.print_element()", "def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"", "def print(self):\n print('Name:', self.name)\n print('Camera:', self.camera)\n print('Memory:', self.memory)\n print('Ram:', self.ram)\n print('Price:', self.price)\n print('Image:', self.image)", "def show(self):\n\n pass", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def show_all(self):\n self.explained_variance_score()\n self.max_error()\n self.mean_absolute_error()\n self.mean_squared_error()\n self.median_absolute_error()\n self.r2_score()\n self.mean_poisson_deviance()\n self.mean_gamma_deviance()\n self.feature_importance()\n self.learning_curve()", "def show(self):\n pass", "def print_layers(model):\r\n for i in range(len(model.layers)):\r\n print(\"Printing layer shape: %d\" % i, model.layers[i])\r\n weights = model.layers[i].get_weights()\r\n for weight in weights: # Layer type\r\n print(weight.shape)", "def print(self):\n self._print_title_and_url(self.index, self.title, self.url)\n self._print_metadata_and_abstract(self.abstract, metadata=self.metadata)", "def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())", "def print_model_params(model):\n for param, value in zip(model.param_names, model.parameters):\n print(\"{:0.4f}\\t{}\".format(value, param))", "def print_results(self, regressor=False):\n if regressor:\n for i, model in enumerate(self.list_of_models):\n print \"Model: \", model\n print \"R2 score: \", self.r2_scores[i]\n print \"MSE: \", self.mse_scores[i]\n else:\n for i, model in enumerate(self.list_of_models):\n print \"Model: \", model\n print \"F1 score: \", self.f1_scores[i]\n print \"recall score: \", self.recall_scores[i]\n print \"precision score: \", self.precision_scores[i]\n print \"accuracy score: \", self.accuracy_scores[i]", "def print_networks(self, verbose: bool) -> None:\n print('---------- Networks initialized -------------')\n for name, module in self.modules.items():\n num_params = 0\n for param in module.parameters():\n num_params += param.numel()\n if verbose:\n print(module)\n print('[Network %s] Total number of parameters: %.3f M' % (name, num_params / 1e6))\n print('-----------------------------------------------')\n return", "def print_database(self):\n table_names = self.catalog\n for table_name in table_names:\n table = self.parse_table(table_name)\n if not table:\n continue\n print(f'TABLE NAME: {table_name}\\r\\n')\n print(tabulate(table, headers=\"keys\"))\n print('\\r\\n\\r\\n\\r\\n\\r\\n')", "def plot_model(self,encoder=True):\n if encoder:\n print(\"\\n Summary (encoder):\")\n return tf.keras.utils.plot_model(enc_model, show_shapes=True, show_layer_names=True)\n else:\n print(\"\\n Summary (generator):\")\n return tf.keras.utils.plot_model(gen_model, show_shapes=True, show_layer_names=True)", "def print_tables(self):\n\n conn = self.engine.connect()\n 
self.print_table(self.nodes, conn)\n self.print_table(self.paths, conn)\n self.view_tree(connection=conn)", "def show(self):\r\n for card in self.cards_list:\r\n print(card)", "def dump_model(self):", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def print_summary(self, print_level = 0):\n\n print(\"==========================\")\n print(\"= FUNtoFEM model summary =\")\n print(\"==========================\")\n print(\"Model name:\", self.name)\n print(\"Number of bodies:\", len(self.bodies))\n print(\"Number of scenarios:\", len(self.scenarios))\n print(\" \")\n print(\"------------------\")\n print(\"| Bodies summary |\")\n print(\"------------------\")\n for body in self.bodies:\n print(\"Body:\", body.id, body.name)\n print(\" coupling group:\", body.group)\n print(\" transfer scheme:\", type(body.transfer))\n print(\" shape parameteration:\", type(body.shape))\n for vartype in body.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(body.variables[vartype]))\n if print_level >= 0:\n for var in body.variables[vartype]:\n print(' variable:', var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)\n\n print(\" \")\n print(\"--------------------\")\n print(\"| Scenario summary |\")\n print(\"--------------------\")\n for scenario in self.scenarios:\n print(\"scenario:\", scenario.id, scenario.name)\n print(\" coupling group:\", scenario.group)\n print(\" steps:\", scenario.steps)\n print(\" steady?:\", scenario.steady)\n for func in scenario.functions:\n print(' function:', func.name, ', analysis_type:', func.analysis_type)\n print(' adjoint?', func.adjoint)\n if not scenario.steady:\n print(' time range', func.start, ',', func.stop)\n print(' averaging', func.averaging)\n\n\n for vartype in scenario.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(scenario.variables[vartype]))\n if print_level >= 0:\n for var in scenario.variables[vartype]:\n print(' variable:', var.id, var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)", "def print(self):\n for word in self.words:\n print(word)", "def write(self, output_stream=sys.stdout):\n for model in self.models:\n if len(model.chains) == 0:\n continue\n if len(self.models) > 1:\n print(\"MODEL %4d\" % (model.number), file=output_stream)\n model.write(output_stream)\n if len(self.models) > 1:\n print(\"ENDMDL\", file=output_stream)\n print(\"END\", file=output_stream)", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def print(self):\n\n print(f\"{len(self._sources)} vocabularies given sensitivity {self._sensitivity}.\"\n f\" From best to worst (vocabularies with no matches 
are excluded):\")\n for source in self._sources:\n print(f\"{source.uri}, {self._score_type.__str__()}: {getattr(source.ranking, self._score_type.__str__())}\")", "def show(self) -> None:", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def print_everything(self):\n def print_service(service):\n print\n print '====[ %s ]==== ' % service.__repr__(path_only=True)\n print\n\n print 'Actions:'\n for name, action in service.get_actions():\n print ' - ', name, action\n print\n\n for name, subservice in service.get_subservices():\n print_service(subservice)\n\n print_service(self.root)", "def print_all(self):\n if not request:\n raise exceptions.Warning(_(''), _(''))\n session_id = request.session.sid\n config = self.env['ir.config_parameter']\n addons_url = config.get_param('addons_path')\n phantomjs_path = config.get_param('phantomjs_path')\n phantomjs_path = 'phantomjs' if not phantomjs_path else phantomjs_path\n print_url = self.env.context.get('protocol_url', False)\n if print_url:\n print_urls = [print_url]\n else:\n print_urls = self._get_print_urls()\n if not print_urls:\n return\n phantom = [\n phantomjs_path,\n addons_url +\n '/quality_protocol_report/static/src/js/phantom_url_to_pdf.js',\n session_id, \"/tmp\"] + print_urls\n process = subprocess.Popen(phantom)\n process.communicate()\n filenames = []\n for url in print_urls:\n fname = url.replace('/', '').replace(':', '')\n weight_pos = fname.find('?weight=')\n if weight_pos > -1:\n fname = fname[weight_pos+8:weight_pos+10] + '-' + fname[:weight_pos]\n filenames.append('/tmp/' + fname + '.pdf')\n filepath = self._merge_pdf(sorted(filenames))\n fildecode = open(filepath, 'r')\n encode_data = fildecode.read()\n fildecode.close()\n active_model = self.env.context.get('active_model', False)\n active_id = self.env.context.get('active_id', False)\n ungrouped_also = self.env.context.get('print_ungrouped_also', False)\n if active_model and active_id and not ungrouped_also:\n active_name = self.env[active_model].browse([active_id]).name\n else:\n dt = fields.Datetime.context_timestamp(self, datetime.now())\n active_name = dt.strftime('%d-%m-%Y_%Hh%M')\n filename = 'protocolo.pdf' if print_url else \\\n 'protocolos_' + str(active_name).lower() + '.pdf'\n attachment_data = {\n 'name': filename,\n 'datas_fname': filename,\n 'datas': base64.b64encode(encode_data),\n 'res_model': active_model,\n 'res_id': 0 if print_url else self.env.context.get('active_id', False),\n }\n self.env['ir.attachment'].search(\n [('name', '=', attachment_data['name']),\n ('res_id', '=', attachment_data['res_id']),\n ('res_model', '=', attachment_data['res_model'])]).unlink()\n attachment = self.env['ir.attachment'].create(attachment_data)\n\n filenames.append(filepath)\n for my_file in filenames:\n os.remove(my_file)\n\n if print_url:\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/binary/saveas?model=ir.attachment&field=datas' +\n '&filename_field=name&id=%s' % (attachment.id),\n 'target': 'self',\n }\n else:\n return {'type': 'ir.actions.act_window_close'}", "def show(self, type = 'all'):\n import mayavi_plotting\n\n mayavi_plotting.show_model(self, type)", "def print(self):\n for l in range(self.h+1):\n print(\"Weight matrix between layer \" + str(l) + \" and layer \" + str(l+1))\n print(self.W[l])", "def print_vectors(self):\n print(\"Vectors:\")\n for name, vector 
in self.get_vectors():\n self.print_vector(name, vector.items)", "def print(self):\n for var in self.variables:\n print(var)", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(\"\\nModel Name: \\\"{}\\\"\".format(name))\n print(model)\n print(\"The number of parameters: {}\".format(num_params))" ]
[ "0.7550581", "0.7505565", "0.74779797", "0.7463235", "0.7299305", "0.689944", "0.6806887", "0.67993057", "0.6793462", "0.67158437", "0.6685057", "0.6670043", "0.6669607", "0.6669607", "0.663256", "0.66235834", "0.66213685", "0.66103745", "0.6587718", "0.6491061", "0.6472574", "0.64552635", "0.6428352", "0.63843733", "0.63615626", "0.6347247", "0.6292509", "0.6288611", "0.62873876", "0.62758833", "0.6244994", "0.6236598", "0.6183756", "0.61820364", "0.6175692", "0.61713964", "0.61662114", "0.6165303", "0.612452", "0.6120478", "0.61134154", "0.611003", "0.6101925", "0.61004853", "0.6097579", "0.60969186", "0.6079823", "0.60676825", "0.6067241", "0.6058297", "0.60420847", "0.6040918", "0.60373694", "0.6024156", "0.60178065", "0.60153764", "0.6013112", "0.6012826", "0.60122836", "0.6011238", "0.5990772", "0.5974263", "0.59728026", "0.59644926", "0.59610903", "0.5960349", "0.5951344", "0.59507734", "0.59507734", "0.59507143", "0.59285307", "0.59226906", "0.5921765", "0.59162754", "0.5910214", "0.59080505", "0.59075505", "0.58969885", "0.58953595", "0.58938044", "0.58712095", "0.5861993", "0.58602655", "0.5859389", "0.5859075", "0.5856426", "0.58539265", "0.58510745", "0.5847828", "0.5831986", "0.58306867", "0.582985", "0.582854", "0.58265066", "0.5817226", "0.58163214", "0.58088607", "0.5803557" ]
0.75561774
2
Build a dictionary containing everything we know about a user.
def build_profile(first, last, **user_info):
    profile = {}
    profile['first_name'] = first
    profile['last_name'] = last
    for key, value in user_info.items():
        profile[key] = value
    return profile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def get_user_info_by_id(self, user_id: int) -> dict:", "def user_dict(self):\n return {\n \"user_id\": self.user_id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othernames\": self.othernames,\n \"username\": self.username,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"is_admin\": self.is_admin,\n \"password\": self.password,\n \"registered_on\": self.registered_on\n }", "def user2dict(self):\n d = {}\n d['username'] = self.username\n d['level'] = self.level\n d['name'] = self.name\n d['email'] = self.email\n d['creation'] = self.creation\n d['update'] = self.update\n d['nsentences'] = self.nsentences\n d['nsessions'] = self.nsessions\n d['score'] = self.score\n d['pw_hash'] = self.pw_hash\n return d", "def user_data(self):\n itemuser = self.data['user']\n my_user_dict = {'user_id': itemuser['id'], 'user_name': itemuser['name'],\n 'user_handle': itemuser['screen_name'], 'user_desc': itemuser['description'],\n 'twitter_birthday': itemuser['created_at'], 'user_location': itemuser['location'],\n 'followers': itemuser['followers_count'], 'favorites': itemuser['favourites_count'],\n 'statuses': itemuser['statuses_count']}\n return my_user_dict", "def get_user(self, username):\n return {}", "def get_user_info_by_name(self, username: str) -> dict:", "def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.update(data)\n return dct", "def _generate_users(self):\n users = {}\n args = self._add_user()\n #Grab info from args\n users[args[\"userID\"]] = {}\n users[args[\"userID\"]][\"name\"] = args[\"name\"]\n users[args[\"userID\"]][\"webhook_url\"] = args[\"webhook_url\"]\n users[args[\"userID\"]][\"blacklist\"] = args[\"blacklist\"]\n #Try to grab override info, default to blank if doesn't exist\n users[args[\"userID\"]][\"override_user\"] = args.get(\"overrideUser\", \"\")\n users[args[\"userID\"]][\"override_userid\"] = args.get(\"overrideUserID\", \"\")\n users[args[\"userID\"]][\"override_oauth\"] = args.get(\"overrideOauth\", \"\")\n fileIO.save_json(\"users.json\", users)", "def get_users(self):\n return {key: value.user for key, value in self}", "def rootuser_info(self, datadict):\n\n dict1 = OrderedDict()\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']\n\n userdict = OrderedDict()\n keylist = ['id', 'username', 'full_name', 'biography', 'edge_follow', 'edge_followed_by', 'is_private', 'external_url', 'profile_pic_url_hd']\n\n for key in keylist:\n if key is 'edge_follow':\n userdict['following'] = dict1[key]\n elif key is 'edge_followed_by':\n userdict['followers'] = dict1[key]\n else:\n userdict[key] = dict1[key]\n\n userdict['platform'] = datadict['platform']\n\n return (json.dumps(userdict, indent=4))", "def user_to_dict(self, user):\n udd = user._to_dict() #pylint: disable=protected-access\n response_dict = {}\n for arg in self.signup_args:\n response_dict[arg] = udd.get(arg)\n response_dict['user_id'] = user.get_id()\n response_dict['user_key'] = user.key.urlsafe()\n return response_dict", "def _collect_properties(self):\n properties = {\n 'userid': 
self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def _get_userinfo(self):\n if not hasattr(self, \"_userinfo\"):\n self._userinfo = {\n \"name\" : self.user_name,\n \"email\" : self.user_email\n }\n if self.user_id:\n u = self.user\n if u.email:\n self._userinfo[\"email\"] = u.email\n\n # If the user has a full name, use that for the user name.\n # However, a given user_name overrides the raw user.username,\n # so only use that if this review has no associated name.\n if u.get_full_name():\n self._userinfo[\"name\"] = self.user.get_full_name()\n elif not self.user_name:\n self._userinfo[\"name\"] = u.username\n return self._userinfo", "def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }", "def user_info(self):\n response = self.query('user_info')\n return response", "def _get_user_info(self):\n\n if not self._refresh_token:\n raise ValueError(\"Refresh Token not set\")\n\n # Add access token to the headers\n add_headers = dict(self._default_headers)\n add_headers['Authorization'] = self._access_token\n\n resp = requests.get(BASE_URL + \"user/{}\".format(self._user_id), headers=add_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to retrieve user info: {}\".format(resp))\n\n vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))\n\n # Print generic user info\n print(\"\")\n print(\"== USER INFO ==\")\n print(\"Username: {}\".format(vals.get('user').get('username')))\n print(\"Nickname: {}\".format(vals.get('user').get('nickname')))\n print(\"Usage: {} MB / {} MB\".format(int(int(vals.get('user').get('quota').get('usage')) / (1024*1024)),\n int(int(vals.get('user').get('quota').get('limit')) / (1024*1024))))\n print(\"\")\n\n # Grab folder ids we care about\n self._user_sync_folders_url = vals.get('user').get('syncfolders')", "def user(request):\n if request.user.is_anonymous() or not request.org:\n is_admin = False\n partner = None\n is_faq_only = True\n else:\n is_admin = request.user.can_administer(request.org)\n partner = request.user.get_partner(request.org)\n is_faq_only = request.user.must_use_faq()\n\n return {\n 'user_is_admin': is_admin,\n 'user_partner': partner,\n 'user_is_faq_only': is_faq_only\n }", "def _get_user_data(self):\n return {\"key\": self._key}", "def extract_user_short(data):\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }", "def get_user_info(self) -> Dict:\n try:\n return self.client.get_user()\n except ClientError as cerr:\n if cerr.response['Error']['Code'] == 'AccessDenied':\n # If the user doesn't have access rights to IAMClient\n # we can find the user name in the error response\n user_name = StrUtils.find_expression(str(cerr), self._USER_NAME_REGEX)\n return {'UserName' : user_name,\n 'User' : {'UserName' : user_name,\n 'UserId' : ''}}\n raise cerr\n except Exception as ex:\n raise GetUserInfoError(error_msg=ex)", "def user_info(self):\n resp = self._get(get_url('user'))\n 
raise_on_error(resp)\n ret = resp.json()\n return UserInfo(ret)", "async def userinfo(user: User = Security(require_user)):\n user = await user.query.gino.first()\n return user.to_dict()", "def to_dict(self):\n user_idt = self.user_idt_format.format(user=self.user_id)\n\n return {'user': self.user_id,\n 'is_admin': self.is_admin,\n 'read_only': self.read_only,\n 'show_deleted': self.show_deleted,\n 'auth_token': self.auth_token,\n 'request_id': self.request_id,\n 'roles': self.roles,\n 'user_identity': user_idt,\n 'user_name': self.user_name}", "def generate_user_headers(self):\n return {**self.generate_client_headers(), **{\"username\": self._user,\n \"password\": self._password,\n }}", "def get_user_info(self, project):\n return {\n 'is_admin': project.is_admin(self.context.get('user')),\n 'can_contribute': project.can_contribute(self.context.get('user')),\n 'is_involved': project.is_involved(self.context.get('user')),\n 'can_moderate': project.can_moderate(self.context.get('user'))\n }", "def _user_status():\n rv = {\n 'messages': map(unicode, get_flashed_messages()),\n }\n if current_user.is_anonymous:\n rv.update({\n 'logged_id': False\n })\n else:\n rv.update({\n 'logged_in': True,\n 'name': current_user.display_name\n })\n return rv", "def get_serialized_user(cls, user):\n return {\n 'email': user.email,\n 'is_superuser': user.is_superuser,\n 'name': user.name,\n 'sodar_uuid': str(user.sodar_uuid),\n 'username': user.username,\n }", "def user_info(self) -> UserInfo:\n return self.__userInfo", "def get_information_of(user: User, lang: str) -> Dict[str, Any]:\n if user.nickname == nick_of_anonymous_user:\n return _get_special_infos(lang)\n ret_dict = dict()\n ret_dict['public_nick'] = user.global_nickname\n ret_dict['last_action'] = sql_timestamp_pretty_print(user.last_action, lang)\n ret_dict['last_login'] = sql_timestamp_pretty_print(user.last_login, lang)\n ret_dict['registered'] = sql_timestamp_pretty_print(user.registered, lang)\n ret_dict['group'] = start_with_capital(user.group.name)\n\n ret_dict['is_male'] = user.gender == 'm'\n ret_dict['is_female'] = user.gender == 'f'\n ret_dict['is_neutral'] = user.gender != 'm' and user.gender != 'f'\n\n arg_votes, stat_votes = get_mark_count_of(user, False)\n db_reviews_duplicate = DBDiscussionSession.query(ReviewDuplicate).filter_by(detector_uid=user.uid).all()\n db_reviews_edit = DBDiscussionSession.query(ReviewEdit).filter_by(detector_uid=user.uid).all()\n db_reviews_delete = DBDiscussionSession.query(ReviewDelete).filter_by(detector_uid=user.uid).all()\n db_reviews_optimization = DBDiscussionSession.query(ReviewOptimization).filter_by(detector_uid=user.uid).all()\n db_reviews = db_reviews_duplicate + db_reviews_edit + db_reviews_delete + db_reviews_optimization\n\n get_tv_dict = get_textversions(user, lang)\n ret_dict['statements_posted'] = len(get_tv_dict.get('statements', []))\n ret_dict['edits_done'] = len(get_tv_dict.get('edits', []))\n ret_dict['reviews_proposed'] = len(db_reviews)\n ret_dict['discussion_arg_votes'] = arg_votes\n ret_dict['discussion_stat_votes'] = stat_votes\n ret_dict['avatar_url'] = get_profile_picture(user, 120)\n ret_dict['discussion_stat_rep'], _ = get_reputation_of(user)\n\n return ret_dict", "def get_users_info(): \n \n data = user_obj.get_users_info()\n return data", "def view_user(user):\n return {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"email\": user.email,\n \"profile_pic\": user.profile_pic,\n }", "def get_user_variable_dict(self):\n 
user_variable_keys = ['User variable {}'.format(i) for i in range(len(self.user_variables))]\n return dict(zip(user_variable_keys, self.user_variables))", "def get_user_details(self, response):\n fullname, first_name, last_name = self.get_user_names(\n response.get(\"fullName\"),\n response.get(\"firstName\"),\n response.get(\"lastName\"),\n )\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\") or \"\",\n \"fullname\": fullname,\n \"first_name\": first_name,\n \"last_name\": last_name,\n }", "def user_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time()*1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/users/me', param, self.timeout)", "def User(rank, registered_at, authenticated_at, password_hash):\n return {'rank': rank, 'registered_at': registered_at,\n 'authenticated_at': authenticated_at, 'password_hash': None }", "def json(self):\n\n this_user_detail = dict(\n arn=self.arn,\n create_date=self.create_date,\n id=self.user_id,\n inline_policies=self.inline_policies_json,\n inline_policies_count=len(self.inline_policies_json),\n # groups=self.groups,\n groups=self.groups_json,\n path=self.path,\n managed_policies_count=len(self.attached_managed_policies),\n managed_policies=self.attached_managed_policies_pointer_json,\n risks=self.consolidated_risks\n )\n return this_user_detail", "def view_user(self):\n\n logged_in = authenticated_userid(self.request)\n return {\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n }", "def fetch_user(user_id):\n user = user_collection.find_one({\"_id\": user_id})\n user_bookmarks = list()\n for project_id in user[\"bookmarks\"]:\n project = project_collection.find_one({\"_id\": project_id})\n if project is None:\n continue\n bookmark_details = {\n \"PROJECT_ID\": str(project_id),\n \"projectTitle\": project[\"projectTitle\"],\n \"projectDescription\": project[\"projectDescription\"],\n }\n user_bookmarks.append(bookmark_details)\n user_contributions = list()\n for project_id in user[\"contributions\"]:\n project = project_collection.find_one({\"_id\": project_id})\n if project is None:\n continue\n contribution_details = {\n \"projectTitle\": project[\"projectTitle\"],\n \"projectDescription\": project[\"projectDescription\"],\n }\n user_contributions.append(contribution_details)\n user_dict = {\n \"username\": user[\"username\"],\n \"userid\": user[\"userid\"],\n \"email\": user[\"email\"],\n \"avatar\": user[\"avatar\"],\n \"githubURL\": user[\"githubURL\"],\n \"linkedinURL\": user[\"linkedinURL\"],\n \"stackoverflowURL\": user[\"stackoverflowURL\"],\n \"skills\": user[\"skills\"],\n \"bookmarks\": user_bookmarks,\n \"contributions\": user_contributions,\n }\n return user_dict", "def _set_user_info(self):\n sha = sha1(self.email).hexdigest()\n user_info = redis.hgetall(\"sl:account:{}\".format(sha))\n\n if (type(user_info) != dict or\n user_info.get(\"password\") != self.password):\n user_info = {}\n\n try:\n self.plan = Plan.from_id(user_info.get(\"plan\"))\n except SleekException:\n self.plan = None\n self.customer_token = str_to_none(\n user_info.get(\"customer_token\")\n )\n self.subscription_token = str_to_none(\n user_info.get(\"subscription_token\")\n )\n self.subscription_end = str_to_none(\n user_info.get(\"subscription_end\")\n )", "def userinfo(self, **kwargs):\n metadata = self.load_server_metadata()\n resp = self.get(metadata['userinfo_endpoint'], **kwargs)\n resp.raise_for_status()\n data = 
resp.json()\n return UserInfo(data)", "def format_user(self, user):\n user_obj = User.FromString(user)\n return {\n \"username\": user_obj.username,\n \"gender\": \"MALE\" if user_obj.gender == 0 else (\n \"FEMALE\" if user_obj.gender == 1 else \"UNDEFINED\"),\n \"birthday\": user_obj.birthday,\n \"user_id\": str(user_obj.user_id),\n }", "def userinfo(self):\n return self._userinfo", "def _get_user_info_cookie_data(request, user):\n\n # Set a cookie with user info. This can be used by external sites\n # to customize content based on user information. Currently,\n # we include information that's used to customize the \"account\"\n # links in the header of subdomain sites (such as the marketing site).\n header_urls = {'logout': reverse('logout')}\n\n # Unfortunately, this app is currently used by both the LMS and Studio login pages.\n # If we're in Studio, we won't be able to reverse the account/profile URLs.\n # To handle this, we don't add the URLs if we can't reverse them.\n # External sites will need to have fallback mechanisms to handle this case\n # (most likely just hiding the links).\n try:\n header_urls['account_settings'] = reverse('account_settings')\n header_urls['learner_profile'] = reverse('learner_profile', kwargs={'username': user.username})\n except NoReverseMatch:\n pass\n\n # Add 'resume course' last completed block\n try:\n header_urls['resume_block'] = retrieve_last_sitewide_block_completed(user)\n except User.DoesNotExist:\n pass\n\n header_urls = _convert_to_absolute_uris(request, header_urls)\n\n image_urls = {}\n try:\n image_urls = get_profile_image_urls_for_user(user)\n except UserProfile.DoesNotExist:\n pass\n\n image_urls = _convert_to_absolute_uris(request, image_urls)\n\n user_info = {\n 'version': settings.EDXMKTG_USER_INFO_COOKIE_VERSION,\n 'username': user.username,\n 'header_urls': header_urls,\n 'user_image_urls': image_urls,\n }\n\n return user_info", "def get_data(self, user_id: str) -> dict:\n data = {\n 'id': str(user_id),\n 'first_name': '',\n 'last_name': '',\n 'fullname': '',\n 'email': '',\n 'internal': False,\n }\n try:\n _ = UUID(user_id) # noQA\n except (ValueError, AttributeError):\n logger.error(f'Actor id is not a valid UUID: {user_id}')\n else:\n if user_id == SystemUser.id:\n raw_data = SystemUser\n data['first_name'] = raw_data.first_name\n data['last_name'] = raw_data.last_name\n data['fullname'] = raw_data.title\n data['email'] = raw_data.email\n data['internal'] = raw_data.internal\n\n return data", "def get_user_info(self) -> str:\n return self._searcher.get_user_info()", "def get_user_details():\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing username parameter\"')\n return jsonify({\"msg\": \"Missing username parameter\"}), 400\n\n try:\n username = User.get_username_by_id(current_user)\n result = UserDetail.get_printable_user_detail(username)\n\n if result['userType'] == 'adopter':\n animal_preference = Adopter.get_animal_preference(username)\n result['animalPreference'] = animal_preference\n\n dispositions = UserDetail.get_user_dispositions(User.get_username_by_id(current_user))\n result['dispositions'] = dispositions['dispositions']\n elif result['userType'] == 'shelter worker':\n result['shelter'] = ShelterWorker.get_shelter_by_username(username)\n\n except Exception as e:\n return jsonify(message='{}'.format(e)), 510\n\n if result:\n return jsonify(message=result), 200\n else:\n return jsonify(message='User {} not found'.format(username)), 511", "def fetch_user_data(self, 
user_id):\n\n log.info('Fetching user data from Twitter for ID %s' % user_id)\n user = self.api.get_user(user_id)\n props = user.__dict__ # user properties\n\n del props['_api'], props['status'] # no embedded objects\n\n props['accessed'] = datetime.datetime.now()\n props['detail'] = 'full'\n props['type'] = 'user'\n\n return props", "def extract_user_info(client_config):\n # test if there isn't a system user or if there isn't a name for that\n # user, return None\n if ('system user' not in client_config or\n 'name' not in client_config['system user']):\n return None\n\n user_info = dict()\n user_info['system_key'] = dict(\n user=client_config['system user']['name'],\n access_key=client_config['system user']['access key'],\n secret_key=client_config['system user']['secret key'],\n )\n return user_info", "def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}", "def get_user_info(self):\n user_info = self.data_source.get_user_info(self.user_id)\n\n return user_info", "def getUserInfo(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n displayName = j['displayName']\n name = j['name']\n uid = j['id']\n isBanned = j['isBanned']\n joinDate = j['created']\n description = j['description']\n return displayName,name,uid,isBanned,joinDate,description", "def json(self):\n result = {}\n for user in self.users:\n result[user.user_id] = user.json\n return result", "def users_view():\n data = get_data()\n return [{'user_id': i, 'name': 'User {0}'.format(str(i))}\n for i in data.keys()]", "def users_instance():\n return {\n \"blocked\": False,\n \"created_at\": \"2022-10-21T04:10:34.240Z\",\n \"email\": \"[email protected]\",\n \"email_verified\": False,\n \"family_name\": \"Kerluke\",\n \"given_name\": \"Nick\",\n \"identities\": [\n {\n \"user_id\": \"15164a44-8064-4ef9-ac31-fb08814da3f9\",\n \"connection\": \"Username-Password-Authentication\",\n \"provider\": \"auth0\",\n \"isSocial\": False,\n }\n ],\n \"name\": \"Linda Sporer IV\",\n \"nickname\": \"Marty\",\n \"picture\": \"https://secure.gravatar.com/avatar/15626c5e0c749cb912f9d1ad48dba440?s=480&r=pg&d=https%3A%2F%2Fssl.gstatic.com%2Fs2%2Fprofiles%2Fimages%2Fsilhouette80.png\",\n \"updated_at\": \"2022-10-21T04:10:34.240Z\",\n \"user_id\": \"auth0|15164a44-8064-4ef9-ac31-fb08814da3f9\",\n \"user_metadata\": {},\n 
\"app_metadata\": {},\n }", "def data(self, user=None):\n return {\n \"provider\": self.BACKEND,\n \"access_token\": self.access_token,\n \"client_id\": self.client_id,\n \"honor_code\": \"true\",\n \"country\": \"US\",\n \"username\": user.username if user else \"test_username\",\n \"name\": user.first_name if user else \"test name\",\n \"email\": user.email if user else \"[email protected]\"\n }", "async def retrieve_user_events(self, user_id: int) -> Dict[int, BaseEvent]:\n user_events: Dict[int, BaseEvent] = {}\n event: BaseEvent\n for event_id, event in self.upcoming_events.items():\n if event.organizer.id == user_id:\n user_events[event_id] = event\n\n return user_events", "def fetch_user_info(self) -> UserInfo:\n url = buildCommandUrl(\n self.server, \"/as/user/keep\", self.__userInfo.strToken)\n result = json_request(\"GET\", url, token=self.__userInfo.strToken)", "def _get(self, query=None):\n if not query:\n user_data = DB_USER_TABLE.all()\n else:\n user_data = DB_USER_TABLE.search(query)\n\n res = {\n \"total_queried\" : len(user_data),\n \"_embedded\" : {\n \"users\" : self.embed_user_data_in_result(user_data)\n },\n \"_links\" : self.make_links({\n \"self\" : UserList.get_self_url(),\n \"contained_in\" : Root.get_self_url()\n })\n }\n return res", "def get_users(self):\n users = {}\n\n for index, row in self.users.iterrows():\n\n user_id = int(row[0])\n description = row[1]\n user_ratings = []\n\n ratings = self.ratings.loc[self.ratings[0] == user_id]\n\n # Get user ratings\n for index_ratings, row_rating in ratings.iterrows():\n\n user_ratings.append((row_rating[1], float(row_rating[2]))) # (movie, score)\n\n # Append user object in dictionary\n users[user_id] = User(id_user=user_id, description=description, ratings=user_ratings)\n\n return users", "def get_users(self):\n\n users = {}\n command = \"/user print terse\"\n output = self._send_command(command)\n\n for user in parse_terse_output(output):\n users[user.get('name')] = {\n \"group\": user.get('group')\n }\n\n return users", "def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile", "def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile", "def get_user_details(self, response):\n values = {\n 'username': unquote(response['nick']),\n 'email': unquote(response['email']),\n 'first_name': unquote(response['first_name']),\n 'last_name': unquote(response['last_name'])\n }\n\n if values['first_name'] and values['last_name']:\n values['fullname'] = '%s %s' % (values['first_name'],\n values['last_name'])\n return values", "def to_dict(self):\n user_idt = self.user_idt_format.format(\n user=self.user_id or '-',\n tenant=self.project_id or '-',\n domain=self.domain_id or '-',\n user_domain=self.user_domain_id or '-',\n p_domain=self.project_domain_id or '-')\n\n return {'user': self.user_id,\n 'tenant': self.project_id,\n 'domain': self.domain_id,\n 'user_domain': self.user_domain_id,\n 'project_domain': self.project_domain_id,\n 'is_admin': self.is_admin,\n 'read_only': self.read_only,\n 'show_deleted': self.show_deleted,\n 'auth_token': self.auth_token,\n 'request_id': self.request_id,\n 'resource_uuid': self.resource_uuid,\n 'roles': self.roles,\n 'user_identity': user_idt,\n 'is_admin_project': self.is_admin_project}", "def 
get_user_details(self, response):\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\"),\n \"fullname\": response.get(\"username\"),\n }", "def get_user_data(prs, client_id, client_secret):\n users = {}\n for owner, repo, number, pr in prs:\n username = pr.username\n\n # Initialize the User if needed\n if username not in users:\n print(pr.user_url, file=sys.stderr)\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret\n }\n resp = requests.get(pr.user_url, params=payload)\n\n # Abort if the return is an error\n out = resp.json()\n if 'message' in out:\n pprint.pprint(out, file=sys.stderr)\n raise Exception(resp.text)\n\n user = User(out)\n users[username] = user\n\n users[username].add_pr(pr)\n\n return users", "def getUserInfo(data):\n\tusername = data[\"session_username\"]\n\tuser = Users.objects.filter(username=username).first()\n\n\tresponse = {}\n\n\tif not user:\n\t\treturn {\"Success\": False, \"Error\": \"Unable to retrieve the user information from database\"}\n\n\tresponse[\"Success\"] = True\n\tresponse[\"Username\"] = user.username\n\tresponse[\"Email\"] = user.email\n\tresponse[\"Verified\"] = user.verified\n\tresponse[\"Level\"] = user.level\n\tresponse[\"Experience\"] = user.experience\n\tresponse[\"Coins\"] = user.coins\n\tresponse[\"Preferences\"] = {\"Grid Opacity\": user.pref_grid}\n\n\treturn response", "def extract_user_gql(data):\n return {\n \"pk\": int(data[\"id\"]),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data[\"is_private\"],\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n \"media_count\": data[\"edge_owner_to_timeline_media\"][\"count\"],\n \"follower_count\": data[\"edge_followed_by\"][\"count\"],\n \"following_count\": data[\"edge_follow\"][\"count\"],\n \"biography\": data[\"biography\"],\n \"external_url\": data[\"external_url\"],\n \"is_business\": data[\"is_business_account\"],\n }", "def get_user_info(username: str) -> dict:\n api = f\"https://api.github.com/users/{username}\"\n\n return requests.get(api).json()", "def lookup_user_info(self, user_id: str) -> Optional[Dict]:\n user_info = None\n try:\n user_info = self.web_client.users_info(user=user_id)\n except Exception:\n LOGGER.exception('Cannot get user info for {}'.format(user_id))\n return user_info", "def user(request):\n\tprofile = {}\n\tif (request.user.is_authenticated()==True) and(request.user is not None):\n\t\tprofile = UserProfile.objects.get(user_id=request.user)\n\treturn {\n\t\t'user': request.user,\n\t\t'profile':profile\n\t}", "def getUserInfosFromLoadedUsers(self,loaded_users,date_type):\n user_infos={}\n def addToUserInfo(loaded_user):\n user_infos[str(loaded_user.getUserID())]=loaded_user.getUserInfo(date_type) #python xmlrpc required keys not to be integers\n \n map(addToUserInfo,loaded_users)\n return user_infos", "def generate_accounts_dict(john):\n users = {}\n # Read in cracked password from John output and update user object in dictionary\n jlines = john.read().splitlines()\n for j in jlines:\n if \":\" in j:\n if not j.split(\":\")[0].endswith(\"$\"): # Eliminate machine hashes\n # print \"%s : %s\" % (j.split(\":\")[0], j.split(\":\")[1])\n users[j.split(\":\")[0]] = j.split(\":\")[1]\n return users", "def user_info(self):\n return self.auth.get_user_by_session()", "def get_logging_values(self):\n values = {'user_name': self.user_name}\n values.update(self.to_dict())\n return values", "def _parse_user_data (self, 
netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data", "def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)", "def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)", "def _add_user(data: dict) -> dict:\n user = create_user()\n name = []\n if 'first_name' in data:\n name.append(data['first_name'])\n if 'middle_name' in data:\n name.append(data['middle_name'])\n if 'last_name' in data:\n name.append(data['last_name'])\n user['name'] = ' '.join(name)\n if 'role' in data:\n user['exp']['exp']['title'] = data['role']\n if 'affiliation' in data:\n user['abs'] = data['affiliation']\n user['exp']['exp']['company'] = data['affiliation']\n elif 'organization' in data:\n user['abs'] = data['organization']\n user['exp']['exp']['company'] = data['organization']\n phone = []\n if 'phone' in data:\n phone.append(data['phone'])\n if 'phone_ext' in data:\n phone.append(data['phone_ext'])\n user['contact']['phone'] = '-'.join(phone)\n user['contact']['email'] = data['email'] if 'email' in data else ''\n if 'degrees' in data:\n if not user.title:\n user['edu']['degree'] = data['degrees']\n if len(user['name']) < 0:\n user['name'] = user['contact']['email'] if len(user['contact']['email']) > 0 else 'Anonymous'\n return user", "def itemize_user(user_id):\n item = {}\n user = User.query.get(user_id)\n if user:\n item['user_name'] = user.username\n # end if user\n return item", "def globals(self, user):\n objects = {\n \"history\": History.objects(user=user),\n \"user\": User.objects(id=user)\n }\n\n methods = {\n \"isonce\": functools.partial(self.isonce, *[user]),\n \"last_history\": functools.partial(self.last_history, *[user]),\n }\n return dict(\n **objects,\n **methods\n )", "def userinfo(self, access_token: str) -> dict[str, Any]:\n data: dict[str, Any] = self.client.get(\n url=f\"{self.protocol}://{self.domain}/userinfo\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n )\n return data", "def user_data(self, access_token, *args, **kwargs):\n response = self.request(\n \"https://openapi.naver.com/v1/nid/me\",\n headers={\n \"Authorization\": f\"Bearer {access_token}\",\n \"Content_Type\": \"text/json\",\n },\n )\n\n data = response.json()\n\n return {\n \"id\": self._fetch(data, \"id\"),\n \"email\": self._fetch(data, \"email\"),\n \"username\": self._fetch(data, \"name\"),\n \"nickname\": self._fetch(data, \"nickname\"),\n \"gender\": self._fetch(data, \"gender\"),\n \"age\": self._fetch(data, \"age\"),\n \"birthday\": self._fetch(data, \"birthday\"),\n \"profile_image\": self._fetch(data, \"profile_image\"),\n }", "async def user_data(self, ctx, user=None):\n if user is None:\n user = ctx.author\n\n for member in 
ctx.guild.members:\n if member.mention == user:\n user = member\n\n conc, c = await utilities.load_db()\n c.execute(\"SELECT uid, karma FROM members WHERE uid = (:uid)\", {'uid': user.id})\n uid, karma = c.fetchall()[0]\n\n await utilities.single_embed(\n channel=ctx,\n title='User Info',\n thumb_url=user.avatar_url,\n name=user.name,\n value=f'**Nickname**: {user.nick}\\n'\n f'**Karma**: {karma}\\n'\n f'**User ID**: {user.id}\\n'\n f'**Joined Discord**: {user.created_at}\\n'\n f'**Joined {user.guild.name}**: {user.joined_at}\\n'\n f'**Roles**: {\", \".join([role.name for role in user.roles if role.name != \"@everyone\"])}'\n )", "def get_random_user(self):\r\n from provider.models import User\r\n u = User.objects.order_by('?')[0]\r\n return {\"username\": u.username, \"password\": u.password, \"fullname\": u.fullname}", "def get(self):\n return self.context.as_dict(self.user)", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def get_kwargs(self):\n return {\n 'user': self.user,\n }", "def __to_dict(self):\n our_dict = {'username': self.username, 'email': self.email,\n 'name': self.name, 'enable': self.enable}\n return our_dict", "def get_user_details(self, response):\n\n return {\n 'email': response.get('email'),\n 'id': response.get('id'),\n 'full_name': response.get('name')\n }", "def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None", "def getCurrentUserData(self):\r\n userDict = {}\r\n for c in range(self.view.userTable.columnCount()):\r\n colName = self.view.userTable.horizontalHeaderItem(c).text()\r\n userDict[colName] = self.view.userTable.item(self.view.userTable.currentRow(), c).text()\r\n \r\n return userDict", "def get_users(self):\n # remove some user media fields that we can't submit back\n def clean_media(entry):\n entry.pop(\"mediaid\", None)\n entry.pop(\"userid\", None)\n entry.pop(\"description\", None)\n return entry\n zabbix_users = self.conn.user.get(selectMedias=\"extend\", selectUsrgrps=\"extend\")\n zabbix_users = {user[\"alias\"].lower(): User(\n id=user[\"userid\"],\n name=user[\"name\"],\n surname=user[\"surname\"],\n alias=user[\"alias\"],\n groups=set(g[\"usrgrpid\"] for g in user[\"usrgrps\"]),\n media=[clean_media(entry) for entry in user[\"medias\"]],\n ) for user in zabbix_users}\n return zabbix_users", "def get_user(user_id) -> dict:\n user = (\n db.session.query(Users.username, Users.name, Users.classnum, Users.email)\n .filter_by(id=user_id)\n .first()\n )\n return {\n \"username\": user.username,\n \"name\": user.name,\n \"classnum\": user.classnum,\n \"email\": user.email,\n }", "def getdat(user):\r\n profile = user.profile\r\n return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]", "def message_query_users_wo_data(self) -> dict:\n message = dict(\n version=self.protocol_version,\n session_id=self.session_id,\n type=AuthenticationMessages.MessageTypes.GET_USERS.value,\n )\n\n self.logger.info(self.json_dump_pretty(message))\n\n return 
message" ]
[ "0.739899", "0.739899", "0.73685396", "0.7349614", "0.7152368", "0.7149312", "0.6997632", "0.68651915", "0.6837776", "0.68375385", "0.68306404", "0.68195456", "0.67827326", "0.6781717", "0.67752063", "0.6750619", "0.6731149", "0.6635023", "0.6614327", "0.6569148", "0.65338075", "0.6502224", "0.6462913", "0.64578867", "0.64478874", "0.6415348", "0.6384353", "0.6377659", "0.63619936", "0.6346344", "0.63364285", "0.63290894", "0.63282305", "0.63135314", "0.6313205", "0.6258577", "0.62549776", "0.6240729", "0.62032527", "0.6200676", "0.61894196", "0.6187415", "0.6182336", "0.6145506", "0.6140101", "0.61388505", "0.6126329", "0.6111324", "0.6108687", "0.60958314", "0.6093953", "0.6082277", "0.6077777", "0.60724914", "0.60664785", "0.6066131", "0.6054861", "0.6049761", "0.60467625", "0.6043376", "0.6039398", "0.60349786", "0.6032816", "0.6032816", "0.6030689", "0.60289884", "0.6027954", "0.6024734", "0.60116744", "0.6003756", "0.6002717", "0.600183", "0.6000682", "0.5999994", "0.5986748", "0.5986439", "0.59834194", "0.59754467", "0.59747034", "0.59747034", "0.5965991", "0.59640276", "0.59503585", "0.5949628", "0.5947094", "0.59391594", "0.5935728", "0.5933466", "0.5933326", "0.593091", "0.59264606", "0.5926175", "0.59212166", "0.59189487", "0.5916743", "0.59133875", "0.59114546", "0.5909158" ]
0.6014076
69
Register the message handlers that every journal should support.
def register_message_handlers(journal):
    journal.dispatcher.register_message_handler(
        DumpQuorumMessage, _dumpquorumhandler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)", "def registerMessageHandler(self, message_handler, message_priority_list):\n if isinstance(message_handler, MessageHandler):\n for key in message_priority_list:\n rule = (message_priority_list[key], message_handler)\n self.message_handlers[key].append(rule)\n self.message_handlers[key].sort() # Keep priority order\n else:\n self.logger.critical(\n \"MessageHandler registration failed. Object \" +\n repr(message_handler) +\" is invalid type.\")\n raise TypeError(\"Only MessageHandlers can be registered!\")\n self.logger.debug(\"MessageHandler '\" + str(message_handler) +\n \"' registered to the message bus.\")", "def register_service(self, service):\n for message_handler in service.iter_message_handlers():\n self.message_handlers[message_handler.name] = message_handler", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def register_handler(logger):\n # Register exit handler\n atexit.register(res_mgr)\n\n # Register SIGINT and SIGTERM\n signal.signal(signal.SIGINT, _signal_handler)\n signal.signal(signal.SIGTERM, _signal_handler)\n\n ResourceManager._register_exception_handler(logger)", "def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)", "def setup_signal_handlers(self):\n signal.signal(signal.SIGUSR1, self.handle_logging_signal)\n signal.signal(signal.SIGUSR2, self.handle_logging_signal)", "def register_handlers(dp, di_container: di.Container):\n general.router.register_handlers(dp)\n\n di_container.wire(packages=[sys.modules[__name__]])", "def add_topic_handlers(self):\n self.client.message_callback_add(deployment_topic, self.on_deployment_topic)\n self.client.message_callback_add(illumination_topic, self.on_illumination_topic)", "def register_handler(config):\n\n @respond_to(\".*\")\n def handle(message):\n \"\"\"Respond to every Slack message and dispatch to another handler based\n on the contents of the message.\n\n This duplicates a little bit of the work that slackbot does, but allows\n us to define handlers dynamically based on the job config.\n \"\"\"\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in 
config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def add_package_handler(self, package_name, cls):\n for module in messages.MESSAGES:\n if self._fuzzy_module_name_eq(module, package_name):\n for name in module.DESCRIPTOR.message_types_by_name:\n self.add_handler(name, getattr(cls, 'on_' + name.lower()))", "def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass", "def get_message_handlers(self):\n\t\treturn self.message_handlers", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def register(self, msg_type, handler):\n # Should check type is valid\n if not handler and msg_type in self.handlers.keys():\n del self.handlers[msg_type]\n return\n self.handlers[msg_type] = handler", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def register_transaction_types(journal):\n journal.dispatcher.register_message_handler(\n PermissionedValidatorRegistryTransactionMessage,\n transaction_message.transaction_message_handler)\n journal.add_transaction_store(PermissionedValidatorRegistryTransaction)\n set_global_permissioned_validators(journal.permissioned_validators)", "def register(self, events=[]):\n self.events = events\n if not self in manager.handler:\n manager.handler.append(self)", "def install_event_handlers(self, categories=None, handlers=None):\n if categories is not None and handlers is not None:\n raise ValueError(\"categories and handlers are mutually exclusive!\")\n\n from .events import get_event_handler_classes\n if categories:\n raise NotImplementedError()\n handlers = [cls() for cls in get_event_handler_classes(categories=categories)]\n else:\n handlers = handlers or [cls() for cls in get_event_handler_classes()]\n\n self._event_handlers = handlers", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n 
handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers = []\n next_handler = None\n for handler in reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def _register(self, comm, handler):", "def u2handlers(self):\n return []", "def get_handlers(self):\n raise NotImplementedError()", "def register_handlers(path = EXPLOIT_FOLDER):\n\n exploit_folder = './{}/{}'.format(os.path.dirname(__file__), path)\n handlers = []\n\n for module in os.listdir(exploit_folder):\n\n if not module.endswith(\".py\") or module == \"__init__.py\":\n continue\n\n # Execute the script\n # We assume that each executed script registers himself to the handlers dictionary.\n try:\n execfile('./{}/{}'.format(path, module))\n except Exception as e:\n log.failure(\"Could not register handler '{}' : {}\".format(module, e))\n\n log.info(\"Registered {} handler(s).\".format(len(handlers)))\n for handler in handlers:\n\n handler_name = handler.__name__\n log.info(\"- Registered '{}' handler\".format(handler_name))\n\n return handlers", "def add_handler(self, handler):\n pass", "def register_handler(self, topic, handler):\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def _install_signal_handlers(workers_socket, manager_socket):\n\n def sighup_handler(signal, frame):\n logger.info(\"hangup signal (SIGHUP) received; reloading configuration\")\n workers_socket.close()\n manager_socket.close()\n main()\n\n signal.signal(signal.SIGHUP, sighup_handler)\n\n def cleanup():\n workers_socket.close()\n manager_socket.close()\n context.destroy()\n\n def sigint_handler(signal, frame):\n logger.info(\"interrupt signal (SIGINT or Ctrl-C) received; shutting down\")\n cleanup()\n raise SystemExit\n\n signal.signal(signal.SIGINT, sigint_handler)\n\n def sigterm_handler(signal, frame):\n logger.info(\"termination signal (SIGTERM) received; shutting down\")\n cleanup()\n raise SystemExit\n\n signal.signal(signal.SIGTERM, sigterm_handler)", "def enable_callbacks(self):\n\n onObjectUpdate_received = self.message_handler.register('ObjectUpdate')\n onObjectUpdate_received.subscribe(self.onObjectUpdate)\n\n onObjectUpdateCached_received = self.message_handler.register('ObjectUpdateCached')\n onObjectUpdateCached_received.subscribe(self.onObjectUpdateCached)\n\n onObjectUpdateCompressed_received = self.message_handler.register('ObjectUpdateCompressed')\n onObjectUpdateCompressed_received.subscribe(self.onObjectUpdateCompressed)\n\n onImprovedTerseObjectUpdate_received = self.message_handler.register('ImprovedTerseObjectUpdate')\n onImprovedTerseObjectUpdate_received.subscribe(self.onImprovedTerseObjectUpdate)\n \n onObjectProperties_received = self.message_handler.register('ObjectProperties')\n onObjectProperties_received.subscribe(self.onObjectProperties)\n\n onKillObject_received = self.message_handler.register('KillObject')\n onKillObject_received.subscribe(self.onKillObject)\n\n # uncomment these to view packets sent back to simulator\n # onObjectName_sent = self.message_handler.register('ObjectName')\n # onObjectName_sent.subscribe(self.helpers.log_packet, self)\n\n # onDeRezObject_sent = self.message_handler.register('DeRezObject')\n # 
onDeRezObject_sent.subscribe(self.helpers.log_packet, self)", "def register_handler(self, handler):\r\n self.handler = handler", "def register_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.subscribe(self, callback)", "def send(self, *args, **kw):\n result = []\n for handler in self.registry.values():\n result.append(handler(*args, **kw))\n return result", "def makeHandlers(self):\n\n yield self.loadGrids.start(funcSelf=self)\n yield self.updateClientWatchedGrids.start(funcSelf=self)\n logger.debug(\"RPCs started\")", "def collect_handlers(log, base_url, validation):\n base_bookstore_pattern = url_path_join(base_url, '/bookstore')\n base_bookstore_api_pattern = url_path_join(base_url, '/api/bookstore')\n\n handlers = []\n # Always enable the version handler for the API\n handlers.append((base_bookstore_api_pattern, BookstoreVersionHandler))\n\n if validation['publish_valid']:\n log.info(f\"[bookstore] Enabling bookstore publishing, version: {version}\")\n handlers.append(\n (\n url_path_join(base_bookstore_api_pattern, r\"/publish%s\" % path_regex),\n BookstorePublishAPIHandler,\n )\n )\n else:\n log.info(\"[bookstore] Publishing disabled. s3_bucket or endpoint are not configured.\")\n\n if validation['s3_clone_valid']:\n log.info(f\"[bookstore] Enabling bookstore cloning, version: {version}\")\n handlers.append(\n (url_path_join(base_bookstore_api_pattern, r\"/clone(?:/?)*\"), BookstoreCloneAPIHandler)\n ),\n handlers.append(\n (url_path_join(base_bookstore_pattern, r\"/clone(?:/?)*\"), BookstoreCloneHandler)\n )\n else:\n log.info(f\"[bookstore] bookstore cloning disabled, version: {version}\")\n\n if validation['fs_clone_valid']:\n log.info(f\"[bookstore] Enabling filesystem cloning, version: {version}\")\n handlers.append(\n (url_path_join(base_bookstore_pattern, r\"/fs-clone(?:/?)*\"), BookstoreFSCloneHandler)\n )\n handlers.append(\n (\n url_path_join(base_bookstore_api_pattern, r\"/fs-clone(?:/?)*\"),\n BookstoreFSCloneAPIHandler,\n )\n ),\n else:\n log.info(f\"[bookstore] bookstore cloning disabled, version: {version}\")\n return handlers", "def init_handlers(self, root_logger, default_stream='stderr'):\n\n if default_stream == 'stdout':\n default_stream = self.stdout\n elif default_stream == 'stderr':\n default_stream = self.stderr\n\n # default handler for display to terminal\n default_handler = TerminalHandler(self, strm=default_stream)\n if config.verbose_output:\n default_handler.setLevel(VERBOSE)\n else:\n default_handler.setLevel(INFO)\n # this handler ignores levels above INPUT\n default_handler.addFilter(MaxLevelFilter(INPUT))\n default_handler.setFormatter(\n TerminalFormatter(fmt=\"%(message)s%(newline)s\"))\n root_logger.addHandler(default_handler)\n\n # handler for level STDOUT\n output_handler = TerminalHandler(self, strm=self.stdout)\n output_handler.setLevel(STDOUT)\n output_handler.addFilter(MaxLevelFilter(STDOUT))\n output_handler.setFormatter(\n TerminalFormatter(fmt=\"%(message)s%(newline)s\"))\n root_logger.addHandler(output_handler)\n\n # handler for levels WARNING and higher\n warning_handler = TerminalHandler(self, strm=self.stderr)\n warning_handler.setLevel(logging.WARNING)\n warning_handler.setFormatter(\n TerminalFormatter(fmt=\"%(levelname)s: %(message)s%(newline)s\"))\n root_logger.addHandler(warning_handler)", "def handlers(self, handlers):\n return self._set_list_field(\"handlers\", handlers)", "def _register_comm(self, comm):\n def handle_msg(msg):\n \"\"\"Handle a comm_msg message\"\"\"\n if comm._msg_callback:\n 
comm._msg_callback(msg)\n comm.handle_msg = handle_msg\n super(FrontendComm, self)._register_comm(comm)", "def setup_signal_handlers():\n # type: () -> None\n for signum in [signal.SIGINT, signal.SIGTERM]:\n signal.signal(signum, log_and_exit_handler)\n\n signal.signal(signal.SIGUSR1, dump_thread_handler)", "def register_handler(self, handler):\n if handler.key in self.handlers.keys():\n raise ValueError(f'Key {handler.key} already registered')\n self.handlers[handler.key] = handler", "async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])", "def _register_handler(self, callback, cmd, helphint, hidden, handlers,\n synonyms=(), plugin=None):\n # Register any synonyms (done before we frig with the handlers)\n for entry in synonyms:\n self._register_handler(callback, entry, helphint, True, handlers,\n plugin=plugin)\n\n # Allow simple commands to be passed as strings\n cmd = cmd.split() if isinstance(cmd, (str, unicode)) else cmd\n\n for part in cmd:\n handlers = handlers.subcommands.setdefault(part, Handlers([], {}))\n handlers.handlers.append(Registration(callback, \" \".join(cmd),\n helphint, hidden, plugin))", "def import_all_handlers(self):\n import os\n exclude_list=[\"base\"]\n\n #\n # the list of handlers (excluding base. Add more you dont want\n # to be loaded or inspected to exclude_list above.)\n #\n mods=[]\n module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'handlers'))\n #print(\"importing handlers from: \" + module_path)\n for mod in os.listdir( module_path ):\n mod = mod.split(\".\")[0]\n if not mod.startswith(\"_\") and not mod in exclude_list:\n #print(\" now processing: \" + str(mod))\n mods.append(mod)\n \n #print(\"mods: \" + str(mods))\n class_list = []\n # load all the models from their modules (mods)\n #print(str(mods))\n import importlib\n for m in mods:\n #print(\"importing: \" + 'pow_comments.handlers.' + m) \n try:\n mod = importlib.import_module('pow_comments.handlers.' + m)\n except:\n pass\n #print(dir(mod))", "def install_signal_handlers(self):\n log = logging.getLogger('mailman.runner')\n # Set up our signal handlers. Also set up a SIGALRM handler to\n # refresh the lock once per day. The lock lifetime is 1 day + 6 hours\n # so this should be plenty.\n def sigalrm_handler(signum, frame): # noqa: E306\n self._lock.refresh()\n signal.alarm(SECONDS_IN_A_DAY)\n signal.signal(signal.SIGALRM, sigalrm_handler)\n signal.alarm(SECONDS_IN_A_DAY)\n # SIGHUP tells the runners to close and reopen their log files.\n def sighup_handler(signum, frame): # noqa: E306\n reopen()\n for pid in self._kids:\n os.kill(pid, signal.SIGHUP)\n log.info('Master watcher caught SIGHUP. Re-opening log files.')\n signal.signal(signal.SIGHUP, sighup_handler)\n # SIGUSR1 is used by 'mailman restart'.\n def sigusr1_handler(signum, frame): # noqa: E306\n for pid in self._kids:\n os.kill(pid, signal.SIGUSR1)\n log.info('Master watcher caught SIGUSR1. Exiting.')\n signal.signal(signal.SIGUSR1, sigusr1_handler)\n # SIGTERM is what init will kill this process with when changing run\n # levels. 
It's also the signal 'mailman stop' uses.\n def sigterm_handler(signum, frame): # noqa: E306\n for pid in self._kids:\n os.kill(pid, signal.SIGTERM)\n log.info('Master watcher caught SIGTERM. Exiting.')\n signal.signal(signal.SIGTERM, sigterm_handler)\n # SIGINT is what control-C gives.\n def sigint_handler(signum, frame): # noqa: E306\n for pid in self._kids:\n os.kill(pid, signal.SIGINT)\n log.info('Master watcher caught SIGINT. Restarting.')\n signal.signal(signal.SIGINT, sigint_handler)", "def cacheHandlers(self):\n\n def collect_handlers(module):\n\n def wanted(member):\n return (isclass(member) and\n issubclass(member, handlers.HandlerBase) and\n member.__name__.endswith('Handler'))\n\n m = {}\n for name, obj in getmembers(module, wanted):\n m[name] = obj(self.skype)\n m[name].init()\n return m\n\n self.handlers = collect_handlers(handlers)\n if custom_handlers:\n self.handlers.update(collect_handlers(custom_handlers))", "def register_handler(self, method, handler):\n self.handlers[method] = handler", "def register_websock_handlers(self, service, new_client, new_message, close_client):\n if service in self.websock_handlers:\n L.error(\"Error: service:\" + service + \" is already registered\")\n return False\n handlers = {\n \"new_client\":new_client,\n \"new_message\":new_message,\n \"close_client\":close_client\n }\n self.websock_handlers[service] = handlers\n return True", "def get_handlers():\n\n js_path_opts = {\"path\": abspath(join(dirname(__file__), \"js\"))}\n\n return [\n (\"/networktables/ws\", NetworkTablesWebSocket),\n (\"/networktables/(.*)\", NonCachingStaticFileHandler, js_path_opts),\n ]", "def _event(self, level=None, message=None):\n for i in eventhandlers:\n if level == 'write':\n i.write( object_definition=self, message=message )\n else:\n i.debug( object_definition=self, message=message )", "def register_func_list(self, func_and_handler):\n for func, handler in func_and_handler:\n self._function_dispatch.register(func, handler)\n self.dispatch.cache_clear()", "def addHandler(self, fn):\n self.handlers.append(fn)", "def _register_services(self) -> None:\n\n for isr in self.immediate_services_with_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n isr_instance = isr()\n for handler_type in isr.message_handler_types():\n # for each explicitly supported type, add it to the router\n self.immediate_msg_with_reply_router[handler_type] = isr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_with_reply_router[\n handler_type_subclass\n ] = isr_instance\n\n for iswr in self.immediate_services_without_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n iswr_instance = iswr()\n for handler_type in iswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.immediate_msg_without_reply_router[handler_type] = iswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_without_reply_router[\n handler_type_subclass\n ] = iswr_instance\n\n for eswr in self.eventual_services_without_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n eswr_instance 
= eswr()\n for handler_type in eswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.eventual_msg_without_reply_router[handler_type] = eswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.eventual_msg_without_reply_router[\n handler_type_subclass\n ] = eswr_instance\n\n # Set the services_registered flag to true so that we know that all services\n # have been properly registered. This mostly exists because someone might\n # accidentally delete (forget to call) this method inside the __init__ function\n # of a sub-class of Node.\n self.services_registered = True", "def _handle(self, content):\n for func, args, kwargs in self.handlers:\n func(content, *args, **kwargs)\n if not self.handlers:\n self.error = \"No handlers specified\"\n logger.error(self.error)\n raise Exception(self.error)", "def __initHandlersUser(self):\n handlers = {}\n handlers['WRITE_FILE'] = self.write_file\n handlers['READU_FILE'] = self.read_file\n handlers['DELET_FILE'] = self.delete_file\n handlers['STATUS_SRV'] = self.status_server\n handlers['RSYNC_FILE'] = self.rsync_file\n handlers['WSYNC_FILE'] = self.wsync_file\n return handlers", "def addhandler(self, txt, handler):\n self.handlers[txt] = handler\n rlog(0, 'webserver', '%s handler added' % txt)", "def add_handler ( handler_list, handler_function ):\n if not (handler_function in handler_list):\n handler_list.append ( handler_function )\n \n #cellblender_added_handlers", "def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n self.structuralNavigation.inputEventHandlers)\n\n self.inputEventHandlers[\"sayAllHandler\"] = \\\n input_event.InputEventHandler(\n Script.sayAll,\n cmdnames.SAY_ALL)\n\n self.inputEventHandlers[\"panBrailleLeftHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleLeft,\n cmdnames.PAN_BRAILLE_LEFT,\n False) # Do not enable learn mode for this action\n\n self.inputEventHandlers[\"panBrailleRightHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleRight,\n cmdnames.PAN_BRAILLE_RIGHT,\n False) # Do not enable learn mode for this action", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def add(self, handler, on_error=None):\n self.handlers.append(handler)", "def notify_message_listeners(self, name, msg):\n\n # handle the message specific listeners\n for fn in self._message_listeners.get(name, []):\n try:\n fn(self, name, msg)\n except Exception as e:\n i = 1\n #print(\"[CONNECTION ERROR] unable to handle message listener for \" + name)\n #print(e)\n\n # handle the listeners that are registered for all messages\n for fn in self._message_listeners.get('*', []):\n try:\n fn(self, name, msg)\n except Exception as e:\n i = 1\n #print(\"[CONNECTION ERROR] unable to handle * message listener for \" + name)\n #print(e)", "def RegisterMessageHandler(self, handler, lease_time, limit=1000):\n self.UnregisterMessageHandler()\n\n self.handler_stop = False\n self.handler_thread = threading.Thread(\n name=\"message_handler\",\n target=self._MessageHandlerLoop,\n args=(handler, lease_time, limit))\n self.handler_thread.daemon = True\n self.handler_thread.start()", "def fileHandlers(self):\n fileHandlers = list()\n handlers = self.logger.handlers\n for handler in handlers:\n try:\n if handler._name.startswith(\"LogFile-\"):\n 
fileHandlers.append(handler)\n except:\n pass\n return fileHandlers", "def add_message_handler(self,message_handler,message_filter=Filters.text):\n\t\tif(callable(message_handler)):\n\t\t\tself.message_handlers.append((message_handler,message_filter))\n\t\telse:\n\t\t\traise NotCallableException(\"{} is not callable\".format(type(message_handler)))", "def add_loggers(self):\n pass", "def register_handler(cls, handler):\n with cls._lock:\n cls._handlers[cls] = handler", "def chat_handler(self, regex, order=100):\n def decorator(func):\n self.register_handler(regex, func, order)\n return func\n\n return decorator", "def iter_message_handlers(self):\n for name in dir(self):\n attr = getattr(self, name)\n if isinstance(attr, MessageHandler):\n yield attr", "def create_job_loggers(self, jobs):\n self.add_filehandler(\"apscheduler.executors.default\")\n self.add_filehandler(\"apscheduler.scheduler\")\n self.add_filehandler(\"flask_apscheduler\")\n for x in jobs:\n # Creating a logger for each job and adding a seperate filehandler for each logger. Job ids have to have the\n # same logger name of the functions that the jobs invoke. \n self.add_filehandler(x[\"id\"])", "def register_handler(self, token, handler):\r\n self._handlers[token] = handler", "def add_handler(handler_list, handler_function):\n if not handler_function in handler_list:\n handler_list.append(handler_function)", "def __setupCommandHandlerTypes(self):\n # dict saving all command handler types\n self.__commandHandlers = {'channel': {}, 'query': {}, 'not_authed_dcc': {}, 'authed_dcc': {}}", "def register(self, handler):\n self.handlers.add(handler)\n return self", "def publish(self, path, handler):\n path = ensure_slash(path)\n self.handlers[path] = handler", "def set_jwt_handlers(jwt):\n\n @jwt.authentication_handler\n def authenticate(username, password):\n pass\n\n @jwt.jwt_error_handler\n def error_handler(error):\n return 'Auth Failed: {}'.format(error.description), 400\n\n @jwt.jwt_payload_handler\n def make_payload(user):\n return {\n 'user_id': str(user.id),\n 'exp': (datetime.datetime.utcnow() +\n current_app.config['JWT_EXPIRATION_DELTA']).isoformat()\n }\n\n @jwt.request_handler\n def load_user(payload):\n pass", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def get_registered_handlers(self):\n return list(self._registry.values())", "def cleanup_handlers():\n # There's nothing to set up so we immediately yield control.\n yield\n # After the with block ends we cleanup any output handlers.\n for match_func in match_stream_handler, match_syslog_handler:\n handler, logger = find_handler(logging.getLogger(), match_func)\n if handler and logger:\n logger.removeHandler(handler)", "def _set_up_io_handlers(self):\n\n # Build a simple subordinate write function that's closed over the current device,\n # and which knows how to send data.\n def send(packet):\n self.send_packet(packet)\n\n # Create our I/O connection and our USB sniffer handlers.\n self.io = IOConnection(send, self.regs)\n self.sniffer = USBSniffer(send)\n\n # Create our SDRam read handler, and register our sniffer with it, so stored USB\n # packets can be forwarded to the USB sniffer.\n sdram_handler = SDRAMHandler(send)\n sdram_handler.register_packet_handler(self.sniffer)\n \n # Register our core packet handlers to handle received packets.\n self.register_packet_handler(self.io)\n 
self.register_packet_handler(LFSRTest(send))\n self.register_packet_handler(self.sniffer)\n self.register_packet_handler(sdram_handler)\n self.register_packet_handler(DummyHandler(send))", "def register_handler(self, handler, idx=None, args=(), kwargs={}):\n if idx is None:\n idx = len(self.handlers)\n if callable(handler):\n logger.debug(\"Adding handler (%s) in position %d\" % (str(handler), idx))\n if handler in [x[0] for x in self.handlers]:\n w = \"Multiple instances of %s registered\" % str(handler)\n logger.warn(w)\n warnings.warn(w)\n self.handlers.insert(idx, (handler, args, kwargs))\n else:\n self.error = \"Handler \\\"%s\\\" is not callable\" % str(handler)\n logger.error(self.error)\n raise Exception(self.error)", "def init_signal_handler():\n signal.signal(signal.SIGUSR1, sig_handler)\n signal.signal(signal.SIGTERM, term_handler)\n #logger.warning(\"Signal handler installed.\")", "def u2handlers(self):\n handlers = suds.transport.http.HttpTransport.u2handlers(self)\n if self.ssl_context:\n try:\n handlers.append(HTTPSHandler(context=self.ssl_context,\n check_hostname=self.verify))\n except TypeError:\n # Python 2.7.9 HTTPSHandler does not accept the\n # check_hostname keyword argument.\n #\n # Note that even older Python versions would also\n # croak on the context keyword argument. But these\n # old versions do not have SSLContext either, so we\n # will not end up here in the first place.\n handlers.append(HTTPSHandler(context=self.ssl_context))\n return handlers", "def create_hooks(self, hclass):\n for extension in self.db.values():\n self.events.register_callbacks_from_inst(hclass, extension)", "def subscribe(self, event_handler):\n pass # pragma: no cover", "def get_file_handlers(self):\n return []", "def register_arrived_message_handler(self, arrived_message_class, handler):\n message_handler = IMessageHandler()\n message_handler.on_message = handler\n self.__connection.get_processor().set_arrived_msg_handler(arrived_message_class, message_handler)", "def _initChangeHandlers(self, handlers):\n if hasattr(self, \"_changeHandlerSet\") :\n return\n if isinstance(handlers, BaseChangeHandler):\n self._changeHandlerSet = set([handlers])\n elif hasattr(handlers, '__iter__'):\n self._changeHandlerSet = set(\n [h for h in handlers if isinstance(h, BaseChangeHandler)])\n else: \n self._changeHandlerSet = set()", "def get_handlers_for_event(self, event):\n pass # pragma: no cover", "def get_app_handlers(self):\n return []", "def register_error_handlers(self):\n\n def error_handler(error):\n if not isinstance(error, exceptions.HTTPException):\n error = exceptions.InternalServerError()\n return response.Response(bootstrap.card(body=_.span[_.p(style='color:#888')[error.description or ''],\n _.img(src=flask.url_for('mara_app.static',\n filename='mara.jpg'),\n style='margin-top:30px;max-width:100%;')]),\n title=f'{error.code} {error.name}',\n status=error.code)\n\n for cls in exceptions.HTTPException.__subclasses__():\n self.register_error_handler(cls, error_handler)", "def import_handlers(self):\n if not self._import_handlers:\n self._initialize_handlers()\n\n return self._import_handlers", "def register(self):\n REGISTERED_SIGNALS.setdefault(self.path, []).append(self)", "def register_handler(self, method, path, fn):\n if not(method in self.handlers):\n self.handlers[method] = {}\n self.handlers[method][path] = fn", "def remove_handlers():\n handlers = []\n for handler in logging.root.handlers:\n if not isinstance(handler, logging.StreamHandler):\n handlers.append(handler)\n 
logging.root.handlers = handlers", "def setup_handlers(web_app):\n\n mlw_handlers = [\n ('/mlw/load_workspace', MLW_load_workspace_handler),\n ('/mlw/save_workspace', MLW_save_workspace_handler),\n ('/mlw/install_requirements', MLW_install_requirements_handler),\n ('/mlw/notify_still_alive', MLW_notify_still_alive_handler)\n ]\n\n # add the baseurl to our paths\n base_url = web_app.settings['base_url']\n mlw_handlers = [\n (ujoin(base_url, x[0]), x[1])\n for x in mlw_handlers\n ]\n print(\"base_url: {}\".format(base_url))\n print(mlw_handlers)\n\n web_app.add_handlers('.*', mlw_handlers)", "def set_signal_handlers(cls, signals):\n for sig in signals:\n try:\n original_handler = signal.getsignal(sig)\n if original_handler == cls.signal_handler:\n continue\n signal.signal(sig, cls.signal_handler)\n cls.__signal_handlers[sig] = original_handler\n except Exception as e:\n pass", "def register_cls_list(\n self, cls_and_handler, no_singledispatch: bool = False\n ):\n for cls, handler in cls_and_handler:\n if no_singledispatch:\n self._direct_dispatch[cls] = handler\n else:\n self._single_dispatch.register(cls, handler)\n self.dispatch.cache_clear()", "def configure(logger, level):\n logger.setLevel(level)\n for d_handler in DEFAULT_HANDLERS:\n logger.addHandler(d_handler)\n return logger", "def _default_handler(self, topic, messages):\n self.logger.error('No handler assigned for topic %s' % topic)", "def get_handlers():\n handlers = list()\n\n #login\n handlers.append((r'/login', Login))\n handlers.append((r'/logout', Logout))\n\n # main\n handlers.append((r'/', Index))\n\n\n #user\n handlers.extend(get_routes(UserController))\n\n #role\n handlers.extend(get_routes(RoleController))\n\n\n handlers.extend(get_routes(ApiServiceController))\n\n handlers.extend(get_routes(InventarioController))\n\n return handlers" ]
[ "0.72620213", "0.70435107", "0.66157293", "0.65716064", "0.6314305", "0.62207043", "0.6180045", "0.60054207", "0.6003531", "0.5998502", "0.5990488", "0.59491366", "0.5929773", "0.5925832", "0.5805171", "0.58019876", "0.5768111", "0.575978", "0.5712927", "0.5671215", "0.56642425", "0.5644854", "0.5639432", "0.5635607", "0.5624606", "0.56242144", "0.5588481", "0.5579341", "0.5577335", "0.5525336", "0.551606", "0.5505948", "0.5491678", "0.543411", "0.5421989", "0.5412546", "0.5399147", "0.5334624", "0.5333874", "0.5329247", "0.532805", "0.5325932", "0.53256255", "0.5325441", "0.5317116", "0.53130347", "0.5306569", "0.52886546", "0.5287686", "0.528439", "0.5265715", "0.5257275", "0.5251036", "0.52395165", "0.52246267", "0.52214104", "0.52083164", "0.5199843", "0.5176758", "0.51673645", "0.51659876", "0.5153436", "0.51445633", "0.51384", "0.5134071", "0.51263666", "0.51209307", "0.5120738", "0.5113528", "0.5110684", "0.50696766", "0.50574964", "0.5045863", "0.504218", "0.5033827", "0.5032845", "0.5024583", "0.5016803", "0.50164115", "0.5015815", "0.5013675", "0.50098157", "0.5005893", "0.5003386", "0.499773", "0.49842933", "0.4978512", "0.49673885", "0.49530488", "0.49507907", "0.49492913", "0.49484676", "0.49466166", "0.49436876", "0.49431828", "0.4929554", "0.4926722", "0.4921653", "0.49191782", "0.4910137" ]
0.8429424
0
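The register_message_handlers row above shows only the registration call itself. Below is a minimal, self-contained sketch of the pieces that call presupposes — a dispatcher mapping message classes to handler callables, a journal that owns it, a stub message class, and a handler function. The Dispatcher, Journal and handler bodies are assumptions made for the illustration, not the journal's actual implementation; only the final function mirrors the row above.

# Illustrative sketch: Dispatcher, Journal, the stub message class and the
# handler body are assumptions; only register_message_handlers mirrors the row above.


class DumpQuorumMessage(object):
    """Stub standing in for the real message class."""


class Dispatcher(object):
    """Maps message classes to the callables that handle them."""

    def __init__(self):
        self._handlers = {}

    def register_message_handler(self, msg_class, handler):
        # Later dispatch looks handlers up by the message's class.
        self._handlers[msg_class] = handler

    def dispatch(self, msg, journal):
        handler = self._handlers.get(type(msg))
        if handler is not None:
            handler(msg, journal)


def _dumpquorumhandler(msg, journal):
    # Hypothetical handler body: react to an incoming DumpQuorumMessage.
    print("dump quorum message received")


def register_message_handlers(journal):
    journal.dispatcher.register_message_handler(
        DumpQuorumMessage, _dumpquorumhandler)


class Journal(object):
    def __init__(self):
        self.dispatcher = Dispatcher()


journal = Journal()
register_message_handlers(journal)
journal.dispatcher.dispatch(DumpQuorumMessage(), journal)  # prints the handler message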
Constructor for DumpQuorumMessage class.
def __init__(self, minfo=None):
    if minfo is None:
        minfo = {}
    super(DumpQuorumMessage, self).__init__(minfo)

    self.IsSystemMessage = False
    self.IsForward = True
    self.IsReliable = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump(self):\n result = super(DumpQuorumMessage, self).dump()\n return result", "def __init__(self):\n super().__init__()\n\n self.__encoded_msg = ''", "def __init__(self, msg):\n\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self):\n fmt = \"%(message)s\"\n super().__init__(fmt=fmt)\n\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def __init__(self, message):\r\n self.__message = message", "def __init__(self, msg=\"\"):\n self._msg = msg\n super().__init__()", "def __init__(self, message):\n self.message = message", "def __init__(self, message):\n self.message = message", "def __init__(self, message):\n self.message = message", "def __init__(self):\n self.type = None\n self.msg = \"\"\n self.process = None\n self.edge_id = None", "def __init__(self, message):\n super().__init__(message)", "def __init__(self, connectionPool, timeout=10):\n MsgPackProtocol.__init__(self, timeout)\n self.connectionPool = connectionPool\n self.log = Logger(system=self)\n self.storage = self.connectionPool.storage\n self.peersKeyId = None", "def __init__(self, message=None):\n self.message = message", "def __init__(self, message=None):\n self.message = message", "def __init__(self, msg: str):\n self.msg = msg", "def __init__(self, message=None) -> None:\n super().__init__(message)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(DahuaQrcodeScanData, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.Header is None:\n self.Header = std_msgs.msg.Header()\n if self.x_pos is None:\n self.x_pos = 0\n if self.y_pos is None:\n self.y_pos = 0\n if self.angle is None:\n self.angle = 0\n if self.code_type is None:\n self.code_type = 0\n if self.code_num is None:\n self.code_num = 0\n else:\n self.Header = std_msgs.msg.Header()\n self.x_pos = 0\n self.y_pos = 0\n self.angle = 0\n self.code_type = 0\n self.code_num = 0", "def __init__(self, msg_id=0, xtd=0, rtr=0, ndata=0, data=() ):\r\n self.msg_id = msg_id\r\n self.rtr = rtr\r\n self.xtd = xtd\r\n self.ndata = ndata\r\n self.data = data # tuple with length 0..8\r\n self.timestamp = time.time() # Timestamp of object creation\r", "def __init__(self, message):\n super().__init__(message)\n self.message = message", "def __init__(self, message=None):\n\n self._message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str) -> None:\n\n 
super().__init__(message)", "def __init__(self, message: str) -> None:\n\n super().__init__(message)", "def __init__(self, message: str):\n self.message = message", "def __init__(self, message: str):\n self.message = message", "def __init__(self, message: str):\n self.message = message", "def __init__(self, message: str) -> None:\n super().__init__(message)", "def __init__(self, message: str) -> None:\n self.message = message", "def __init__(self, command=None, data_length=0, data=[]):\n if command is not None:\n self.command = command\n self.data_length = data_length\n self.data = data\n self.encode()\n else:\n self.message_length = 0\n self.command = 0\n self.data_length = 0\n self.data = []\n self.string = \"\"", "def __init__(self, message):\n super().__init__()\n self._message = message", "def __init__(self,piece,peer):\n \n self.pieceHash = HashStringNoB2A(piece)\n self.size = len(piece)\n self.peer = peer\n self.status = 'STORED'", "def __init__(self,\n compression_type=CompressionType.NONE,\n required_acks=1,\n timeout=10000):\n # {topic_name: {partition_id: MessageSet}}\n self.msets = defaultdict(\n lambda: defaultdict(\n lambda: MessageSet(compression_type=compression_type)\n ))\n self.required_acks = required_acks\n self.timeout = timeout\n self._message_count = 0 # this optimization is not premature", "def __init__(self):\n self.msg_dict = dict() # msg: last_print_time_stamp", "def __init__(self):\n self._msg_dict = {}", "def __init__(self, msg):\n super().__init__(msg)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(Paraset, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.command is None:\n self.command = 0\n if self.set_num is None:\n self.set_num = 0\n if self.paraset_byte54 is None:\n self.paraset_byte54 = 0\n if self.paraset_byte53 is None:\n self.paraset_byte53 = 0\n if self.paraset_byte52 is None:\n self.paraset_byte52 = 0\n if self.paraset_byte51 is None:\n self.paraset_byte51 = 0\n if self.paraset_byte50 is None:\n self.paraset_byte50 = 0\n if self.paraset_byte49 is None:\n self.paraset_byte49 = 0\n if self.paraset_byte48 is None:\n self.paraset_byte48 = 0\n if self.paraset_byte47 is None:\n self.paraset_byte47 = 0\n if self.paraset_byte46 is None:\n self.paraset_byte46 = 0\n if self.paraset_byte45 is None:\n self.paraset_byte45 = 0\n if self.paraset_byte44 is None:\n self.paraset_byte44 = 0\n if self.paraset_byte43 is None:\n self.paraset_byte43 = 0\n if self.paraset_byte42 is None:\n self.paraset_byte42 = 0\n if self.paraset_byte41 is None:\n self.paraset_byte41 = 0\n if self.paraset_byte40 is None:\n self.paraset_byte40 = 0\n if self.paraset_byte39 is None:\n self.paraset_byte39 = 0\n if self.paraset_byte38 is None:\n self.paraset_byte38 = 0\n if self.paraset_byte37 is None:\n self.paraset_byte37 = 0\n if self.paraset_byte36 is None:\n self.paraset_byte36 = 0\n if self.paraset_byte35 is None:\n self.paraset_byte35 = 0\n if self.paraset_byte34 is None:\n self.paraset_byte34 = 0\n if self.paraset_byte33 is None:\n self.paraset_byte33 = 0\n if self.paraset_byte32 is None:\n self.paraset_byte32 = 0\n if self.paraset_byte31 is None:\n self.paraset_byte31 = 0\n if self.paraset_byte30 is None:\n self.paraset_byte30 = 0\n if self.paraset_byte29 is None:\n self.paraset_byte29 = 0\n if self.paraset_byte28 is None:\n self.paraset_byte28 = 0\n if self.paraset_byte27 is None:\n self.paraset_byte27 = 0\n if self.paraset_byte26 is 
None:\n self.paraset_byte26 = 0\n if self.paraset_byte25 is None:\n self.paraset_byte25 = 0\n if self.paraset_byte24 is None:\n self.paraset_byte24 = 0\n if self.paraset_byte23 is None:\n self.paraset_byte23 = 0\n if self.paraset_byte22 is None:\n self.paraset_byte22 = 0\n if self.paraset_byte21 is None:\n self.paraset_byte21 = 0\n if self.paraset_byte20 is None:\n self.paraset_byte20 = 0\n if self.paraset_byte19 is None:\n self.paraset_byte19 = 0\n if self.paraset_byte18 is None:\n self.paraset_byte18 = 0\n if self.paraset_byte17 is None:\n self.paraset_byte17 = 0\n if self.paraset_byte16 is None:\n self.paraset_byte16 = 0\n if self.paraset_byte15 is None:\n self.paraset_byte15 = 0\n if self.paraset_byte14 is None:\n self.paraset_byte14 = 0\n if self.paraset_byte13 is None:\n self.paraset_byte13 = 0\n if self.paraset_byte12 is None:\n self.paraset_byte12 = 0\n if self.paraset_byte11 is None:\n self.paraset_byte11 = 0\n if self.paraset_byte10 is None:\n self.paraset_byte10 = 0\n if self.paraset_byte9 is None:\n self.paraset_byte9 = 0\n if self.paraset_byte8 is None:\n self.paraset_byte8 = 0\n if self.paraset_byte7 is None:\n self.paraset_byte7 = 0\n if self.paraset_byte6 is None:\n self.paraset_byte6 = 0\n if self.paraset_byte5 is None:\n self.paraset_byte5 = 0\n if self.paraset_byte4 is None:\n self.paraset_byte4 = 0\n if self.paraset_byte3 is None:\n self.paraset_byte3 = 0\n if self.paraset_byte2 is None:\n self.paraset_byte2 = 0\n if self.paraset_byte1 is None:\n self.paraset_byte1 = 0\n else:\n self.header = std_msgs.msg.Header()\n self.command = 0\n self.set_num = 0\n self.paraset_byte54 = 0\n self.paraset_byte53 = 0\n self.paraset_byte52 = 0\n self.paraset_byte51 = 0\n self.paraset_byte50 = 0\n self.paraset_byte49 = 0\n self.paraset_byte48 = 0\n self.paraset_byte47 = 0\n self.paraset_byte46 = 0\n self.paraset_byte45 = 0\n self.paraset_byte44 = 0\n self.paraset_byte43 = 0\n self.paraset_byte42 = 0\n self.paraset_byte41 = 0\n self.paraset_byte40 = 0\n self.paraset_byte39 = 0\n self.paraset_byte38 = 0\n self.paraset_byte37 = 0\n self.paraset_byte36 = 0\n self.paraset_byte35 = 0\n self.paraset_byte34 = 0\n self.paraset_byte33 = 0\n self.paraset_byte32 = 0\n self.paraset_byte31 = 0\n self.paraset_byte30 = 0\n self.paraset_byte29 = 0\n self.paraset_byte28 = 0\n self.paraset_byte27 = 0\n self.paraset_byte26 = 0\n self.paraset_byte25 = 0\n self.paraset_byte24 = 0\n self.paraset_byte23 = 0\n self.paraset_byte22 = 0\n self.paraset_byte21 = 0\n self.paraset_byte20 = 0\n self.paraset_byte19 = 0\n self.paraset_byte18 = 0\n self.paraset_byte17 = 0\n self.paraset_byte16 = 0\n self.paraset_byte15 = 0\n self.paraset_byte14 = 0\n self.paraset_byte13 = 0\n self.paraset_byte12 = 0\n self.paraset_byte11 = 0\n self.paraset_byte10 = 0\n self.paraset_byte9 = 0\n self.paraset_byte8 = 0\n self.paraset_byte7 = 0\n self.paraset_byte6 = 0\n self.paraset_byte5 = 0\n self.paraset_byte4 = 0\n self.paraset_byte3 = 0\n self.paraset_byte2 = 0\n self.paraset_byte1 = 0", "def __init__(self, message=\"\"):\n\n self._message = message\n self._startTime = time.time()", "def __init__(self,msg) -> None:\n\n super().__init__(self)\n self.msg=msg", "def __init__(self):\n print '[' + str(self.__class__) + ']: instance created.'\n self.messages = dict()", "def __init__(self, buf=None, *args, **kwargs):\n super(Message, self).__init__(buf, *args, **kwargs)\n self.__initialized = True", "def __init__(self, message, *args, **kwargs):\n self.message = message\n super().__init__(*args, **kwargs)", "def __init__(self, args=None, 
inventory=None, version=None,\n identifier=None, created=None, message=None, name=None, address=None):\n self.id = identifier\n self.created = created\n self.message = message\n self.name = name\n self.address = address\n if args is not None:\n self.created = args.created\n self.message = args.message\n self.name = args.name\n self.address = args.address\n elif inventory is not None:\n self.from_inventory(inventory, version)", "def __init__(self):\n self.messageSet = set()\n self.messageQueue = deque()", "def __init__(self, message='', incomplete=True):\n super().__init__(message)\n self.incomplete = incomplete", "def __init__(self, msg, final=False):\n self.__msg = msg\n self.final = final", "def __init__(self, message):\n self.vars = {}\n self.vars['message'] = message", "def __init__(self, queue_id=None):\n super().__init__()\n self.queue_id = queue_id", "def __init__(self, msg):\n\n super(DBSyntaxError, self).__init__(msg)\n self.msg = msg", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(DrivetrainCommand, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n if self.gear is None:\n self.gear = 0\n if self.front_diff is None:\n self.front_diff = 0\n if self.rear_diff is None:\n self.rear_diff = 0\n else:\n self.header = std_msgs.msg._Header.Header()\n self.gear = 0\n self.front_diff = 0\n self.rear_diff = 0", "def __init__(self, peer, text, direction, status=None, timestamp=None, **kargs):\n super(Message, self).__init__()\n storage = None\n\n self.peer = TelNumber.as_type(peer)\n self.text = Text.as_type(text)\n self.timestamp = Time.as_type(timestamp)\n # TODO: use a boolean here\n assert direction in ['in', 'out'], direction\n self.direction = direction\n self.status = status or direction == 'out' and 'read' or 'unread'\n assert self.status in ['read', 'unread', 'unsent', 'sent'], status", "def __init__(self,Q=None):\n \n self.Q = Q", "def __init__(self, status=None):\n if status is None:\n status = self.STATUS_NONE\n self.status = status\n self.messages = []\n self.last_command = None\n self.message_ids = []", "def __init__(self, msg: str) -> None:\n ...", "def __init__(self, *args):\n this = _ida_hexrays.new_qstring_printer_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, message: str, value, expected):\n super().__init__(message, value, expected)\n self.message = message\n self.value = value\n self.expected = expected", "def __init__(self, topic, partition, offset, key, message):\n self.topic = topic\n self.partition = partition\n self.offset = offset\n self._rawKey = key\n self._rawMessage = message\n self._keyDecoder = utf8_decoder\n self._valueDecoder = utf8_decoder", "def __init__( self, \n workerId, \n logPath, \n nameserver, \n qin = None, \n sqout = None, \n eqout = None,\n mqout = None,\n metaQin = None, \n metaQout = None, \n geoip = None ):\n\n super( dnsBroker, self ).__init__( workerId = workerId, \n workerPurpose = \"Probe\",\n logPath = logPath,\n qin = qin, \n metaQin = metaQin,\n metaQout = metaQout )\n\n self.state.update( {\n\n # DNS Probe\n 'probe' : Probe( workerId = workerId, \n logPath = logPath, \n nameserver = nameserver ),\n\n # Google MX Regex\n 'rgmx' : reg_compile( \"([0-9]+)\\s(.*\\.google(?:mail)?\\.com$)\" ),\n\n # SPF Regex\n 'rgspf' : reg_compile( '^\"v\\=(spf[0-9].*)\"$' ),\n \n # Output Queues\n 'qout' : [ sqout, eqout, mqout ],\n\n # GeoIp Db Wrapper\n 'geoip' : 
geoip,\n \n } )", "def __init__(self, msg):\n\n super(DBConnectionError, self).__init__(msg)\n self.msg = msg", "def __init__(self, level=None, message=None, process_id=None, user_name=None, timestamp=None):\n\n self._level = None\n self._message = None\n self._process_id = None\n self._user_name = None\n self._timestamp = None\n\n if level is not None:\n self.level = level\n if message is not None:\n self.message = message\n if process_id is not None:\n self.process_id = process_id\n if user_name is not None:\n self.user_name = user_name\n if timestamp is not None:\n self.timestamp = timestamp", "def __init__(self, queue_name, **kwargs):\n super(Queue, self).__init__(**kwargs)\n self.value = queue_name", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(KomodoSpeechRecCommand, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.cmd is None:\n self.cmd = ''\n if self.cat is None:\n self.cat = ''\n else:\n self.header = std_msgs.msg.Header()\n self.cmd = ''\n self.cat = ''", "def __init__(self, message=None, details=None, **kw):\n if not message:\n message = self.defaultMessage\n\n self.message = message\n self.details = details\n self.traceback = traceback.format_exc()", "def __init__(self, node_id, edges, name, msg_q, dl):\n global debug_level\n # Required for GHS operation\n self.state = State.sleep\n self.name = name\n self.msg_q = msg_q\n self.level = 0\n self.node_id = node_id\n\n self.father = -1 # Index of the edge along the father of the node\n self.edges = edges\n self.num_neighbors = len(edges)\n\n # Process variables\n self.rec = 0\n self.test_edge = -1\n self.best_edge = -1\n self.best_weight = INF\n self.completed = False\n self.num_messages = 0\n debug_level = dl", "def __init__(\n self,\n acknowledged=0,\n unacknowledged=0,\n total=0,\n started=0,\n abandoned=0,\n unclosed=0,\n starting=0,\n stopped=0\n ):\n self.acknowledged = acknowledged\n self.unacknowledged = unacknowledged\n self.total = total\n self.started = started\n self.abandoned = abandoned\n self.unclosed = unclosed\n self.starting = starting\n self.stopped = stopped", "def __init__(self, msg: dict):\n\n for key, value in msg.items():\n setattr(self, key, value)\n self.data = msg\n self.dt = datetime.fromisoformat(self.timestamp)", "def __init__(self, redis, logger, console_logger=None):\n super().__init__(redis, logger, console_logger)", "def __init__(self, redis, logger, console_logger=None):\n super().__init__(redis, logger, console_logger)", "def __init__(self, redis, logger, console_logger=None):\n super().__init__(redis, logger, console_logger)", "def __init__(self):\n\n self.message = \"Executando ...\"", "def __init__(self):\n\n\t\tself.count = 0\n\t\tself.messages = []", "def serialize(self):\n\n # The len must be multiple of 4 bits to convert unambiguously\n\n id_len = self.id.bit_length()\n while (id_len % 4)!= 0:\n id_len += 1\n if self.payload:\n pay_len = self.payload.bit_length()\n while (pay_len % 4)!= 0:\n pay_len += 1\n else: pay_len = 0\n if self.command:\n com_len = self.command.bit_length()\n while (com_len % 4)!= 0:\n com_len += 1\n else: com_len = 0\n\n values = {\n \"id\": self.id,\n \"id_len\": id_len,\n \"payload\": self.payload,\n \"payload_len\": pay_len,\n \"command\": self.command,\n \"command_len\": com_len\n }\n\n\n if self.id == Message.MEASURE or self.id == Message.SINGLE_MEASURE:\n serial_format = (\n \"uint:id_len=id, 
bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n else:\n serial_format = (\n \"0x23, uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n\n message = bitstring.pack(serial_format, **values)\n\n rospy.logdebug(\"Sent command '0x%s'\", message.hex)\n\n return message.tobytes()", "def __init__(self):\n\n # universal message variables\n self.text = None\n self.files = []\n self.keywords = []\n self.performers = []\n self.hasPerformers = False # used to flag files from performer folders\n ## message only variables\n self.price = None # $3 - $100\n self.users = [] # users to send to\n ## post only variables\n self.expiration = None\n self.poll = None\n self.schedule = None\n ##\n self.gotten = False", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(Track, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.status is None:\n self.status = 0\n if self.index is None:\n self.index = 0\n if self.range is None:\n self.range = 0.\n if self.range_rate is None:\n self.range_rate = 0.\n if self.range_accl is None:\n self.range_accl = 0.\n if self.azimuth is None:\n self.azimuth = 0.\n if self.lateral_rate is None:\n self.lateral_rate = 0.\n if self.width is None:\n self.width = 0.\n if self.is_mr_update is None:\n self.is_mr_update = False\n if self.is_lr_update is None:\n self.is_lr_update = False\n if self.amplitude is None:\n self.amplitude = 0\n else:\n self.header = std_msgs.msg.Header()\n self.status = 0\n self.index = 0\n self.range = 0.\n self.range_rate = 0.\n self.range_accl = 0.\n self.azimuth = 0.\n self.lateral_rate = 0.\n self.width = 0.\n self.is_mr_update = False\n self.is_lr_update = False\n self.amplitude = 0", "def __init__(self):\n self._values = {\n 'typeName': None,\n 'message': None,\n 'hasFullStack': True,\n }\n self._initialize()", "def __init__(self, message=None, opcode=None, version=None,\r\n masking_key=None, final=False, rsv1=0, rsv2=0, rsv3=0):\r\n self.version = get_version(version)\r\n if opcode is None and message is not None:\r\n opcode = 0x1 if is_text_data(message) else 0x2\r\n message = to_bytes(message or b'')\r\n self.payload_length = len(message)\r\n if opcode is None:\r\n raise WebSocketProtocolError('opcode not available')\r\n self.version = version\r\n self.opcode = opcode\r\n if masking_key:\r\n masking_key = to_bytes(masking_key)\r\n if len(masking_key) != 4:\r\n raise WebSocketProtocolError('Masking key must be 4 bytes long')\r\n self.masking_key = masking_key\r\n self.fin = 0x1 if final else 0\r\n self.rsv1 = rsv1\r\n self.rsv2 = rsv2\r\n self.rsv3 = rsv3\r\n self.body = message\r\n self.msg = self.build_frame(message)", "def __init__(self, target, message=\"\", barLength=40,\n barChar=\"#\", emptyChar=\"-\"):\n\n super().__init__(message)\n self._target = target\n\n self._current = 0\n # ETA's __init__ makes sure that target > 0\n self._eta = ETA(target) if target else None\n self._bar = self._FillingBar(barLength, barChar, emptyChar) \\\n if target \\\n else self._OscillatingBar(barLength)\n\n # format string for text after bar\n if target:\n targetStr = f\"{target:d}\"\n self._postFmt = \"] ({:\"+str(len(targetStr))+\"d}/\"+targetStr+\") \"\n else:\n self._postFmt = \"] ({:3d}/?)\"", "def __repr__(self):\n return 'Queue({})'.format(self.length())", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(SemMap, self).__init__(*args, **kwds)\n #message 
fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.namespace is None:\n self.namespace = ''\n if self.id is None:\n self.id = ''\n if self.prefixes is None:\n self.prefixes = []\n if self.imports is None:\n self.imports = []\n if self.address is None:\n self.address = knowrob_semantic_map_msgs.msg.SemMapAddress()\n if self.objects is None:\n self.objects = []\n if self.actions is None:\n self.actions = []\n if self.object_properties is None:\n self.object_properties = []\n if self.data_properties is None:\n self.data_properties = []\n else:\n self.header = std_msgs.msg.Header()\n self.namespace = ''\n self.id = ''\n self.prefixes = []\n self.imports = []\n self.address = knowrob_semantic_map_msgs.msg.SemMapAddress()\n self.objects = []\n self.actions = []\n self.object_properties = []\n self.data_properties = []", "def __init__(self,\n msg_id=0x00,\n payload=b''):\n self.sync = MESSAGE_TX_SYNC\n self._msg_id = msg_id\n\n self.is_extended_message = False\n self.flag_byte = None\n self._extended_data_bytes = bytearray()\n self._payload = bytearray()\n self.set_payload(payload)" ]
[ "0.7168633", "0.57381815", "0.5708409", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.569133", "0.5681516", "0.5638713", "0.5632342", "0.5632342", "0.5632342", "0.5629523", "0.5628895", "0.56131965", "0.55871975", "0.55871975", "0.55618376", "0.55437434", "0.5537567", "0.552138", "0.55166525", "0.5509274", "0.54968506", "0.54968506", "0.54968506", "0.54968506", "0.54968506", "0.54968506", "0.54968506", "0.54968506", "0.54968506", "0.54968506", "0.5485104", "0.5485104", "0.5471981", "0.5471981", "0.5471981", "0.54569674", "0.5431936", "0.5404556", "0.53967077", "0.5372922", "0.53707594", "0.5330049", "0.5324429", "0.5317021", "0.5314072", "0.53079563", "0.53008187", "0.52945507", "0.5284174", "0.5268657", "0.5256618", "0.5228894", "0.52100515", "0.5194206", "0.5191017", "0.51876193", "0.5182819", "0.515911", "0.51521105", "0.51517534", "0.51289445", "0.51257443", "0.5122405", "0.51074326", "0.50933164", "0.5090696", "0.50889194", "0.50845164", "0.5081115", "0.50796324", "0.50779384", "0.50743455", "0.50676167", "0.50627005", "0.50548494", "0.50548494", "0.50548494", "0.50528985", "0.5050747", "0.50464505", "0.5040306", "0.5033472", "0.5026299", "0.5022843", "0.50185466", "0.5013301", "0.50082743", "0.50061756" ]
0.7561885
0
Returns a dict with information about the dump quorum message.
def dump(self):
    result = super(DumpQuorumMessage, self).dump()
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dumps(self) -> Dict[str, Any]:\n return {\n \"commitId\": self.commit_id,\n \"parentCommitId\": self.parent_commit_id,\n \"message\": self.message,\n \"committer\": self.committer.dumps(),\n }", "def __init__(self, minfo=None):\n if minfo is None:\n minfo = {}\n super(DumpQuorumMessage, self).__init__(minfo)\n\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True", "def msg_info_dict(self):\n return self._msg_info_dict", "def dump(self) -> dict[Any, str]:\r\n ...", "def dump(self):\n return {\"data\": self.data, \"encoding\": self.encoding,\n \"type\": self.type_name}", "def messages(self):\n return {}", "def dump(self):\n res = []\n #res.append(\"Submeshes: %d\" % len(self.submeshes))\n #res.append(\"IdxBuf: 0x%04X bytes\" % len(self.idx_buf))\n #res.append(\"PrimFmt: 0x%04X (%s)\" % (\n # self.prim_fmt_id, self.prim_fmt))\n #res.append(\"IdxType: 0x%02X (%s)\" % (\n # self.header['idx_type'], self.idx_fmt,\n #))\n #res.append(\"IdxCnt: %d\" % self.header['idx_cnt'])\n #res.append(\"VisGrp: %d\" % self.header['visibility_group'])\n #res.append(\"Unknown: 0x%08X 0x%08X 0x%08X\" % (\n # self.header['unk08'],\n # self.header['unk10'],\n # self.header['unk34'],\n #))\n #return '\\n'.join(res).replace('\\n', '\\n ')\n\n return \"%4d│%04X│%04X %-24s│%02X %s│%5d│%5d│%08X│%08X│%08X\" %(\n len(self.submeshes),\n len(self.idx_buf),\n self.prim_fmt_id, self.prim_fmt,\n self.header['idx_type'], self.idx_fmt,\n self.header['idx_cnt'],\n self.header['visibility_group'],\n self.header['unk08'], self.header['unk10'],\n self.header['unk34'],\n )", "def get_message_payload(self):\n return {\n 'ts': self.timestamp,\n 'channel': self.channel,\n 'username': self.username,\n 'icon_emoji': self.icon_emoji,\n 'blocks': [self._get_message_block()],\n }", "def status(self):\n return {\n 'id': 'status',\n 'protocol_version': 'PV62',\n 'network': self.origin_node.network.name,\n 'td': self.origin_node.chain.head.header.difficulty,\n 'best_hash': self.origin_node.chain.head.header.hash,\n 'genesis_hash': self.origin_node.chain.genesis.header.hash,\n 'size': kB_to_MB(self._message_size['status'])\n }", "def as_dict(self):\n return {'message':self.message, 'line': self.line}", "def get_info(self):\n return {'q_ref': self.q_ref, 'v_ref': self.v_ref, 'U': self.U, 'type': 'POD'}", "async def dump_message(obj, msg, field_archiver=None):\n mtype = msg.__class__\n fields = mtype.f_specs()\n\n obj = collections.OrderedDict() if obj is None else get_elem(obj)\n for field in fields:\n await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)\n return obj", "def DumpCommand(database):\n if(database.Keys()):\n return \", \".join(database.Keys())\n else:\n return \"Nothing to dump\"", "def get_dump_status(self, uid: str) -> Dict[str, str]:\n return self.http.get(\n self.config.paths.dumps + '/' + str(uid) + '/status'\n )", "def dump():\n\t\treturn self.__dict__;", "def dumps(self):\n return {\n 'version': self.version(), # str version (M.m.s)\n 'region': self.region(), # integer type\n 'name': self.name(), # str type\n 'id': self._id, # previous integer unique id\n 'created': self._created, # created timestamp\n 'stage': self._stage, # \"entry\" if self._stage == Region.STAGE_ENTRY else \"exit\" if self._stage == Region.STAGE_EXIT else \"both\",\n 'direction': self._dir, # \"long\" if self._dir == Region.LONG else \"short\" if self._dir == Region.SHORT else \"both\",\n 'timeframe': self._timeframe, # timeframe_to_str(self._timeframe),\n 'expiry': self._expiry, # 
datetime.fromtimestamp(self._expiry).strftime('%Y-%m-%dT%H:%M:%S'),\n }", "def report_dump_runinfo(dump_items):\n runinfo_lines = [\"name:%s; status:%s; updated:%s\" %\n (item.name(), item.status(), item.updated())\n for item in dump_items]\n runinfo_lines.reverse()\n txt_content = \"\\n\".join(runinfo_lines)\n content = {}\n content['txt'] = txt_content + \"\\n\"\n # {\"jobs\": {name: {\"status\": stuff, \"updated\": stuff}}, othername: {...}, ...}\n content_json = {\"jobs\": {}}\n for item in sorted(dump_items, reverse=True, key=lambda job: job.name()):\n content_json[\"jobs\"][item.name()] = {'status': item.status(), 'updated': item.updated()}\n content['json'] = json.dumps(content_json)\n return content", "def dump(self):\n\n result = {\n 'size': self.size,\n 'type': self.type,\n 'filename': self.fullpath,\n 'changed': self.changed,\n }\n\n return result", "def getData(self):\n return dict(self._dump_data)", "def _dump_queue(self):\n outfile = self.registryValue('dumpFile')\n with open(outfile, 'w') as h:\n i = 1\n for nick, msg in self._queue:\n if msg is None:\n msg = '[no message]'\n h.write(\"% 2d\\t%s\\t%s\\n\" % (i, nick, msg))\n i += 1", "def dumps(self) -> Dict[str, Any]:\n return {\"number\": self.number, \"title\": self.title}", "def message2std(message):\n message['query_graph'] = message.pop('question_graph')\n for node in message['query_graph']['nodes']:\n node['node_id'] = node.pop('id')\n for edge in message['query_graph']['edges']:\n edge['edge_id'] = edge.pop('id')\n return message", "def dump(self):\n avps = self.get_all_avps_contents()\n auth = self.compute_authenticator(avps)\n header = struct.pack(RadiusMessage.RADIUS_HDR_TMPL, self.code,\n self.pid, len(self), auth)\n return b\"\".join([header, avps])", "def serialize(self):\n return {'id': self.id,\n 'rowId': self.id,\n 'run_uuid': self.run_uuid,\n 'exc_info': self.exc_info,\n 'exc_text': self.exc_text,\n 'filename': self.filename,\n 'func_name': self.func_name,\n 'level_name': self.level_name,\n 'level_no': self.level_no,\n 'line_no': self.line_no,\n 'message': self.message,\n 'module': self.module,\n 'name': self.name,\n 'pathname': self.pathname,\n 'process': self.process,\n 'process_name': self.process_name,\n 'relative_created': self.relative_created,\n 'stack_info': self.stack_info,\n 'thread': self.thread,\n 'thread_name': self.thread_name,\n 'time_collected': datetime_to_str(self.time_collected),\n }", "def msg_info_multiple_dict(self):\n return self._msg_info_multiple_dict", "def help_dump(self):\n print(DUMP)", "def messages(self) -> dict:\n raise NotImplementedError", "def json_dump(self):\n return {\n 'log': {\n 'title': self.str_title,\n 'body': self.str_payload\n }\n }", "def printMixData(self):\n\t\tprint \"OPERATED MIXNODE: Name: %s, address: (%d, %s), PubKey: %s\" % (self.name, self.port, self.host, self.pubk)", "def dump(self) -> dict:\n d = {}\n for item in self.__dict__:\n if item in ['parsed', 'dump', 'parse_data', 'iter_list', 'safe_load']:\n continue\n if isinstance(self.__dict__[item], ConfigKey):\n d[item] = self.__dict__[item].dump()\n elif isinstance(self.__dict__[item], list):\n d[item] = self.iter_list_dump(self.__dict__[item])\n else:\n d[item] = self.__dict__[item]\n return d", "def summarize (self):\n return {\n 'UnhandledMsgTypeCodes': self.__unhandled_messages,\n 'TotalFramesReceived': self.__frame_sequence,\n 'MsgTypeCounts': self.__msg_counts\n }", "def to_dict(self):\n return dumpd(self)", "def dump(self):\n\n result = {\n 'verb': self.verb,\n 'whitelist_name': 
self.whitelist_name,\n 'permissioned_public_keys': self.permissioned_public_keys,\n 'permissioned_addrs': self.permissioned_addrs\n }\n return result", "def std2message(query):\n message = query['query_message']\n message['question_graph'] = message.pop('query_graph')\n for node in message['question_graph']['nodes']:\n node['id'] = node.pop('node_id')\n for edge in message['question_graph']['edges']:\n edge['id'] = edge.pop('edge_id')\n return message", "def dumpDmesg(self):\n pass", "def dump(self):\n return self.dump_internal(0)", "def dump(self):\n schema = _dict2schema(self._fields)()\n dump_result = schema.dump(self._values)\n return dump_result.data if MARSHMALLOW_VERSION_INFO[0] < 3 else dump_result", "def _describe(self) -> Dict[str, Any]:\n return {\n \"run_id\": self._run_id,\n \"prefix\": self._prefix,\n }", "def ping_data_raw(self) -> dict:\n return self._ping_data_raw", "async def dict(self):\n\n #pylint disable=no-member\n\n args = {}\n for item, value in self._args._asdict().items():\n args[item] = value\n\n message = {\n 'message_type': 'command',\n 'command_type': self.__class__.__name__,\n 'message': {\n 'arguments': args\n }\n }\n return message", "def to_dict(self) -> Dict[str, Any]:\n return {\"status\": self.status.name, \"message\": self.message}", "def store_queue_for_restart(queue):\n if TEST_MODE:\n return queue.__dict__\n if not queue.currentM:\n logger.error('Message was not found in queue for restart daemon.')\n return None\n return {\n 'conn_region': queue.conn.region.name,\n 'queue_name': queue.q.name,\n 'body': queue.currentM.get_body(),\n 'attributes': queue.currentM.attributes,\n 'md5_message_attributes': queue.currentM.md5_message_attributes,\n 'message_attributes': queue.currentM.message_attributes,\n 'receipt_handle': queue.currentM.receipt_handle,\n 'id': queue.currentM.id,\n 'md5': queue.currentM.md5\n }", "def dump(self, packet):\n # packet is already decoded\n msg = {\n SRCE: packet[DEST], \n DEST: packet[SRCE], \n TYPE: TABL,\n MESG: list(map(lambda r: ({ \n NTWK: r[MESG][NTWK],\n NMSK: r[MESG][NMSK],\n PEER: r[SRCE],\n }),\n self.routes))\n }\n self.sockets[packet[SRCE]].send(json.dumps(msg).encode())\n return True", "def dump(self):\n return dict([(k, v) for k, v in vars(self).items() if not k.startswith('_')])", "def to_dictionary(self):\n return {'pubkey': self.pubkey.to_dictionary(), 'T': self.T,\n 'C': self.C.to_dictionary(), 'D': self.D.to_dictionary(), 'sigma': self.sigma.to_dictionary()}", "def _messages(self):\n q = [json.loads(i)['message'] for i in self.client.kv.get(\n 'rhumba.q.testqueue', [])]\n return q", "def info() -> Dict[str, Any]:", "def dump_dict(self):\n\n dump_dict = dict()\n\n dump_dict['Structure'] = self.name\n\n # Refer to the __set_format__ method for an explanation\n # of the following construct.\n for keys in self.__keys__:\n for key in keys:\n\n val = getattr(self, key)\n if isinstance(val, int) or isinstance(val, long):\n if key == 'TimeDateStamp' or key == 'dwTimeStamp':\n try:\n val = '0x%-8X [%s UTC]' % (val, time.asctime(time.gmtime(val)))\n except exceptions.ValueError, e:\n val = '0x%-8X [INVALID TIME]' % val\n else:\n val = ''.join(filter(lambda c:c != '\\0', str(val)))\n\n dump_dict[key] = {'FileOffset': self.__field_offsets__[key] + self.__file_offset__,\n 'Offset': self.__field_offsets__[key],\n 'Value': val}\n\n return dump_dict", "def database_dump(self):\r\n print('=====Dumping database=====')\r\n self.database_table_dump(query.TABLE_STATS)\r\n print()\r\n 
self.database_table_dump(query.TABLE_TWEETS)\r\n print()\r\n self.database_table_dump(query.TABLE_POSTS)\r\n print()\r\n self.database_table_dump(query.TABLE_FOLLOWS)", "def dump(self):\n return []", "def dump(self):\n result = super(BattleshipTransaction, self).dump()\n\n result['Name'] = self._name\n result['Action'] = self._action\n result['Ships'] = self._ships\n if self._action == 'JOIN':\n result['Board'] = self._board\n if self._action == 'FIRE':\n result['Row'] = self._row\n result['Column'] = self._column\n if self._reveal_space is not None:\n result['RevealSpace'] = self._reveal_space\n if self._reveal_nonce is not None:\n result['RevealNonce'] = self._reveal_nonce\n\n return result", "def msg(self):\n if \"msg\" in self._json:\n return self._json[\"msg\"]\n elif \"detail\" in self._json:\n return self._json[\"detail\"]\n else:\n return self._json", "def info(self) -> dict:", "def dump(self):\n result = super(PermissionedValidatorRegistryTransaction, self).dump()\n\n result['Update'] = self.update.dump()\n\n return result", "def makeAMQPmsg(self):\n msg = {'msgType' : 'AgentUpdate',\n 'AgentType': 'Bus',\n 'Extnum':self.Extnum,\n 'Vm': self.cv['Vm'],\n 'Va': self.cv['Va'],\n }\n return msg", "def dumps(self) -> Dict[str, Any]:\n return {\"name\": self.name, \"date\": self.date}", "def info(self):\n return {}", "def serialized(self):\r\n return {'name':self._group.name, 'ip':self._ip}", "def to_dict(self):\n return {\n \"uuid\": self.uuid,\n \"name\": self.name,\n \"description\": self.description,\n # \"boxes\": [box.uuid for box in self.boxes],\n }", "def __repr__(self) -> str:\n dump_conf = copy.deepcopy(self)\n string = \"\"\n for k in dump_conf:\n v = dump_conf[k]\n if k == \"wpscan_args\":\n v = safe_log_wpscan_args(v)\n if k == \"smtp_pass\" and v != \"\":\n v = \"***\"\n if isinstance(v, (list, dict)):\n v = json.dumps(v)\n else:\n v = str(v)\n string += f\"\\n{k:<25}\\t=\\t{v}\"\n return string", "def dump(self):\n return self._data.dump()", "def dump(self, packet):\n #self.print_table()\n src = packet[\"dst\"]\n dst = packet[\"src\"]\n routes_dump = []\n for route in self.routes:\n for verat in route[\"varats\"]:\n routes_dump.append({\"network\": verat[\"network\"], \"netmask\": verat[\"netmask\"],\n \"peer\": route[\"peer\"]})\n \n a = {\"src\": src, \"dst\": dst, \"type\": \"table\", \"msg\": routes_dump}\n return a", "def serialize(self):\n return {\n 'oid' : self.oid,\n 'title' : self.title,\n 'pickupaddr' : self.pickupaddr,\n 'pickuptime' : self.pickuptime.strftime(DATETIME_FORMAT) if self.pickuptime else '',\n 'did' : self.did,\n 'totalcargosize' : self.totalcargosize,\n 'trucksize' : self.trucksize,\n 'initialfee' : self.initialfee,\n 'perstopfee' : self.perstopfee,\n 'status' : self.status,\n 'drivername' : self.drivername,\n 'driverphone' : self.driverphone,\n 'deliverdate' : self.deliverdate.strftime(DATETIME_FORMAT) if self.deliverdate else '',\n 'finishedtime' : self.finishedtime.strftime(DATETIME_FORMAT) if self.finishedtime else '',\n 'pickupaddr_lat': self.pickupaddr_lat,\n 'pickupaddr_lng': self.pickupaddr_lng,\n 'participants': self.participants\n }", "def info(dump_alloc_table: bytes, /) -> None:", "def dump(msg_or_socket, format_msg=None):\n if isinstance(msg_or_socket, zmq.Socket):\n # it's a socket, call on current message\n msg = msg_or_socket.recv_multipart()\n else:\n msg = msg_or_socket\n content = \"-\" * 40 + \"\\n\"\n msg = msg if isinstance(msg, list) else [msg]\n for part in msg:\n content += f\"[{len(part):03d}] \"\n try:\n content += 
format_msg(part) if format_msg else part.decode()\n except UnicodeDecodeError:\n content += f\"0x{binascii.hexlify(part).decode()}\"\n content += '\\n'\n sys.stdout.write(content)\n sys.stdout.flush()", "def message(self) -> str:\n return self.fields.get('msg', self.raw_string)", "def get(self):\n server = self.get_argument(\"server\")\n redis_info = self.stats_provider.get_info(server)\n databases=[]\n\n for key in sorted(redis_info.keys()):\n if key.startswith(\"db\"):\n database = redis_info[key]\n database['name']=key\n databases.append(database)\n\n total_keys=0\n for database in databases:\n total_keys+=database.get(\"keys\")\n\n if(total_keys==0):\n databases=[{\"name\" : \"db0\", \"keys\" : \"0\", \"expires\" : \"0\"}]\n\n redis_info['databases'] = databases\n redis_info['total_keys']= self.shorten_number(total_keys)\n\n uptime_seconds = redis_info['uptime_in_seconds']\n redis_info['uptime'] = self.shorten_time(uptime_seconds)\n\n commands_processed = redis_info['total_commands_processed']\n commands_processed = self.shorten_number(commands_processed)\n redis_info['total_commands_processed_human'] = commands_processed\n\n self.write(redis_info)", "def sync_dict(self):\n #pylint disable=no-member\n\n args = {}\n for item, value in self._args._asdict().items():\n args[item] = value\n\n message = {\n 'message_type': 'command',\n 'command_type': self.__class__.__name__,\n 'message': {\n 'arguments': args\n }\n }\n return message", "def get_signal_dump():\n divider = '-' * 80\n frequency = [SendSignal.__SIGNAL_SETTINGS['frequency'], SendSignal.__SIGNAL_OBJ.getFreq()]\n baud_rate = [SendSignal.__SIGNAL_SETTINGS['baud_rate'], SendSignal.__SIGNAL_OBJ.getMdmDRate()]\n\n print(\"SIGNAL TRANSMIT INFORMATION\")\n print(divider)\n print('== Frequency Configuration ==')\n print(\"Frequency : {0} MHz - {1} MHz\".format(frequency[0], frequency[1][0]))\n print(\"Channel : {0}\".format(SendSignal.__SIGNAL_OBJ.getChannel()))\n print(\"\\n== Modem Configuration ==\")\n print(\"Modulation : {0}\".format(SendSignal.__SIGNAL_OBJ.getMdmModulation()))\n print(\"Baud rate : {0} MHz - {1} MHz\".format(baud_rate[0], baud_rate[1]))\n print(\"Channel bandwidth : {0} MHz\".format(SendSignal.__SIGNAL_OBJ.getMdmChanBW()))\n print(\"Channel spacing : {0} MHz\".format(SendSignal.__SIGNAL_OBJ.getMdmChanSpc()))\n print(\"Deviation : {0} MHz\".format(SendSignal.__SIGNAL_OBJ.getMdmDeviatn()))\n print(\"Sync Mode : {0}\".format(SendSignal.__SIGNAL_OBJ.getMdmSyncMode()))\n print(\"Preamble : {0}\".format(SendSignal.__SIGNAL_OBJ.getMdmNumPreamble()))\n print(\"\\n== Packet Configuration ==\")\n print(\"Sync Word : {0}\".format(SendSignal.__SIGNAL_OBJ.getMdmSyncWord()))\n print(\"\\n== Signal ==\")\n print(\"Text : {0}\".format(SendSignal.__SIGNAL_SETTINGS['text_message']))\n print(\"Repeats : {0}\".format(SendSignal.__SIGNAL_SETTINGS['repeats']))\n print(divider)", "def rpc_getblockchaininfo(self) -> dict:\n return self._call_command([\"getblockchaininfo\"])", "def print_ofpt_echo_reply(msg):\n if len(msg.data.value) > 0:\n hexdump(msg.data.value)", "def serialize(self):\n return {\n 'id': self.id,\n 'ip': self.ip,\n 'starttime': self.starttime,\n 'processtime': self.processtime,\n 'error': self.error,\n 'misc': self.misc\n }", "def new_message(self):\n msg = {}\n msg['data'] = []\n msg['type'] = self.plugin_type\n msg['source'] = self.name\n msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()\n return msg", "def to_dump(self):\n s = []\n for k in self.keys():\n if isinstance(self[k], 
int) or isinstance(self[k], long):\n s.append(\"%s=%d\" % (k, self[k]))\n elif isinstance(self[k], float):\n s.append(\"%s=%f\" % (k, self[k]))\n else:\n for v2 in self.list(k):\n if isinstance(v2, str):\n s.append(\"%s=%s\" % (k, v2))\n else:\n s.append(\"%s=%s\" % (k, util.encode(v2)))\n s.append(\"~format=%s\" % self.format)\n s.append(\"\")\n return \"\\n\".join(s)", "def get_result(self):\n print('''message: {}\nopen key: ({}, {})\nencoded message: {}'''.format(self.msg, self.n, self.e, self.__encoded_msg))", "def serialize(self):\n\n # The len must be multiple of 4 bits to convert unambiguously\n\n id_len = self.id.bit_length()\n while (id_len % 4)!= 0:\n id_len += 1\n if self.payload:\n pay_len = self.payload.bit_length()\n while (pay_len % 4)!= 0:\n pay_len += 1\n else: pay_len = 0\n if self.command:\n com_len = self.command.bit_length()\n while (com_len % 4)!= 0:\n com_len += 1\n else: com_len = 0\n\n values = {\n \"id\": self.id,\n \"id_len\": id_len,\n \"payload\": self.payload,\n \"payload_len\": pay_len,\n \"command\": self.command,\n \"command_len\": com_len\n }\n\n\n if self.id == Message.MEASURE or self.id == Message.SINGLE_MEASURE:\n serial_format = (\n \"uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n else:\n serial_format = (\n \"0x23, uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n\n message = bitstring.pack(serial_format, **values)\n\n rospy.logdebug(\"Sent command '0x%s'\", message.hex)\n\n return message.tobytes()", "def serialize(self):\n return {\n 'time_stamp' : self.time_stamp,\n 'email' : self.email,\n 'amount_deposit' : self.amount_deposit,\n 'amount_withdraw' : self.amount_withdraw,\n }", "def get_alerts(node: CephAdmin) -> dict:\n cmd = \"ceph health detail\"\n all_alerts = {}\n out, err = node.shell([cmd])\n regex = r\"(\\(MUTED[\\w\\s,-]*\\))?\\s*\\[\\w{3}\\]\\s([\\w_]*):\"\n alerts = re.findall(regex, out)\n all_alerts[\"active_alerts\"] = [alert[1] for alert in alerts if not alert[0]]\n all_alerts[\"muted_alerts\"] = [alert[1] for alert in alerts if alert[0]]\n return all_alerts", "def serialize(self):\n return {\n 'special_messages': self.special_messages,\n 'description': self.description,\n 'name': self.name,\n 'id': self.id,\n }", "def dataForMonitoring(self):\n dict = MinderBase.dataForMonitoring(self)\n \n dict['nTests'] = len(self.tests.keys())\n dict['done'] = self.isDone()\n dict['nTestsSuccess'] = len([s for s in self.finishedTests if s.result == 0])\n dict['nTestsFailure'] = len([s for s in self.finishedTests if s.result != 0])\n dict['nRetries'] = self.errorStateCurRetry\n dict['ppFailure'] = (self.postProcessingResult == 'error')\n dict['ppSuccess'] = (self.postProcessingResult == 'success')\n\n return dict", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)", "def _read_message(self):\n header = self._read_amt(9)\n msg_size = struct.unpack_from(\">q\", header, 1)[0]\n return header + self._read_amt(msg_size - 9)", "def stat_message(message):\n return {\n 'id': message['id'],\n 'age': message['age'],\n 'created': message['created'],\n }", "async def dump(self, data: dict, file: IO):", "def to_dict(self):\n config = {'min_length': self.min_length, 'max_length': self.max_length}\n return {'node_type': 
'Bytes', 'config': config}", "def dump_state(self, p):\n big_data = {\n \"players\": [p.dump() for p in self.players],\n \"nplayers\": self.nplayers,\n \"state\": self.state,\n\n \"turn\": self.turn,\n \"idx\": self.idx,\n \"mission_results\": self.mission_results,\n\n \"game_setup\": self.game_setup,\n \"lady\": self.lady,\n \"mission_size\": self.mission_size,\n \"fails_required\": self.fails_required,\n \"imposition_in\": self.imposition_in,\n\n \"good_wins\": self.good_wins,\n\n \"self\": {}\n }\n\n if p:\n big_data[\"self\"] = p.format_info(self.players)\n\n return big_data", "def asdict(self):\n return OrderedDict({\n 'name': self.name,\n 'fullname': self.fullname,\n 'msgtype': self.msgtype,\n 'rostype_name': self.rostype_name,\n })", "def redis_info(self):\n def func(server):\n return server.info()\n self.__run_redis_cmd(func)", "def get_payload(self):\n return {'message': 'bar'}", "def to_replication_dict(self):\n return {}", "def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "def test_execute_dump_var_transaction(self):\n\n instruction = Instruction(\"dump(3)\")\n\n with std_out() as (out, err):\n self.transaction_manager.execute(instruction)\n\n output = out.getvalue().strip()\n self.assertEqual(output, \"{'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}\")", "def print_metadata():\n data = {\n 'python_implementation': platform.python_implementation(),\n 'python_version_info': tuple(sys.version_info),\n 'pickle_protocol': pickle.HIGHEST_PROTOCOL,\n }\n if sys.version_info < (3,):\n out_stream = sys.stdout\n else:\n out_stream = sys.stdout.buffer\n out_stream.write(json.dumps(data).encode(_IPC_ENCODING) + b'\\n')", "def as_create_dict(self) -> dict:\n out_dict = self.as_dict()\n try:\n out_dict.pop('numShards', None)\n out_dict.pop('mongoURI', None)\n out_dict.pop('mongoDBVersion', None)\n out_dict.pop('mongoURIUpdated', None)\n out_dict.pop('mongoURIWithOptions', None)\n out_dict.pop('paused', None)\n out_dict.pop('srvAddress', None)\n out_dict.pop('links', None)\n out_dict.pop('state_name', None)\n except KeyError:\n pass\n try:\n out_dict['replicationSpecs'][0].__delitem__('id')\n except KeyError:\n pass\n return out_dict", "def messages(self) -> Mapping[str, wrappers.MessageType]:\n return collections.OrderedDict(\n (k, v) for k, v in self.all_messages.items()\n if not v.meta.address.parent\n )", "def __repr__(self):\n return \"\\n\".join((\"bpq_kind: %d\" % self.bpq_kind,\n \"matching_rule: %d\" % self.matching_rule,\n \"creation_ts: %d\" % self.creation_ts,\n \"creation_eq: %d\" % self.creation_seq,\n \"src_eid_len: %d\" % self.src_eid_len,\n \"src_eid: %s\" % self.src_eid,\n \"bpq_id_len: %d\" % self.bpq_id_len,\n \"bpq_id: %s\" % self.bpq_id,\n \"bpq_val_len: %d\" % self.bpq_val_len,\n \"bpq_val: %s\" % self.bpq_val,\n \"frag_cnt: %s\\n\" %self.frag_cnt)) + \\\n \"\".join(map(lambda d : \"offset: %d, length: %d\\n\" % (d[\"frag_offset\"],\n d[\"frag_len\"]),\n self.frag_desc))", "def get_update_packet(self):\n send_messages = []\n while len(self.unread_messages) > 0:\n pkt = self.unread_messages.pop(0)\n send_messages.append(pkt.data)\n done_text = None\n if self.state.is_final() and \\\n self.get_status() != AssignState.STATUS_DONE:\n done_text = self.state.get_inactive_command_text()[0]\n return {\n 'new_messages': send_messages,\n 'all_messages': 
self.state.get_messages(),\n 'wants_message': self.wants_message,\n 'disconnected': self.disconnected,\n 'agent_id': self.id,\n 'worker_id': self.worker_id,\n 'conversation_id': self.conversation_id,\n 'task_done': self.state.is_final(),\n 'done_text': done_text,\n 'status': self.state.get_status(),\n }", "def procinfo(self):\n\n info = {}\n info[\"state\"] = self.state\n info[\"user\"] = self.user\n info[\"ruser\"] = self.ruser\n info[\"uid\"] = self.uid\n info[\"ruid\"] = self.ruid\n info[\"gid\"] = self.gid\n info[\"rgid\"] = self.rgid\n info[\"pid\"] = self.pid\n info[\"ppid\"] = self.ppid\n info[\"pgid\"] = self.pgid\n info[\"pri\"] = self.pri\n info[\"pcpu\"] = self.pcpu\n info[\"pmem\"] = self.pmem\n info[\"vsz\"] = self.vsz\n info[\"rss\"] = self.rss\n info[\"time\"] = self.time\n info['timesec'] = self.timeconv(self.time)\n info[\"stime\"] = self.stime\n info[\"f\"] = self.f\n info[\"tty\"] = self.tty\n info[\"nice\"] = self.nice\n info[\"wchan\"] = self.wchan\n info[\"comm\"] = self.comm\n info[\"args\"] = self.args\n info[\"procname\"] = self.procname\n\n return info", "def dump(self):\n return", "def msg(self):\n\t\treturn self.message" ]
[ "0.6006111", "0.567213", "0.5670017", "0.5648441", "0.5578808", "0.53971803", "0.5394207", "0.5387473", "0.53841007", "0.5374053", "0.5371038", "0.53614724", "0.53580964", "0.5316283", "0.5305731", "0.52922034", "0.52715415", "0.51807743", "0.5176574", "0.51549894", "0.5138521", "0.51342803", "0.512532", "0.5109059", "0.50799423", "0.507258", "0.5060591", "0.5055935", "0.5043304", "0.49712044", "0.49593332", "0.49486017", "0.49481243", "0.49325508", "0.49242467", "0.49236068", "0.49192616", "0.49173278", "0.4914605", "0.49104974", "0.490288", "0.48977005", "0.48721597", "0.4853593", "0.48535693", "0.4851623", "0.48423302", "0.48226607", "0.48120147", "0.48092267", "0.48021522", "0.4801439", "0.47981253", "0.4789115", "0.4788879", "0.47655112", "0.4743837", "0.47423017", "0.47314814", "0.47253478", "0.47218665", "0.47193053", "0.47192487", "0.47105414", "0.46993706", "0.46915516", "0.46772522", "0.46763495", "0.46712527", "0.46612474", "0.46597055", "0.46574858", "0.4655367", "0.4655229", "0.46527547", "0.46523333", "0.46485585", "0.46428764", "0.46425807", "0.46384278", "0.46348694", "0.4630579", "0.46286651", "0.46168035", "0.4614804", "0.4614407", "0.46112683", "0.46047002", "0.45996067", "0.45991942", "0.459539", "0.45906025", "0.45836008", "0.4583256", "0.4582609", "0.45808575", "0.4575249", "0.45674258", "0.45668206", "0.45602152" ]
0.7622692
0
The entry endpoint of our API.
def api_root(request, format=None):
    return Response({
        'users': reverse('user-list', request=request),
        'groups': reverse('group-list', request=request),
        'pools': reverse('pool-list', request=request),
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api():\n\treturn \"The API call\"", "def home():\n return(\n f\"Available Routes: <br/>\"\n\n f\"For Precipitation: /api/v1.0/precipitation<br/>\"\n f\"Returns Jsonify dictionary of dates and Precepitation<br/><br/>\"\n\n f\"For list of Stations: /api/v1.0/stations<br/>\"\n f\"Returns Jasonify list of stations <br/><br/>\"\n\n f\"For last year temperatures: /api/v1.0/tobs<br/>\"\n f\"Returns Jsonify dictionary of Temperature Observations for last year<br/><br/>\"\n\n f\"Temperature result from the date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures from given start date of dataset<br/><br/>\"\n\n f\"Temperature result from start date to end date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given date range\"\n\n )", "def index():\n return (\n f\"Welcome to the Climate App API!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&lt;start&gt;<br/>\"\n f\"/api/v1.0/&lt;start&gt;/&lt;end&gt;\"\n )", "def get(self) -> Response:\n return set_response_headers(jsonify(get_doc().entrypoint.get()))", "def api_endpoint(self) -> str:\n return pulumi.get(self, \"api_endpoint\")", "def api_endpoint(*args):\n endpoint = HOIIO_API_ENDPOINT + '/'.join(args)\n return endpoint", "def api(self) -> str:", "def api_endpoint():\n return 'localhost'", "def entry_point():", "def entry_point():", "def entry_point():", "def index():\n return (\n f\"Welcome to my Hawaii trip info!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def main():\n return execute_api(Freta(), [Endpoint], __version__)", "def Home():\n return (\n f\"Welcome to the Climate App<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def welcome():\n return (\n f\"/api/v1.0/precipitation<br/>Returns a JSON list of percipitation data for the dates between 8/23/16 and 8/23/17<br/><br/>\"\n f\"/api/v1.0/stations<br/>Return a JSON list of stations from the dataset\"\n f\"/api/v1.0/tobs<br/>Return a JSON list of temperature observations (TOBS) for the previous year.\"\n f\"/api/v1.0/<start><br/>Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given\"\n f\"/api/v1.0/<start>/<end>Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given\"\n )", "def cmd_entry():\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"Web based frontend to the health record system databaser\"\n )\n parser.add_argument('-c', '--config', required=True, help=\"Config file to load\")\n args = parser.parse_args()\n\n main(args.config)", "def api():\n api_routes = [\n \"/api/v1.0/beer\",\n \"/api/v1.0/breweries\",\n ]\n return render_template(\"api.html\", api_routes = api_routes)", "def overview():\n return render_template('api/api.html', title='API Overview')", "def home():\n return (\n f\"Welcome to the Hawaii Weather API<br/>\"\n \"<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date<br/>\"\n 
f\"/api/v1.0/start_date/end_date<br/>\"\n \"<br/>\"\n f\"Date format: YYYY-MM-DD\"\n )", "def index():\n return 'Your api is up and running!'", "def homepage():\n # thread gets created to service the request\n return(\n f\"(Dates ranges from 2010-01-01 to 2017-08-23). <br><br>\"\n f\"Available Routes: <br>\"\n\n f\"/api/v1.0/precipitation<br/>\"\n f\"Returns dates and temperature from last year. <br><br>\"\n\n f\"/api/v1.0/stations<br/>\"\n f\"Returns json list of stations. <br><br>\"\n\n f\"/api/v1.0/tobs<br/>\"\n f\"Returns list of Temperature Observations(tobs) for previous year. <br><br>\"\n\n f\"/api/v1.0/<start><br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given start date.<br><br>\"\n\n f\"/api/v1.0/<start>/<end><br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given date range.\"\n\n \n )", "def home():\n return (\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n \n )", "def api_url(self, endpoint):\n\n return '{}/{}'.format(self.api_root, endpoint)", "def getAPI(self):\n return self.api_url", "def get_endpoint(self, *args):\n\t\traise NotImplementedError", "def entry(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entry\")", "def index():\n # Message to the user\n message = {\n 'api_version': 'v1.0',\n 'status': '200',\n 'message': 'Welcome to the Flask API'\n }\n # Making the message looks good\n resp = jsonify(message)\n\n # Returning the object\n return resp", "def root(self):\n return self.app.get('/',headers=self.headers)", "async def async_get(\n self, endpoint: str | None = None, entry: str | None = None\n ) -> Any:\n url = f\"{self.url}/api/\"\n if endpoint:\n url = self.endpoints[endpoint]\n if entry:\n url = f\"{url}{entry}\"\n with async_timeout.timeout(10):\n resp = await self.session.get(\n url=url,\n headers=self.headers,\n raise_for_status=True,\n )\n\n return await resp.json()", "def entry_point():\n return render_template(\"index.html\")", "async def api():\n return {\n \"links\": {\n \"datasets\": \"/datasets\",\n \"natural_earth_feature\": \"/natural_earth_feature\",\n \"viewport\": \"/viewport\"\n }\n }", "def welcome():\n return (\n f\"Welcome to the climate Analysis API!<br/>\"\n f\"Here are available API routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def get_api_event(self):\n pass", "def entry_point():\n pass", "def home():\n return(\n f\"Available Routes:<br/>\"\n f\"Precipitation: /api/v1.0/precipitation<br/>\"\n f\"List of Stations: /api/v1.0/stations<br/>\"\n f\"Temperature for one year: /api/v1.0/tobs<br/>\"\n f\"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n f\"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n )", "def index(self):\n self.client.get(f\"{host}/\")", "def endpoint(self) -> str:\n return pulumi.get(self, \"endpoint\")", "def endpoint(self) -> str:\n return pulumi.get(self, \"endpoint\")", "def welcome():\n return (\n f\"Avalable Routes:<br/>\"\n f\"/api/v1.0/precipitation - List of Precipitation Observations from the previous year<br/>\"\n\n f\"/api/v1.0/stations\"\n f\"- List of observation stations<br/>\"\n\n f\"/api/v1.0/tobs\"\n f\"- List of Temperature Observations (tobs) for the previous year<br/>\"\n\n f\"/api/v1.0/temps/&ltstart&gt/&ltend&gt\"\n f\"- Min, avg, max temp 
for start or start-end date range (format yyyy-mm-dd)<br/>\"\n\n )", "def _api_endpoint(self) -> str:\n return ApiEndpoints.ARTIFACT_TYPES.value", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/preciptation<br/>\"\n f\"/api/v1.0/Stations\"\n )", "def api_url(self):\n return self.get_api_url()", "def home():\n return (\n f\"These are the available routes:</br>\"\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/< start ></br>\"\n f\"/api/v1.0/< start >/< end ></br>\"\n )", "def index():\n return 'Thanks for using the Bird Stats API.'", "def index():\n return \"Attendance Flask server\"", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def home():\n rkwargs = {\n 'description': 'Shows API info',\n 'message': 'Welcome to the lung cancer prediction API!',\n 'links': {algo: '{}{}/predict/'.format(request.url_root, algo) for\n algo in PREDICTORS.keys()}\n }\n\n return jsonify(**rkwargs)", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix=\"/\")", "def index():\n response = jsonify(\n {'message':'Hello, RESTful API development!'}\n )\n \n return response, 200", "def index():\n\n return redirect(api)", "def render_GET(self, request):\n timestamp = int(self.url_matches[\"timestamp\"])\n \n if request.api_mode == \"prod\":\n mode_string = \"I'm production baby!\"\n elif request.api_mode == \"test\":\n mode_string = \"I'm in testing mode. :(\"\n else:\n mode_string = \"I have no clue what mode I'm in.\"\n \n response = \"PONG! Right back at ya. %s \" % mode_string\n response = response + \" (Timestamp Val: %d) \" % timestamp\n response = response + \"(API: %s, Version: %s, Mode: %s)\" % (request.api_name,\n request.api_version,\n request.api_mode)\n webapi.write_json(request, response)", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)/end_date(yyyy-mm-dd)<br/>\")", "def url(self):\n ...", "def entry(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entry\")", "def Home():\n return(\n f\"Hawaii Climate Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"and<br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def get_blueprint():\n return REQUEST_API", "def api_root(request, format=None):\n return Response({\n 'judges' : reverse('user-list',request=request),\n 'pilots': reverse('pilot-list', request=request),\n 'marks': reverse('mark-list', request=request),\n 'results' : reverse('results-detail', request=request),\n })", "def api_root():\n\treturn jsonify({\n\t\t\"/\": ['GET'],\n\t\t\"/status\": ['GET'],\n\t\t\"/refresh\": ['GET'],\n\t\t\"/credit\": ['GET', 'PUT', 'PATCH'],\n\t\t\"/items\": ['GET', 'POST'],\n\t\t\"/items/<string:name>\": ['GET', 'PUT', 'PATCH', 'DELETE'],\n\t\t\"/items/<string:name>/price\": ['GET'],\n\t\t\"/channels\": ['GET', 'POST'],\n\t\t\"/channels/<int:channel>\": ['GET', 'PUT', 'PATCH', 'DELETE'],\n\t\t\"/channels/<int:channel>/price\": ['GET'],\n\t\t\"/channels/<int:channel>/vend\": ['POST'],\n\t\t\"/vend\": ['POST'],\n\t})", "def home():\r\n return (\r\n \"<h1><center>WELCOME TO SURF'S 
UP!</center></h1><br/>\"\r\n \"<h2><center>Please plug in the browser any of the available routes:</h2></center><br/>\"\r\n \"<h3><center>/api/v1.0/precipitation</h3></center><br/>\"\r\n \"<h3><center>/api/v1.0/stations</h3></center><br/>\"\r\n \"<h3><center>/api/v1.0/tobs</h3></center><br/>\"\r\n \"<h3><center>/api/v1.0/<start></h3></center>\"\r\n \"<center>Note: Type the start date in the form of %mm-%dd</center>\"\r\n \"<h3><center>/api/v1.0/<start>/<end></h3></center>\"\r\n \"<center>Note: API request takes two parameters: Start date / End date</center>\"\r\n \"<center>Type dates in the form of %yyyy-%mm-%dd</center>\"\r\n \"<br/>\"\r\n \"<br/>\"\r\n \"<br/>\"\r\n \"<center>MJV</center>\"\r\n )", "def route(self):\n pass", "def api_root(request, format=None):\n\n return Response({\n 'entities': reverse('entity-list', request=request),\n 'budgets': reverse('budget-list', request=request),\n 'actuals': reverse('actual-list', request=request),\n })", "def entry_point(self, entry_point):\n\n self._entry_point = entry_point", "def api_root(request, format=None):\n return Response({\n 'users': reverse('user-list', request=request),\n 'tweets': reverse('tweet-list', request=request),\n })", "def entry_page():\n return redirect(url_for('index'))", "def api_root(request, format=None):\n return Response(\n {\n 'users': reverse('cloud:user-list', request=request),\n 'configuration': reverse('cloud:configuration-detail', request=request),\n 'statistics': reverse('cloud:statistics-detail', request=request),\n 'state': reverse('cloud:state-detail', request=request)\n }\n )", "def endpoint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint\")", "def endpoint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint\")", "def welcome():\r\n return (\r\n f\"Welcome to the Climate App<br/>\"\r\n f\"<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"Available Routes:<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"- JSON list of last year's precipitation data<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"- JSON list of station data<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"- JSON list of temperature observation data from the stations<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/start<br/>\"\r\n f\"- JSON list of the minimum, average and maximum temperature when given the start date only (YYYY-MM-DD), for dates greater than and equal to the start date<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/start/end<br/>\"\r\n f\"- JSON list of the minimum, average and maximum temperature when given the start and end dates (YYYY-MM-DD) for dates between the start and end date inclusive:<br/>\"\r\n f\"<br/>\"\r\n\r\n )", "def __entry_point():\n method = request.json.get('method')\n if method is None:\n return json.dumps({'error': 'No method provided.'})\n if method not in authorized_methods:\n # unauthorized query\n return json.dumps({'error': 'Unauthorized method.'})\n fct = globals().get(method)\n if fct is None:\n # unknown method, the method is authorized, but does not exists...\n return json.dumps({'error': 'Unknown method.'})\n try:\n result = fct(request.json)\n return json.dumps(result, cls=SetEncoder)\n except Exception as e:\n return json.dumps({'error': 'Something went wrong. 
\\n{}'.format(e)})", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix='/api')", "def endpoint(self):\r\n return self._endpoint", "def welcome():\n return(\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/etf_info<br/>\"\n f\"/api/v1.0/mutualfunds_info\"\n )", "def index():\n pass", "def endpoint(self):\n return self.Endpoint", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )", "def root():\n import renku\n\n return jsonify({\"service_version\": renku.__version__, \"spec_url\": url_for(\"apispec.openapi\")})", "def get(self, request):\n pass", "def apiurl(self):\n return self._apiurl", "def index():\n return Response(\n \"Welcome to basic-http-server, you're ready to add some methods!\\n\" +\n str(request) + \"\\n\", mimetype='text/plain'\n )", "def get(self) -> Response:\n response = {\"@context\": get_doc().entrypoint.context.generate()}\n return set_response_headers(jsonify(response))", "def add_url(self, endpoint):\n api = r'(?P<resource_name>%s)/(?P<endpoint>%s)%s$' % (\n self._meta.resource_name, endpoint, trailing_slash())\n endpoint = endpoint.replace('/', '_')\n return url(api, self.wrap_view(endpoint), name='api_%s' % endpoint)", "def index():\n g.data['api_version'] = API_VERSION\n g.data['apilib_version'] = API_VERSION\n g.data['oar_version'] = VERSION\n g.data['links'] = []\n #endpoints = ('resources', 'jobs', 'config', 'admission_rules')\n endpoints = ('resources', 'jobs')\n for endpoint in endpoints:\n g.data['links'].append({\n 'rel': 'collection',\n 'href': url_for('%s.index' % endpoint),\n 'title': endpoint,\n })", "def get_api_path(self):\n return self._get_api_base() + '/' + self._get_resource_suffix()", "def cci_api():\n\n\n\t\t\treturn render_template( 'api.html' )", "def entry_member(request,member_id):\n return EntryView.__index(request,member_id)", "def index():\n webapp_config = {\n 'cloudmadeApiKey': config.cloudmade_api_key,\n 'peerserverApiKey': config.peerserver_api_key,\n }\n return render_template('index.html', config=webapp_config)", "def getEndpoint(self):\n port = \"\"\n endpoint = \"\"\n keyConfig = self.getKeyConfig()\n\n if \"port\" in keyConfig:\n port = \":\" + keyConfig[\"port\"]\n elif self._data[\"port\"] != self.PORT:\n port = \":\" + self._data[\"port\"]\n\n if \"endpoint\" in keyConfig:\n endpoint = keyConfig[\"endpoint\"]\n else:\n endpoint = self._data[\"endpoint\"]\n\n return \"https://%s%s/%s/\" % (endpoint, port, self._data[\"api_version\"])", "def welcome(): \n return (\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/<start>\"\n )", "def view_article():\n\n return render_template('article.html', api_endpoint = Parameters()[\"api.endpoint\"])", "def index():\n return make_json_response(ENDPOINT_LIST)", "def api_url(self):\n return self._api_url", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/precip<br/>\"\n\t\tf\"/api/stations<br/>\"\n\t\tf\"/api/tobs<br/>\"\n\t\tf\"/api/<start><br/>\"\n f\"/api/<start>/<end>\"\n )", "def index():\n\n if request.method == 'POST':\n app.logger.info('Event: ' + str(request.get_json()))\n process_event(request.get_json())\n return ''\n else:\n return render_template('index.html')", "def single_entry(cls, entryid):\n data = \"invalid URL,Try again\"\n response = jsonify({\"data\": data})\n response.status_code = 404\n for info in Diary.entries:\n if info['entry_id'] == entryid:\n response = 
jsonify({\"data\": info})\n response.status_code = 200\n return response", "def welcome():\n return (\n f\"Welcome to Hawaii Climate Analysis API :-)<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n )", "def welcome():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"- List of prior year rain totals from all stations<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"- List of Station numbers and names<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"- List of prior year temperatures from all stations<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/start<br/>\"\r\n f\"- When given the start date (YYYY-MM-DD), calculates the MIN/AVG/MAX temperature for all dates greater than and equal to the start date<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/start/end<br/>\"\r\n f\"- When given the start and the end date (YYYY-MM-DD), calculate the MIN/AVG/MAX temperature for dates between the start and end date inclusive<br/>\"\r\n )", "def welcome():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/<start>start_date<br/>\"\r\n f\"/api/v1.0/<start>start_date/<end>end_date\"\r\n )", "def _construct_endpoints(self):\n # Functions\n async def get_function_list_data(request: web.Request):\n entrypoints = [elm.to_dict() for elm in self._function_manager.definitions.values()]\n return web.json_response(entrypoints)\n\n async def get_function_list_text(request: web.Request):\n rows = []\n for definition in self._function_manager.definitions.values():\n rows.append(definition.function_name)\n rows.append(' URL:')\n rows.append(f' async api: /{definition.function_name}')\n rows.append(f' block api: /{definition.function_name}/keep-connection')\n rows.append(f' Max Concurrency: {definition.max_concurrency}')\n rows.append(' Description:')\n rows.append(f' {definition.description}')\n if len(definition.arg_definitions) == 0:\n rows.append(' No Args')\n else:\n rows.append(' Args')\n for arg in definition.arg_definitions:\n rows.append(f' {arg.name} {arg.type.name} {\"Requiered\" if arg.is_required else \"NOT-Required\"}')\n if arg.description != '':\n rows.append(f' {arg.description}')\n rows.append(f' Timeout: {definition.timeout} sec')\n rows.append('\\n')\n\n return web.Response(text='\\n'.join(rows))\n\n # function\n async def get_function_definition(request: web.Request):\n function_name = request.match_info['function_name']\n\n if function_name not in self._function_manager.definitions:\n raise web.HTTPNotFound()\n\n return web.json_response(self._function_manager.definitions[function_name].to_dict())\n\n async def get_function_running_count(request: web.Request):\n function_name = request.match_info['function_name']\n\n ret = self._function_manager.get_current_number_of_execution(function_name)\n if ret is None:\n raise web.HTTPNotFound()\n\n return web.json_response(ret)\n\n # Tasks\n async def get_task_info(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.to_dict())\n\n async def get_task_done(request: web.Request):\n if 'task_id' not in request.match_info:\n raise 
web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.is_done())\n\n async def get_task_result(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n return web.json_response(task_info.result)\n\n async def get_task_list(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n tasks = self._function_manager.list_task_info(function_name)\n if tasks is None:\n raise web.HTTPNotFound()\n\n return web.json_response([elm.to_dict() for elm in tasks])\n\n # Termination\n async def post_terminate_function(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n self._function_manager.terminate_function(function_name)\n return web.json_response({})\n\n async def post_terminate_task(request: web.Request, task_id: str):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n self._function_manager.terminate_task(task_id)\n\n return web.json_response({})\n\n api_list = [\n web.get('/function/list/data', get_function_list_data),\n web.get('/function/list/text', get_function_list_text),\n web.get(r'/function/definition/{function_name}', get_function_definition),\n web.get(r'/function/running-count/{function_name}', get_function_running_count),\n web.get(r'/task/info/{task_id}', get_task_info),\n web.get(r'/task/done/{task_id}', get_task_done),\n web.get(r'/task/result/{task_id}', get_task_result),\n web.get(r'/task/list/{function_name}', get_task_list),\n web.post(r'/terminate/function/{function_name}', post_terminate_function),\n web.post(r'/terminate/task/{task_id}', post_terminate_task),\n ]\n\n async def index(request: web.Request):\n return web.Response(text='\\n'.join([elm.path for elm in api_list])+'\\n')\n\n self._app.add_routes([*api_list, web.get('/', index)])", "def read_home():\n return {'message': 'API live!'}", "def welcome():\n return(\n f\"Available Routes: <br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)/end_date(yyyy-mm-dd)\")", "def entry():\n return render_template(\n 'entry.html',\n title='Welcome to search4letters on the web!',\n year=datetime.now().year\n )" ]
[ "0.6402737", "0.6304572", "0.6296278", "0.6259337", "0.61859983", "0.6181998", "0.6150856", "0.6121735", "0.6036392", "0.6036392", "0.6036392", "0.5975089", "0.59478664", "0.5932474", "0.5926881", "0.59074867", "0.5892754", "0.5886604", "0.5877651", "0.58573645", "0.5808704", "0.5797145", "0.57618374", "0.57617617", "0.57513964", "0.5723966", "0.5717822", "0.570656", "0.5706273", "0.57036453", "0.5697159", "0.5694004", "0.5685675", "0.567974", "0.56757116", "0.56744003", "0.56726813", "0.56726813", "0.5650663", "0.56382", "0.56297386", "0.5627946", "0.5626731", "0.56250185", "0.5624714", "0.5621993", "0.5620833", "0.5604364", "0.5596099", "0.5578093", "0.55773073", "0.5566393", "0.5557783", "0.55569637", "0.5554401", "0.555232", "0.55503404", "0.5542547", "0.55386955", "0.5523348", "0.55220616", "0.5520781", "0.55192375", "0.55133855", "0.5504205", "0.5503932", "0.5503932", "0.5497126", "0.54934007", "0.54907143", "0.54813486", "0.5476725", "0.5475461", "0.54725945", "0.5471003", "0.5458981", "0.5453881", "0.54536206", "0.5445381", "0.5443791", "0.5429654", "0.54268795", "0.5425147", "0.54250145", "0.54242074", "0.5417072", "0.54132384", "0.5406274", "0.54032797", "0.5388766", "0.5386367", "0.5383656", "0.5382423", "0.5380576", "0.53799915", "0.53769433", "0.5376864", "0.53745127", "0.5373475", "0.5369078", "0.53675514" ]
0.0
-1
List all pools, or create a new pool.
def pool_list(request, format=None):
    if request.method == 'GET':
        pools = storage.models.Pool.objects.all()
        serializer = serializers.PoolSerializer(pools)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = serializers.PoolSerializer(data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pool_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n ret[pool_record[\"name_label\"]] = pool_record\n return ret", "def list_pools(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('pools', self.pools_path, retrieve_all,\r\n **_params)", "def create_pool(self, body=None):\r\n return self.post(self.pools_path, body=body)", "def pool_list(mnode):\n cmd = \"gluster pool list\"\n return g.run(mnode, cmd)", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def get_pools():\n poolinfostr = fork_and_get_output(\"zpool list -H -o all\".split())\n header = get_zpool_header()\n poolinfo = poolinfostr.splitlines()\n poolobjs = []\n for poolstr in poolinfo:\n poolobjs.append(DataZFS(poolstr, header, 'pool'))\n return poolobjs", "def test_list_pools_sort(self):\r\n resources = \"pools\"\r\n cmd = pool.ListPool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def _list_pool_vm(args):\n _logger.debug('_list_pool_vm')\n #\n #\n _data_struct = {'name': {'head': 'Name', 'func': 'name', 'type': 'str'},\n 'uuid': {'head': 'UUID', 'func': 'UUIDString', 'type': 'str'},\n 'autostart': {'head': 'Autostart', 'func': 'autostart', 'type': 'yesno', 'convert': get_yesno},\n 'active': {'head': 'Active', 'func': 'isActive', 'type': 'yesno', 'convert': get_yesno},\n 'persistent': {'head': 'Persistent', 'func': 'isPersistent', 'type': 'yesno', 'convert': get_yesno},\n 'volumes': {'head': 'Volumes', 'func': 'numOfVolumes', 'type': 'int'},\n 'state': {'head': 'State', 'func': 'info', 'type': 'list', 'index': 0, 'convert': get_pool_state},\n 'capacity': {'head': 'Capacity', 'func': 'info', 'type': 'list', 'index': 1, 'convert': format_size},\n 'allocation': {'head': 'Allocation', 'func': 'info', 'type': 'list', 'index': 2, 'convert': format_size},\n 'available': {'head': 'Available', 'func': 'info', 'type': 'list', 'index': 3, 'convert': format_size},\n 'type': {'head': 'Type', 'func': None, 'type': 'str'}\n }\n #\n # get the pools\n _sps_fs, _sps_netfs = _get_pools()\n _sps = _sps_fs + _sps_netfs\n if len(_sps) == 0:\n _logger.info('No pools found.')\n return\n #\n # initialise the column widths\n _data_struct = initalise_column_lengths(_data_struct)\n #\n # column cantains only 'fs' or 'net fs'\n _data_struct['type']['len'] = 6\n #\n # format data and determine optimal length of fields.\n pool_data = list()\n for _sp in _sps:\n _sp_data = dict()\n for key, value in _data_struct.items():\n value_data = get_value_data(_sp, _data_struct[key])\n _sp_data[key] = value_data[0]\n val_length = value_data[1]\n _data_struct[key]['collen'] = max(val_length, _data_struct[key]['collen'])\n _sp_data['type'] = 'fs' if _sp in _sps_fs else 'net fs'\n pool_data.append(_sp_data)\n #\n # compose data\n _title = 'VM pool Information:'\n _columns = list()\n for key, value in _data_struct.items():\n _columns.append([value['head'], value['collen']+2, key])\n #\n printerKlass = get_row_printer_impl(args.output_mode)\n printer = printerKlass(title=_title, columns=_columns)\n printer.printHeader()\n #\n # print\n for _sp in pool_data:\n printer.rowBreak()\n printer.printRow(_sp)\n printer.printFooter()\n 
printer.finish()\n return", "def pools(self, summary=True, tags_intersect=None, tags=None):\n return list(self.all_pools(summary=summary, tags=tags, tags_intersect=tags_intersect))", "def list_pools(self):\n search_opts = {'router:external': True}\n return [FloatingIpPool(pool) for pool\n in self.client.list_networks(**search_opts).get('networks')]", "def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs", "def get_pools():\n pools = ch_core.hookenv.action_get('pools')\n if pools:\n return [p.strip() for p in pools.split(',')]\n return None", "def handle_cluster_pools(self, request):\n \"\"\"\n @api {get} /cluster/pools Get cluster pools\n @apiName GetClusterPools\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiDescription List pools and nodes registered into each.\n\n @apiSuccess {String[]} pool List of nodes registered into the pool.\n\n @apiSuccessExample {json} Example response:\n {\n \"pool1\": [\"node1\", \"node2\"],\n \"pool2: [\"node1\", \"node3\"]\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n return HTTPReply(body = json.dumps(self.cluster.pools), headers = headers)", "def _get_objects(cls, lb, names, minimal=False):\n\n if not names:\n return []\n\n pools = cls.factory.create(names, lb)\n\n if not minimal:\n active_member_count = cls._lbcall(lb, 'get_active_member_count',\n names)\n description = cls._lbcall(lb, 'get_description', names)\n lbmethod = cls._lbcall(lb, 'get_lb_method', names)\n members = cls._lbcall(lb, 'get_member', names)\n minimum_active_member = cls._lbcall(lb, 'get_minimum_active_member',\n names)\n minimum_up_member = cls._lbcall(lb, 'get_minimum_up_member',\n names)\n slow_ramp_time = cls._lbcall(lb, 'get_slow_ramp_time', names)\n statistics = cls._lbcall(lb, 'get_statistics', names)\n\n for idx,pool in enumerate(pools):\n pool._active_member_count = active_member_count[idx]\n pool._description = description[idx]\n pool._lbmethod = lbmethod[idx]\n pool._minimum_active_member = minimum_active_member[idx]\n pool._minimum_up_member = minimum_up_member[idx]\n pool._slow_ramp_time = slow_ramp_time[idx]\n pool._statistics = statistics['statistics'][idx]\n\n pool._members = f5.PoolMember._get_objects(lb, [pool],\n [members[idx]], minimal=True)\n\n return pools", "def pools_refresh(self):\n path = \"%s/commands/poolsRefresh\" % self.__base_path\n response = self.__session.post(path)\n self.__check_status_code(response.status_code)\n return response.json()", "def show_pool(self, pool, **_params):\r\n return self.get(self.pool_path % (pool), params=_params)", "def test_create_pool_with_all_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n provider = 'lbaas'\r\n args = ['--admin-state-down',\r\n '--description', description,\r\n '--lb-method', lb_method,\r\n '--name', name,\r\n 
'--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n '--provider', provider]\r\n position_names = ['admin_state_up', 'description', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id', 'provider']\r\n position_values = [False, description, lb_method, name,\r\n protocol, subnet_id, tenant_id, provider]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)", "def pool_create(self, pool_name):\n self.core.api.os.shell.cmd('{0} add apppool /name:\"{1}\"'.format(\n self.APP_CMD, pool_name\n ))", "def get_or_create_connection_pool(self, params):\r\n key = frozenset((k, repr(v)) for (k, v) in params.items())\r\n if key not in self._pools:\r\n self._pools[key] = self.get_connection_pool(params)\r\n return self._pools[key]", "def all_pools(self, summary: bool = True, tags: List[str] = None, tags_intersect: List[str] = None):\n return self._all_pages(self.pools_page, summary=summary, tags=tags, tags_intersect=tags_intersect)", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "def storage_pools_create(context, storage_pools):\n session = get_session()\n storage_pool_refs = []\n with session.begin():\n\n for storage_pool in storage_pools:\n LOG.debug('adding new storage_pool for native_storage_pool_id {0}:'\n .format(storage_pool.get('native_storage_pool_id')))\n if not storage_pool.get('id'):\n storage_pool['id'] = uuidutils.generate_uuid()\n\n storage_pool_ref = models.StoragePool()\n storage_pool_ref.update(storage_pool)\n storage_pool_refs.append(storage_pool_ref)\n\n session.add_all(storage_pool_refs)\n\n return storage_pool_refs", "def pool_create_from_dict(self, parameters: dict):\n pool_name = parameters[KnownParameters.SITE_NAME.value]\n parameters[KnownParameters.POOL_NAME.value] = pool_name\n for pool in self.get_app_pool_list():\n if pool.name.lower() == pool_name.lower():\n return\n return self.pool_create(pool_name)", "def make_pool(self) -> pool.SimpleConnectionPool:\n\n return pool.SimpleConnectionPool(\n minconn=1, maxconn=self.pool_size, **self._kwargs\n )", "def update_minion_pool():\n pool = fetch_minion_pool()\n save_minion_pool(pool)\n return pool", "def list_resources(px, pools):\n result = []\n for pool in pools:\n for i in px.pools.get(pool)[\"members\"]:\n result.append(\n {\n \"pool\": pool,\n \"vmid\": i[\"vmid\"],\n \"name\": i[\"name\"],\n \"status\": i[\"status\"],\n \"type\": i[\"type\"],\n }\n )\n return result, {\n \"pool\": \"pool(s)\",\n \"vmid\": \"vmid\",\n \"name\": \"name\",\n \"status\": \"status\",\n \"type\": \"type\",\n }", "def get_pools():\n gclass = get_class(\"dhcpPool\")\n if gclass is None:\n logger.error(\"failed to get dhcpPool\")\n return None\n\n pools = {}\n for obj in gclass:\n if \"attributes\" in obj[obj.keys()[0]]:\n attr = obj[obj.keys()[0]][\"attributes\"]\n for r in [\"className\", \"dn\", \"id\", \"type\", \"startIp\", \n \"endIp\", \"freeIPs\"]:\n if r not in attr:\n logger.error(\"missing %s, invalid object: %s\" % (\n r, pretty_print(obj)))\n return None\n ip = ipv4_to_int(attr[\"startIp\"])\n if ip is None:\n logger.error(\"failed to convert ipv4 address for %s\" % obj)\n return None\n p = {\n \"className\": attr[\"className\"],\n \"dn\": attr[\"dn\"],\n \"id\": attr[\"id\"],\n \"type\": attr[\"type\"],\n \"address\": ip,\n \"address_str\": attr[\"startIp\"],\n \"freeIPs\": attr[\"freeIPs\"]\n }\n if ip not in pools:\n pools[ip] = {\"bad_lease\":[], \"good_lease\":[], \"pools\":[],\n 
\"type\":attr[\"className\"], \"state\":\"\", \"address\":ip}\n pools[ip][\"pools\"].append(p)\n\n # loop through all entries in pool and update state\n for ip in pools:\n state = \"recovery\"\n for p in pools[ip][\"pools\"]:\n if p[\"type\"]!=\"recovery\": state = p[\"type\"]\n pools[ip][\"state\"] = state\n return pools", "def get_pool():\n app = get_app()\n return app['pool']", "def create_pool(self, cnx_settings):\n connections_settings = self._get_connections_settings(cnx_settings)\n\n # Subscribe client if it does not exists\n if cnx_settings.get(\"client_id\", \"No id\") not in self.__pools:\n self.__pools[cnx_settings.get(\"client_id\", \"No id\")] = []\n\n # Create a pool for each router\n for router_name, settings in connections_settings:\n if self._pool_exists(cnx_settings.get(\"client_id\", \"No id\"), router_name):\n continue\n pool = self.__pools.get(cnx_settings.get(\"client_id\", \"No id\"), [])\n pool.append(ConnectionPool(router_name, **settings))", "def newPool(name: str, superPool, types: [], cls):\n try:\n if name == \"colorholder\":\n superPool = P0(len(types), cls)\n return superPool\n elif name == \"abstractnode\":\n superPool = P1(len(types), cls)\n return superPool\n elif name == \"node\":\n superPool = P2(len(types), superPool, cls)\n return superPool\n \n elif name == \"subnode\":\n superPool = P3(len(types), superPool, cls)\n return superPool\n \n else:\n if superPool is None:\n superPool = BasePool(len(types), name, StoragePool.noKnownFields, StoragePool.noAutoFields, cls)\n else:\n superPool = superPool.makeSubPool(len(types), name, cls)\n return superPool\n finally:\n types.append(superPool)", "def get_app_pool_list(self) -> list:\n ah_write = self.get_iis_object()\n section = ah_write.GetAdminSection(\"system.applicationHost/applicationPools\", \"MACHINE/WEBROOT/APPHOST\")\n collection = section.Collection\n result = list()\n\n for i in range(collection.Count):\n site = collection[i]\n prop = site.Properties\n\n name = prop[\"name\"].Value\n managed_runtime_version = prop[\"managedRuntimeVersion\"].Value\n if prop[\"managedPipelineMode\"].Value:\n managed_pipeline_mode = \"classic\"\n else:\n managed_pipeline_mode = \"integrated\"\n if prop[\"enable32BitAppOnWin64\"].Value:\n bitness = 32\n else:\n bitness = 64\n\n result.append(AppPool(name, managed_runtime_version, managed_pipeline_mode, bitness))\n\n return result", "def add_pool(ctx, pool_name, global_ip_range, global_port_range):\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n # Verify the ip address range and format\n ip_address = global_ip_range.split(\"-\")\n if len(ip_address) > 2:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n elif len(ip_address) == 2:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n\n if is_valid_ipv4_address(ip_address[1]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[1]))\n\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))\n if ipLowLimit >= ipHighLimit:\n ctx.fail(\"Given ip address range {} is invalid. 
Please enter a valid ip address range !!\".format(global_ip_range))\n else:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))\n\n # Verify the port address range and format\n if global_port_range is not None: \n port_address = global_port_range.split(\"-\")\n\n if len(port_address) > 2:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n elif len(port_address) == 2:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n\n if is_valid_port_address(port_address[1]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[1]))\n\n portLowLimit = int(port_address[0])\n portHighLimit = int(port_address[1])\n if portLowLimit >= portHighLimit:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n else:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n else:\n global_port_range = \"NULL\"\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n dataKey1 = 'nat_ip'\n dataKey2 = 'nat_port'\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range:\n click.echo(\"Trying to add pool, which is already present.\")\n entryFound = True\n\n pool_dict = config_db.get_table(table) \n if len(pool_dict) == 16:\n click.echo(\"Failed to add pool, as already reached maximum pool limit 16.\")\n entryFound = True\n\n # Verify the Ip address is overlapping with any Static NAT entry\n if entryFound == False:\n static_dict = config_db.get_table('STATIC_NAT')\n if static_dict:\n for staticKey, staticValues in static_dict.items():\n global_ip = \"---\"\n local_ip = \"---\"\n nat_type = \"dnat\"\n\n if isinstance(staticKey, str) is True:\n global_ip = staticKey\n else:\n continue\n\n local_ip = staticValues[\"local_ip\"]\n\n if \"nat_type\" in staticValues:\n nat_type = staticValues[\"nat_type\"]\n\n if nat_type == \"snat\":\n global_ip = local_ip\n\n ipAddress = int(ipaddress.IPv4Address(global_ip))\n if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):\n ctx.fail(\"Given Ip address entry is overlapping with existing Static NAT entry !!\")\n\n if entryFound == False:\n config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range})", "def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool", "def node_pools(self) -> Sequence['outputs.NodePoolResponse']:\n return pulumi.get(self, \"node_pools\")", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def _create_volume_pool(self, pool_name):\n osd_map = self._rados_command('osd dump', {})\n\n existing_id = self._get_pool_id(osd_map, pool_name)\n if 
existing_id is not None:\n log.info(\"Pool {0} already exists\".format(pool_name))\n return existing_id\n\n osd_count = len(osd_map['osds'])\n\n # We can't query the actual cluster config remotely, but since this is\n # just a heuristic we'll assume that the ceph.conf we have locally reflects\n # that in use in the rest of the cluster.\n pg_warn_max_per_osd = int(self.rados.conf_get('mon_max_pg_per_osd'))\n\n other_pgs = 0\n for pool in osd_map['pools']:\n if not pool['pool_name'].startswith(self.POOL_PREFIX):\n other_pgs += pool['pg_num']\n\n # A basic heuristic for picking pg_num: work out the max number of\n # PGs we can have without tripping a warning, then subtract the number\n # of PGs already created by non-manila pools, then divide by ten. That'll\n # give you a reasonable result on a system where you have \"a few\" manila\n # shares.\n pg_num = ((pg_warn_max_per_osd * osd_count) - other_pgs) // 10\n # TODO Alternatively, respect an override set by the user.\n\n self._rados_command(\n 'osd pool create',\n {\n 'pool': pool_name,\n 'pg_num': int(pg_num),\n }\n )\n\n osd_map = self._rados_command('osd dump', {})\n pool_id = self._get_pool_id(osd_map, pool_name)\n\n if pool_id is None:\n # If the pool isn't there, that's either a ceph bug, or it's some outside influence\n # removing it right after we created it.\n log.error(\"OSD map doesn't contain expected pool '{0}':\\n{1}\".format(\n pool_name, json.dumps(osd_map, indent=2)\n ))\n raise RuntimeError(\"Pool '{0}' not present in map after creation\".format(pool_name))\n else:\n return pool_id", "def _create_graph(self, pools: List[Pool]):\n for pool in pools:\n self._add_nodes(pool.tokens)\n\n for pool in pools: # noqa: WPS440,WPS441\n self._add_edges(pool) # noqa: WPS441", "def get_new_pool ( self, force=False, with_deptype=DEFAULT_DEPTYPE ):\n if force or not self._poolstack or not self._poolstack[-1].empty():\n self._add_pool (\n roverlay.depres.simpledeprule.pool.SimpleDependencyRulePool (\n \"pool\" + str ( self._pool_id ),\n deptype_mask=with_deptype\n )\n )\n # -- end if force or ...\n return self._poolstack[-1]", "def create_pool(self, context, pool):\n LOG.info(\"Received request 'Create Pool' for Pool:%(pool_id)s \",\n {'pool_id': pool['id']})\n arg_dict = {'context': context,\n lb_const.POOL: pool\n }\n # REVISIT(jiahao) M:N pool is not yet implemented.\n self._send_event(lb_const.EVENT_CREATE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])", "def list_device_pools(arn=None, type=None, nextToken=None):\n pass", "def create_pool(self, service, bigips):\n pool = self.service_adapter.get_pool(service)\n error = None\n\n for bigip in bigips:\n try:\n self.pool_helper.create(bigip, pool)\n except HTTPError as err:\n if err.response.status_code == 409:\n LOG.debug(\"Pool already exists...updating\")\n try:\n self.pool_helper.update(bigip, pool)\n except Exception as err:\n error = f5_ex.PoolUpdateException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n else:\n error = f5_ex.PoolCreationException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n except Exception as err:\n error = f5_ex.PoolCreationException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n\n return error", "def get_pool(b_dummy=True, num=4):\n if b_dummy:\n pool = ThreadPool(num)\n else:\n pool = ProcessPool(num)\n\n return pool", "async def 
create_pool(address, *, db=None, password=None, ssl=None,\n encoding=None, minsize=1, maxsize=10,\n parser=None, loop=None, create_connection_timeout=None,\n pool_cls=None, connection_cls=None):\n # FIXME: rewrite docstring\n if pool_cls:\n assert issubclass(pool_cls, AbcPool),\\\n \"pool_class does not meet the AbcPool contract\"\n cls = pool_cls\n else:\n cls = ConnectionsPool\n if isinstance(address, str):\n address, options = parse_url(address)\n db = options.setdefault('db', db)\n password = options.setdefault('password', password)\n encoding = options.setdefault('encoding', encoding)\n create_connection_timeout = options.setdefault(\n 'timeout', create_connection_timeout)\n if 'ssl' in options:\n assert options['ssl'] or (not options['ssl'] and not ssl), (\n \"Conflicting ssl options are set\", options['ssl'], ssl)\n ssl = ssl or options['ssl']\n # TODO: minsize/maxsize\n\n pool = cls(address, db, password, encoding,\n minsize=minsize, maxsize=maxsize,\n ssl=ssl, parser=parser,\n create_connection_timeout=create_connection_timeout,\n connection_cls=connection_cls,\n loop=loop)\n try:\n await pool._fill_free(override_min=False)\n except Exception:\n pool.close()\n await pool.wait_closed()\n raise\n return pool", "def _create_pool_vm(args):\n # check storage pool name unicity\n conn = libvirt.open(None)\n _sps = list()\n if conn:\n _sps = [sp for sp in conn.listAllStoragePools() if sp.name() == args.name]\n conn.close()\n else:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n\n if len(_sps) != 0:\n print(\"Storage pool with name [%s] already exists\" % args.name, file=sys.stderr)\n return 1\n\n if args.disk and args.netfshost:\n print(\"--disk and --host option are exclusive\", file=sys.stderr)\n return 1\n\n if not args.disk and not args.netfshost:\n print(\"Either --disk or --host must be specified.\", file=sys.stderr)\n return 1\n\n if args.netfshost and not args.path:\n print(\"Must specify the remote resource path with the --path option\", file=sys.stderr)\n return 1\n\n _pool_name = args.name\n if args.disk:\n return oci_utils.kvm.virt.create_fs_pool(args.disk, _pool_name)\n if args.netfshost:\n return oci_utils.kvm.virt.create_netfs_pool(args.netfshost, args.path, _pool_name)", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)", "def add_to_pool(self):\n if self.check_pool():\n for func in self.getter._func:\n proxies = self.getter.get_proxies(func)\n for proxy in proxies:\n self.conn.push_to_right(proxy)\n else:\n print('Pool reached max capacity')", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def _add_pool ( self, pool ):\n self._pool_id += 1\n try:\n self._poolstack.append ( pool )\n except:\n self._pool_id -= 1\n raise\n\n self._update_resolver()", "def get_pool ( self ):\n if self._poolstack:\n return self._poolstack[-1]\n else:\n return self.get_new_pool ( force=True )", "async def get_pools(self) -> List[CachingPool]:\n return await self._pool_fetcher.get_pools()", "def restart_pool(self) -> pool.SimpleConnectionPool:\n\n self.close_all()\n return self.make_pool()", "def _get_pool(self, *args, **kwargs):\n\n pool_name = '_pool_%s' % getattr(self, 'alias', 'common')\n\n if not hasattr (self.__class__, pool_name):\n lock = thread.allocate_lock()\n lock.acquire()\n\n 
try:\n pool = cx_Oracle.SessionPool(\n user=self.user,\n password=self.password,\n dsn=self.tns,\n min=CX_POOL_SESSION_MIN,\n max=CX_POOL_SESSION_MAX,\n increment=CX_POOL_SESSION_INCREMENT,\n connectiontype=cx_Oracle.Connection,\n threaded=CX_POOL_THREADED,\n getmode=cx_Oracle.SPOOL_ATTRVAL_NOWAIT,\n homogeneous=True)\n except Exception as err:\n pool = None\n\n if pool:\n pool.timeout = CX_POOL_CONNECT_TIMEOUT\n setattr(self.__class__, pool_name, pool)\n else:\n msg = \"\"\" ### Database login failed or database not found ### \"\"\"\n raise self.Database_Error, ('%s') %(msg)\n\n lock.release()\n\n return getattr(self.__class__, pool_name)", "def _get_pools(self, settings):\n available_pools = []\n pool_names = []\n connections_settings = self._get_connections_settings(settings)\n\n # Generate the names of the pools this settings can connect to\n for router_name, _ in connections_settings:\n pool_names.append(router_name)\n\n # Generate the names of the pools this settings can connect to\n for pool in self.__pools.get(settings.get(\"client_id\", \"No id\"), []):\n if pool.name in pool_names:\n available_pools.append(pool)\n return available_pools", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def pools(self) -> List[CachingPool]:\n return self._pool_fetcher.pools", "def test_create_pool_with_mandatory_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--lb-method', lb_method,\r\n '--name', name,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id]\r\n position_names = ['admin_state_up', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id']\r\n position_values = [True, lb_method, name,\r\n protocol, subnet_id, tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)", "async def build_pool(self):\n pool = UnitTypeId.SPAWNINGPOOL # to save line breaks\n if not self.structures(pool).ready and not self.already_pending(pool):\n await self.build(pool, self.start_location.towards(self.game_info.map_center, distance=5))", "def gen_floating_ip_pool(vnet):\n pool = FloatingIpPool(name='default',\n parent_obj=vnet,\n )\n return pool", "def _get_random_pool(pool_list):\n if not pool_list:\n return None\n if len(pool_list) == 1:\n return pool_list[0]\n\n last = len(pool_list) - 1\n index = random.randint(0, last)\n return pool_list[index]", "def add_pool(name, **kwargs):\n _CONNECTIONS[name] = redis.StrictRedis(**kwargs)", "def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool", "def cmd_list_resources(config=DEFAULT_LINUX_PATH):\n config = load_config_file(expand_config_path(config))\n px = connection_proxmox(config[\"proxmox\"])\n try:\n if config[\"pools\"]:\n l, h = list_resources(px, config[\"pools\"])\n return tabulate(l, h)\n else:\n print(\"Dick 'pools' is empty\")\n except KeyError:\n print(\"Missing 'pools' dict in config file\")\n sys.exit(1)", "def fusion_api_allocate_pool(self, body, uri, api=None, headers=None):\n return self.idpool.allocate(body, uri, api, headers)", "def _init_pool(self, cfg: dict):\n pool = PyMysqlPoolBase(**cfg)\n return pool", "def get_default_pool():\n return 'tank'", "def 
fusion_api_get_storage_pools(self, uri=None, param='', api=None, headers=None):\n return self.pool.get(uri=uri, api=api, headers=headers, param=param)", "def _repopulate_pool(self):\n for i in range(self._processes - len(self._pool)):\n w = self.Process(target=worker,\n args=(self._inqueue, self._outqueue,\n self._initializer,\n self._initargs, self._maxtasksperchild,\n self._wrap_exception,\n self._finalizer,\n self._finalargs)\n )\n self._pool.append(w)\n w.name = w.name.replace('Process', 'PoolWorker')\n w.daemon = True\n w.start()\n util.debug('added worker')", "def create_pool(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.create_pool(\n protocol=data['pool']['protocol'],\n lb_algorithm=data['pool']['lb_algorithm'],\n session_persistence=data['pool'].get('session_persistence'),\n listener_id=kwargs['listener_id'],\n loadbalancer_id=kwargs['loadbalancer_id'],\n name=data['pool'].get('name'),\n description=data['pool'].get('description'),\n admin_state_up=data['pool'].get('admin_state_up'),\n tls_enabled=data['pool'].get('tls_enabled'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['pool'].get('tls_ciphers') or None,\n )\n\n if data.get('members'):\n args = (request, kwargs['loadbalancer_id'], add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool.id,\n 'index': 0}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, kwargs['loadbalancer_id'], create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(pool)", "def get_connection_pool(self, params):\r\n cp_params = dict(params)\r\n cp_params.update(self.pool_cls_kwargs)\r\n return self.pool_cls(**cp_params)", "def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}", "def get_pool(self, pool_name=None, pool_id=None):\n\n id_or_name = pool_id if pool_id else pool_name\n errormsg = \"Failed to get the pool {0} with error {1}\"\n\n try:\n obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)\n\n if pool_id and obj_pool.existed:\n LOG.info(\"Successfully got the pool object %s\",\n obj_pool)\n return obj_pool\n if pool_name:\n LOG.info(\"Successfully got pool %s\", obj_pool)\n return obj_pool\n else:\n msg = \"Failed to get the pool with {0}\".format(\n id_or_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n except Exception as e:\n msg = errormsg.format(id_or_name, str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.find_pool(pool_id)\n pool = _get_sdk_object_dict(pool)\n\n if request.GET.get('includeChildResources'):\n resources = {}\n resources['pool'] = pool\n\n if pool.get('members'):\n member_list = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n resources['members'] = member_list\n\n if pool.get('health_monitor_id'):\n monitor_id = pool['health_monitor_id']\n monitor = conn.load_balancer.find_health_monitor(\n monitor_id)\n monitor = _get_sdk_object_dict(monitor)\n 
resources['monitor'] = monitor\n\n return resources\n else:\n return pool", "def refresh_pools(args):\n if not reactive.is_flag_set('leadership.is_leader'):\n ch_core.hookenv.action_fail('run action on the leader unit')\n return\n\n # set and flush flag to disk\n reactive.set_flag('refresh.pools')\n ch_core.unitdata._KV.flush()\n\n # run reactive handlers to deal with flag\n return reactive.main()", "def pool(self) -> asyncpg.pool.Pool:\n return self.bot.pool", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def pool(self):\n return self._properties.get('pool')", "def pool(self) -> NodePool:\n\n return self._pool", "def get_pool_list(mnode):\n ret, out, _ = g.run(mnode, \"gluster pool list --xml\", log_level='DEBUG')\n if ret != 0:\n g.log.error(\"Failed to execute 'pool list' on node %s. \"\n \"Hence failed to parse the pool list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster pool list xml output.\")\n return None\n\n pool_list_list = []\n for peer in root.findall(\"peerStatus/peer\"):\n peer_dict = {}\n for element in peer.getchildren():\n if element.tag == \"hostname\" and element.text == 'localhost':\n element.text = mnode\n if element.tag == \"hostnames\":\n hostnames_list = []\n for hostname in element.getchildren():\n hostnames_list.append(hostname.text)\n element.text = hostnames_list\n peer_dict[element.tag] = element.text\n\n pool_list_list.append(peer_dict)\n return pool_list_list", "def test_list_pools(mocker, api: API, account: Account, raw_pools):\n api.candlepin.get_pools.return_value = raw_pools\n\n apply_mapping_spy = mocker.patch(\n \"ethel.account.apply_mapping\", side_effect=apply_mapping\n )\n pools = account.list_pools()\n assert len(pools) == len(raw_pools)\n\n expected_call_count = len(account.POOL_ATTRIBUTES_MAPPING) * len(raw_pools)\n\n assert apply_mapping_spy.call_count == expected_call_count", "def resync_pools(args):\n if not ch_core.hookenv.action_get('i-really-mean-it'):\n ch_core.hookenv.action_fail('Required parameter not set')\n return\n with charms_openstack.charm.provide_charm_instance() as charm:\n ceph_local = reactive.endpoint_from_name('ceph-local')\n pools = get_pools()\n if not pools:\n pools = charm.eligible_pools(ceph_local.pools)\n result = collections.defaultdict(dict)\n for pool in pools:\n # list images in pool\n output = subprocess.check_output(\n ['rbd', '--id', charm.ceph_id, '--format', 'json',\n '-p', pool, 'ls'], universal_newlines=True)\n images = json.loads(output)\n for image in images:\n output = subprocess.check_output(\n ['rbd', '--id', charm.ceph_id, '--format', 'json', 'info',\n '{}/{}'.format(pool, image)], universal_newlines=True)\n image_info = json.loads(output)\n if image_info['mirroring']['state'] == 'disabled':\n continue\n output = subprocess.check_output(\n ['rbd', '--id', charm.ceph_id, 'mirror', 'image', 'resync',\n '{}/{}'.format(pool, image)], universal_newlines=True)\n result[pool][image] = output.rstrip()\n output_str = ''\n for pool in result:\n for image in result[pool]:\n if output_str:\n output_str += '\\n'\n output_str += '{}/{}: {}'.format(pool, image,\n result[pool][image])\n ch_core.hookenv.action_set({'output': output_str})", "def listRewardPools(self):\n return self.get_json('/rewardPool')", 
"def get_pools():\n command = 'zpool list -H'\n try:\n p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)\n except OSError:\n raise Exception('No ZFS tools found!')\n zpout, zperr = p.communicate()\n if p.returncode:\n raise Exception(\"Error executing '%s': %d\" % (command, p.returncode))\n return [line.split('\\t', 1)[0] for line in zpout.split('\\n') if line]", "def addpool(miner: Miner, pool):\n api = MinerApi(host=miner.ipaddress, port=int(miner.port))\n jaddpool = api.addpool(\"{0},{1},{2}\".format(pool.url, pool.user, \"x\"))\n return jaddpool[\"STATUS\"][0][\"Msg\"]", "def target_pools(self) -> pulumi.Output[Optional[List[str]]]:\n return pulumi.get(self, \"target_pools\")", "def _Pool(self, name, stride, first_n=None):\n p = self.params\n return p.funnel_pool_tpl.Copy().Set(\n stride=stride,\n first_n=first_n,\n name=name)", "def checkout_all(urls=settings.URLS_TO_FETCH, pool=None):\n\n if pool:\n print \"using pool\"\n r = pool.map(_checkout, urls)\n else:\n r = [_checkout(u) for u in urls]\n\n return r", "def get_pool_ids(host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n pool_ids = []\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. Error message: [{err}]\".format(err=ret[1])\n return -1, pool_ids\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n pool_ids.append(p[\"id\"])\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1, pool_ids\n return 0, pool_ids", "def create_ldap_server_pool_obj(self,\n ldap_servers: typing.List[str] = None) -> ldap3.ServerPool:\n server_pool = ldap3.ServerPool(\n ldap_servers,\n pool_strategy=self.server_pool_strategy.upper(),\n active=self.server_pool_active,\n exhaust=self.server_pool_exhaust\n )\n return server_pool", "def pool(self) -> Pool:\n assert self._pool is not None\n return self._pool", "def ip_address_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpAddressPoolArgs']]]]:\n return pulumi.get(self, \"ip_address_pools\")", "def remove_pools(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n pool_table_name = 'NAT_POOL'\n binding_table_name = 'NAT_BINDINGS'\n binding_dict = config_db.get_table(binding_table_name)\n pool_dict = config_db.get_table(pool_table_name)\n if pool_dict:\n for pool_key_name in pool_dict:\n entryFound = False\n for binding_name, binding_values in binding_dict.items():\n if binding_values['nat_pool'] == pool_key_name:\n click.echo(\"Pool {} is not removed, as it is mapped to Binding {}, remove the pool binding first !!\".format(pool_key_name,binding_name))\n entryFound = True\n break\n\n if entryFound == False: \n config_db.set_entry(pool_table_name, pool_key_name, None)", "def l7pool_add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n pool_main = {\n 'name': args.get('name'),\n 'loadBalancingAlgorithm': args.get('method'),\n 'protocol': args.get('protocol')\n }\n\n pool_members = list(args.get('server'))\n\n pool_health = {\n 'interval': args.get('healthinterval'),\n 'timeout': args.get('healthtimeout'),\n 'maxRetries': args.get('healthretry'),\n 'urlPath': args.get('healthpath')\n }\n\n pool_sticky = {\n 'type': args.get('sticky')\n }\n\n try:\n mgr.add_lb_l7_pool(uuid, pool_main, pool_members, pool_health, pool_sticky)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as 
exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def test_list_pools_future(api: API, account: Account):\n api.candlepin.get_pools.return_value = []\n account.list_pools(future=True)\n api.candlepin.get_pools.assert_called_with(\"USERNAME\", \"PASSWORD\", 1234, future=True)", "def get_coip_pools(filters: Optional[Sequence[pulumi.InputType['GetCoipPoolsFilterArgs']]] = None,\n tags: Optional[Mapping[str, str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCoipPoolsResult:\n __args__ = dict()\n __args__['filters'] = filters\n __args__['tags'] = tags\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:ec2/getCoipPools:getCoipPools', __args__, opts=opts, typ=GetCoipPoolsResult).value\n\n return AwaitableGetCoipPoolsResult(\n filters=pulumi.get(__ret__, 'filters'),\n id=pulumi.get(__ret__, 'id'),\n pool_ids=pulumi.get(__ret__, 'pool_ids'),\n tags=pulumi.get(__ret__, 'tags'))", "def fusion_api_generate_pool(self, uri, api=None, headers=None):\n return self.idpool.generate(uri, api, headers)", "def test_update_pool(self):\r\n resource = 'pool'\r\n cmd = pool.UpdatePool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id'),\n 'listener_id': request.DATA.get('parentResourceId')}\n return create_pool(request, **kwargs)", "def clearPool(self):\n return self.request('clearPool')", "def csAddPool(self,pool,creatorid,nas=None):\n\n logger.debug(\"Attempting to create pool '\"+pool+\"'.\")\n\n localpath = \"/.\"+pool\n\n url = self.csurl + \"/polcentral/v1_0/pools/\"\n\n if nas == None:\n logger.debug(\"No NAS object provided, will create pool '\"+pool+\"' type 'MW'.\")\n pooltype = 'MW'\n subscribedevices = True\n deviceid = ''\n pathinpool = '/'\n servername = ''\n sharename = ''\n sharepath = ''\n creditname = ''\n overridewarnings = True\n else:\n logger.debug(\"NAS object provided, will create pool '\"+pool+\"' type 'PS'.\")\n pooltype = 'PS'\n subscribedevices = True\n deviceid = ''\n pathinpool = '/'\n servername = ''\n sharename = ''\n sharepath = ''\n creditname = ''\n overridewarnings = True\n\n payload = {\n \"name\": pool,\n \"description\":\"Pool added by testbot\",\n \"creatorid\": {\"$id\": creatorid},\n \"type\":pooltype,\n \"allowpiggybacks\":True,\n \"localpoolpath\": localpath\n }\n \"\"\"\n payload = {\n \"name\": poolname,\n \"description\":\"Pool added by testbot\",\n \"creatorid\": {\"$id\": creatorid},\n \"type\":pooltype,\n \"allowpiggybacks\":True,\n \"localpoolpath\": localpath,\n \"subscribedevices\":subscribedevices,\n \"deviceid\": deviceid,\n \"pathinpool\": pathinpool,\n \"servername\": servername,\n \"sharename\": sharename,\n \"sharepath\": sharepath,\n \"credsetname\": creditname,\n \"overridewarnings\": overridewarnings\n }\n \"\"\"\n\n try:\n r = requests.post(url, data=json.dumps(payload))\n except Exception:\n logger.error(\"Exception during api call to add pool.\")\n return 'Error'\n\n if r.status_code == 200:\n logger.debug(\"Pool '\"+pool+\"' was successfully created.\")\n poolid = r.json()['_id']\n return poolid['$id']\n else:\n logger.error(\"Pool '\"+pool+\"' was not created. 
Error code is \"+str(r.status_code)+\".\")\n return 'Error'", "def execute(self, pool, vthunder):\n\n args = {'service_group': self.meta(pool, 'service_group', {})}\n try:\n conf_templates = self.readConf('SERVICE_GROUP','templates').strip('\"')\n service_group_temp = {}\n service_group_temp['template-server'] = conf_templates\n except:\n service_group_temp = None\n\n try:\n c = self.client_factory(vthunder)\n lb_method=openstack_mappings.service_group_lb_method(c,pool.lb_algorithm)\n out = c.slb.service_group.create(pool.id, pool.protocol, lb_method,\n service_group_temp, axapi_args=args)\n LOG.info(\"Pool created successfully.\")\n except Exception as e:\n print(str(e))\n LOG.info(\"Error occurred\")" ]
[ "0.70111126", "0.6758204", "0.6567126", "0.65345985", "0.645883", "0.6388487", "0.6305644", "0.62771875", "0.61912704", "0.61358166", "0.6108212", "0.6096144", "0.6054042", "0.6049037", "0.604506", "0.6033518", "0.602757", "0.59318095", "0.59190464", "0.5901486", "0.5883244", "0.5856167", "0.5842945", "0.5821054", "0.58081037", "0.5790776", "0.578623", "0.5774328", "0.57141685", "0.5702614", "0.5702231", "0.5695721", "0.5663998", "0.5662801", "0.56599736", "0.564338", "0.5633942", "0.5619254", "0.56097823", "0.56060547", "0.5595694", "0.55904806", "0.5573057", "0.5565884", "0.5559468", "0.5555133", "0.55494267", "0.55464554", "0.5526306", "0.5510588", "0.5491902", "0.54711854", "0.5462631", "0.5433538", "0.5431351", "0.54059076", "0.5398912", "0.539821", "0.53869677", "0.5378027", "0.5373963", "0.5373535", "0.5368231", "0.53625154", "0.5359667", "0.5358474", "0.53532", "0.53484786", "0.5342903", "0.53401464", "0.5334458", "0.5332751", "0.53293675", "0.5308271", "0.5294432", "0.5289553", "0.5289454", "0.52849853", "0.5273966", "0.52679116", "0.5258685", "0.52498466", "0.52216184", "0.5209386", "0.52020466", "0.51922816", "0.51750684", "0.51731694", "0.5155374", "0.5146047", "0.51457936", "0.5144829", "0.51336807", "0.51335853", "0.5124382", "0.5119653", "0.5114195", "0.5113886", "0.51094705", "0.51033366" ]
0.64396614
5
Retrieve a known stored filter object from the db
def retrieve_filter(self, filter_id):
    LOG.debug("Retrieve filter {}".format(filter_id))
    filter_obj = self.filter_collection.find_one({"_id": ObjectId(filter_id)})

    # use _id to preselect the currently loaded filter, and drop it while we are at it
    filter_obj.update([("filters", filter_obj.pop("_id", None))])
    return filter_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter():\n return get_filter_data(db, MyTable)", "def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None", "def get_instance(self, data):\n filters = {\n key: data[key]\n for key in self.fields.keys() if key in self.lookup_fields}\n\n if None not in filters.values():\n return self.session.query(\n self.opts.model\n ).filter_by(\n **filters\n ).first()\n return None", "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def get(cls, filters: Dict = None):\n if filters is None:\n filters = {}\n\n data = DATABASE_CONNECTION.get(cls.__name__)\n\n for k, v in filters.items():\n data = [row for row in data if row[k] in v]\n\n res = [cls.deserialize(row) for row in data]\n\n return res", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def getFilter(self):\n\n return self.filter", "def get_record(self, collection_name, filter):\n\n try:\n self.logger.info('in get_record()')\n collection = self.get_db()[collection_name]\n record = collection.find_one(filter)\n self.logger.info('in get_record()')\n return record\n except Exception as e:\n self.logger.error(f'Error occurred while getting records {e}')", "def filter(self):\n return self._filter", "def get_one(self, filters: dict) -> dict:\n try:\n payment = Payments()\n payment.find(filters)\n app.logger.info(f\"Se encontro el complemento {str(payment._id)}\")\n return payment\n except Exception as e:\n print(\"ERROR in repo\", e)\n app.logger.error(e)\n return None", "def test_get_saved_filter(self):\n filter_id = self.filter_1.pk\n url = reverse('xds_api:saved-filter', args=(filter_id,))\n\n response = self.client.get(url)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(responseDict[\"name\"], \"Devops\")", "def get_filters(self):", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def filter(self, filter_id):\r\n self.require_collection()\r\n url = '{0}/{1}'.format(self.get_url(), filter_id)\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def get_object(self):\n queryset = self.get_queryset()\n\n model = self.get_model()\n obj = queryset.get(get_primary_keys(model, self.kwargs))\n\n if not obj:\n raise Http404('No %s matches the given query.' 
% model.__name__)\n\n return obj", "def get_exact_filter_by_name(self, name):\n for entry in self.filters:\n if (entry['type'] == 'filter' and entry['name'] == name and\n entry['comparator'] == 'equals'):\n return entry", "def _get_filter(self, cr, uid, external_session, step, previous_filter=None, context=None):\n return None", "def find(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def db_for_read(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "def retrieve_from_db(self):\n pass", "def factory(self, request):\n # This yields the \"context\", which should be the row object\n try:\n q = model.session.query(self.sqla_table)\n q = self.filter_sqlalchemy_query(q, request)\n return q.one()\n except NoResultFound:\n # 404!\n raise NotFound()", "def test_get_saved_filters(self):\n url = reverse('xds_api:saved-filters')\n\n saved_config = SavedFilter(owner=self.user_1,\n name=\"Devops\", query=\"randomQuery\")\n saved_config.save()\n\n response = self.client.get(url)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(responseDict[0][\"name\"], \"Devops\")", "def _get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def getFilter(self, type: int) -> int:\n ...", "def get(cls, **filters) -> dict:\n errors = cls.validate_query(filters)\n if errors:\n raise ValidationFailed(filters, errors)\n\n cls.deserialize_query(filters)\n\n if cls.__collection__.count_documents(filters) > 1:\n raise ValidationFailed(\n filters, message=\"More than one result: Consider another filtering.\"\n )\n\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Query document matching {filters}...\")\n document = cls.__collection__.find_one(filters)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(\n f'{\"1\" if document else \"No corresponding\"} document retrieved.'\n )\n return cls.serialize(document)", "def filters(self):\n\t\treturn self.local_filter", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "async def get_filter(self, **kwargs: Any) -> str:\n return self._telescope.filter_name", "def get(cls, *args, **kwargs):\n return SelectQuery(cls).filter(*args, **kwargs).one()", "def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)", "def get(cls, **kwargs):\n # kwergs = map(lambda key, value: f\"{key}={value}\", kwargs.items())\n return cls.query.filter_by(\n **kwargs\n ).one_or_none()", "def getFilter(self):\n col = self.filtercol.get()\n val = self.filtercolvalue.get()\n op = self.operator.get()\n booleanop = self.booleanop.get()\n return col, val, op, booleanop", "def filter(self) -> Optional[str]:\n return pulumi.get(self, \"filter\")", "def get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name 
= info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def save(self):\n saved_filter = SavedFilterIterator()\n source_field = self._source.serialized_name() + '_source'\n getattr(saved_filter, source_field).CopyFrom(self._source.save())\n saved_filter.expression = self._raw_expression\n if self._mu is not None:\n pyDict_to_protoDict(self._mu, saved_filter.mu)\n return saved_filter", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = rewriteaction()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "async def get_or_create(self, **filters):\n obj = await self.get(**filters)\n if obj is not None:\n return (obj, False)\n connection_kwarg = self.connection_kwarg\n connection = filters.get(connection_kwarg)\n state = {k: v for k, v in filters.items()\n if '__' not in k and k != connection_kwarg}\n obj = self.model(**state)\n await obj.save(force_insert=True, connection=connection)\n return (obj, True)", "def first(self, filter_deleted=False):\n objects = self.matching_objects(filter_deleted=filter_deleted)\n\n if len(objects) > 0:\n value = objects[0]\n if self.session is not None:\n if hasattr(value, \"id\"):\n self.session.watch(value)\n return value\n else:\n return None", "def get_list_filters(self):\n # look in session for the saved search...\n filters = ListFilter()\n filters.get_list_filter(self.table)\n return filters", "def current_filter(self):\n return self._proxy.get(\"current_filter\", \"filterwheel\")", "def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, question=self.kwargs['pk'])\n self.check_object_permissions(self.request, obj)\n return obj", "def find(self, **kwargs):\n return self.__model__.query.filter_by(**kwargs)", "def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n\n # Perform the lookup filtering.\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n\n assert lookup_url_kwarg in self.kwargs, (\n 'Expected view %s to be called with a URL keyword argument '\n 'named \"%s\". Fix your URL conf, or set the `.lookup_field` '\n 'attribute on the view correctly.' 
%\n (self.__class__.__name__, lookup_url_kwarg)\n )\n\n filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}\n obj = get_object_or_404(queryset, **filter_kwargs)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def __get__(self, model_instance, model_class):\r\n if model_instance is not None:\r\n query = Query(self.__model)\r\n if type(self.__property) == list:\r\n props = []\r\n for prop in self.__property:\r\n props.append(\"%s =\" % prop)\r\n return query.filter(props, model_instance)\r\n else:\r\n return query.filter(self.__property + ' =', model_instance)\r\n else:\r\n return self", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters", "def filter(self, *args, **kwargs):\n return FilteredQuery(self, F(*args, **kwargs))", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = appfwlearningsettings()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def _create_filter_object(form_data: Dict) -> Q:\n filter_object = Q(title__icontains=form_data[\"title\"])\n filter_object &= Q(author__icontains=form_data[\"author\"])\n filter_object &= Q(\n publication_language__icontains=form_data[\"publication_language\"]\n )\n if form_data[\"publication_date_start\"]:\n filter_object &= Q(\n publication_date__gte=form_data[\"publication_date_start\"]\n )\n if form_data[\"publication_date_end\"]:\n filter_object &= Q(publication_date__lte=form_data[\"publication_date_end\"])\n return filter_object", "def read(self, request, pk):\n if pk is None:\n return self.model.objects.all()\n else:\n return self._object_get(pk)", "def getRecordFilter(self):\n\n # Replace dots with double underscore to get the notation that is\n # required by `Q`. 
Note that we could use that notation right away, but\n # IMHO dot notation, is nicer to read.\n key = self.record_field.replace(\".\", \"__\").lower()\n return Q(**{key: self.value})", "def get(**kwargs):\n try:\n object = ObjectDB.objects.get(**kwargs)\n except ObjectDB.DoesNotExist:\n object = None\n\n return object", "def find(cls, **filters):\n return cls.query.filter_by(**filters).all()", "def get_filter_name(self):\n pass", "def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = queryset.get(pk=self.request.user.id)\n self.check_object_permissions(self.request, obj)\n return obj", "def save(self, *args, **kwargs):\n self.where_clause = None\n\n if self.filters is not None:\n queries = []\n\n for key in self.filters:\n category = self.project.categories.get(pk=key)\n queries.append(category.get_query(self.filters[key]))\n\n if len(queries) > 0:\n query = ' OR '.join(queries)\n self.where_clause = query\n else:\n self.where_clause = 'FALSE'\n\n super(FilterMixin, self).save(*args, **kwargs)", "def _process_storage_info_filters(query, filters):\n if filters:\n if not is_valid_model_filters(models.Storage, filters):\n return\n query = query.filter_by(**filters)\n return query", "def filter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"filter\")", "def _custom_filter(self, query):\r\n return query", "def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = lbprofile()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def from_query(\n cls: Type[SPK],\n *,\n key: str = None,\n filter: BinaryExpression = None,\n order: BinaryExpression = None,\n ) -> Optional[SPK]:\n cls_pk = cache.get(key) if key else None\n if not cls_pk or not isinstance(cls_pk, int):\n query = cls._construct_query(cls.query, filter, order)\n model = query.first()\n if model:\n if not cache.has(model.cache_key):\n cache.cache_model(model)\n if key:\n cache.set(key, model.primary_key)\n return model\n return None\n return cls.from_pk(cls_pk)", "def searchRecords(self, filterChoice, keyword):\r\n session = wx.GetApp().session\r\n model = getattr(db, self.modelName)\r\n\r\n result = None\r\n if filterChoice == \"Person\":\r\n qry = session.query(model)\r\n logging.debug(qry)\r\n result = qry.filter(db.Person.full_name.contains('%s' % keyword))\r\n\r\n result = result.all()\r\n\r\n logging.debug(result)\r\n return result", "def get_query(self):\n q = db.Query(self.KIND,keys_only=self.KEYS_ONLY)\n for prop, value in self.FILTERS:\n q.filter(\"%s =\" % prop, value)\n if self.ancestor:\n q.ancestor(self.ancestor)\n q.order(self.ORDER_BY)\n return q", "def condition_filter(self, filter_id):\r\n return filters.Filter(self, filter_id)", "def _get_filter(self, args):\n\n # Create the filters list\n filter_list = []\n \n # If we want to record all requests, add the file logger filter\n if args.record:\n filter_list.append(filters.StoreLoggerFilter(args.url))\n\n # Add the whitelist filter\n wl_filter = filters.WhitelistedSiteFilter(args.url)\n filter_list.append(wl_filter)\n\n # Create the ACL filter that filters all requests from devices\n acl_filter = filters.DeviceACLFilter(filter_list, args.url)\n\n return acl_filter", "def get_filters(self, saving):\n self.filter_entry_dict.clear()\n\n for entry, var in self.filter_entries_list:\n if (entry.get() != \"\") 
and (var.get() != \"\") and (not saving):\n self.filter_entry_dict[var.get()] = entry.get()\n elif saving and var.get() != \"\":\n self.filter_entry_dict[var.get()] = entry.get()", "def retrieve(self, data_only_filter=\"all\", return_type=\"python\"):\n if return_type == \"python\":\n if data_only_filter == \"all\":\n return dict(dict_data=self.data_dict, list_data=self.data_list)\n elif data_only_filter == \"list\":\n return self.data_list\n elif data_only_filter == \"dict\":\n return self.data_dict\n else:\n print(\">>>> Data filter only: {'all', 'list', 'dict'}, your: %s\" % data_only_filter)\n exit(1)\n elif return_type == \"model\":\n if data_only_filter == \"all\":\n return dict(dict_data=DictModel(name=\"obj_dict\", raw_data=self.data_dict),\n list_data=DictModel(name=\"obj_list\", raw_data=self.data_list))\n elif data_only_filter == \"list\":\n return DictModel(name=\"obj_dict\", raw_data=self.data_dict)\n elif data_only_filter == \"dict\":\n return DictModel(name=\"obj_list\", raw_data=self.data_list)\n else:\n print(\">>>> Data filter only: {'all', 'list', 'dict'}, your: %s\" % data_only_filter)\n exit(1)\n else:\n print(\">>>> Return type only: {'python', 'model'}, your: %s\" % return_type)\n exit(1)", "def filter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"filter\")", "def addAutoSaveFilter(filter):", "def get_filters(self) -> dict:\n return self._filters", "def get_queryset(self) -> object:\n category = Category.objects.all().filter(id=self.kwargs['pk'])\n return category", "def get_object(self):\n url_kwarg = self.lookup_url_kwarg or self.lookup_field\n lookup = self.kwargs.get(url_kwarg, None)\n # try converting to UUID, call parent to lookup by UUID if successful\n try:\n lookup = UUID(lookup)\n self.lookup_url_kwarg = url_kwarg\n self.lookup_field = 'uuid'\n except ValueError:\n pass\n return super(StudyInternalsFilterMixin, self).get_object()", "def __call__(self, *filters):\n return self.client._get_and_filter(Domain, *filters)", "def get(cls, **kwargs) -> Dict:\n return cls.where(**kwargs).first()", "def get_query(cls, model, info: 'ResolveInfo', sort=None, **args):\n query = super().get_query(model, info, sort, **args)\n\n request_filters = args.get(cls.filter_arg)\n if request_filters:\n filter_set = cls.get_filter_set(info)\n query = filter_set.filter(info, query, request_filters)\n\n return query", "def details_filter(self):\n return self._details_filter", "def filters(self):\n return self.England_filter", "def get(cls, args=None, user=None, session=None):\n if session is None:\n session = db.session\n try:\n instance = session.query(cls).filter_by(key=args, user=user['id']).one()\n except NoResultFound:\n instance = None\n return instance", "def __getitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).one()", "def process_filters(self, filters, queryset, view):\n return filters", "def get_object(id):", "def test_query_filter_field(self):\n obj = self.provision_single_asset()\n # TODO: Write a positive test for this\n ret = self.get('widget', 200,\n params={'__filter': [\n {'field': 'created_at', 'name': 'name', 'op': 'eq'}]})\n assert len(ret['objects']) == 0", "def create_search_filter(\n *, db_session: Session = Depends(get_db), search_filter_in: SearchFilterCreate\n):\n try:\n search_filter = create(db_session=db_session, search_filter_in=search_filter_in)\n return search_filter\n except IntegrityError:\n raise HTTPException(\n 
status_code=409, detail=\"A search filter already exists with this name.\"\n )", "def FilterMode(self):\n if self.force_auto_sync:\n self.get('FilterMode')\n return self._FilterMode", "def getRecordFilter(self):\n return Q(**{\"report__report_type\": self.value})", "def get(self, pk):\n return self.model.query.options(self.from_cache(pk=pk)).get(pk)", "def get_real_object(self):\n query_string = dedent(f\"\"\"\\\n import app.config.models_importer as models_importer\n\n class_ = models_importer.all_models['{self.ref_class}']\n \n class_.query.get({self.ref_id})\"\"\")\n\n return exec(query_string)", "def get_psf(self, run, camcol, field, filter, rowc, colc, verbose=True):\n self.cache(run, camcol, field)\n filternum=sdsspy.FILTERNUM[filter]\n kl = self.psf_kls[filternum]\n psf = kl.rec(rowc, colc, trim=True)\n return psf", "def test_get_stored_obj_by_where_clause(session):\n instance1 = Foo()\n instance2 = Foo()\n id1 = persist(session, instance1)\n id2 = persist(session, instance2)\n\n where_clause = Foo.__table__.c.id == id1\n stored_instance = get_stored_obj(session, Foo, where_clause=where_clause)\n assert stored_instance.id == instance1.id\n\n where_clause = Foo.__table__.c.id == id2\n stored_instance = get_stored_obj(session, Foo, where_clause=where_clause)\n assert stored_instance.id == instance2.id", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def queryset(self, request, queryset):\n if self.value() == \"animals\":\n return queryset.animals()\n if self.value() == \"webelos\":\n return queryset.webelos()", "def get_object(self, obj, key, val):\n f = {key: value}\n try:\n return obj.objects.get(**f)\n except obj.DoesNotExist:\n return None", "def lookup_obj(self,):\n return self._lookup_obj" ]
[ "0.6960243", "0.62703", "0.62410784", "0.61514443", "0.6141893", "0.61203706", "0.60491437", "0.60114807", "0.585763", "0.5800049", "0.578962", "0.5782984", "0.5733101", "0.5730981", "0.571787", "0.5717332", "0.5716527", "0.5708957", "0.5699338", "0.5699338", "0.5699338", "0.5667219", "0.56482416", "0.56412035", "0.5615871", "0.5610554", "0.56024283", "0.559583", "0.55694085", "0.5552218", "0.55512375", "0.55444413", "0.5541622", "0.55208313", "0.551589", "0.5513191", "0.5500609", "0.54977584", "0.5490432", "0.54902506", "0.5478282", "0.5475683", "0.5436622", "0.5423879", "0.54181385", "0.5385206", "0.53770703", "0.5374422", "0.53426975", "0.53426975", "0.5323972", "0.5320723", "0.5310665", "0.5294748", "0.5293287", "0.52928925", "0.52858895", "0.52813", "0.5278518", "0.5278041", "0.52767545", "0.5275384", "0.52745605", "0.52740055", "0.5269772", "0.52680635", "0.5266609", "0.5263476", "0.52631134", "0.52601254", "0.52254504", "0.5221665", "0.5209215", "0.5204864", "0.5200748", "0.51927334", "0.5191752", "0.5181994", "0.5160056", "0.5159903", "0.5149855", "0.5147126", "0.5134997", "0.5132305", "0.51275384", "0.51076436", "0.510104", "0.50962675", "0.50887024", "0.50825447", "0.50771874", "0.5071272", "0.506267", "0.505508", "0.505473", "0.50537276", "0.5053345", "0.5043065", "0.5037799", "0.50349826" ]
0.6990458
0
Store away filter settings for later use.
def stash_filter(
    self, filter_obj, institute_obj, case_obj, user_obj, category="snv", link=None
):
    LOG.info(
        "Stashing filter for user '%s' and institute %s.",
        user_obj.get("email"),
        institute_obj.get("display_name"),
    )
    LOG.info("Filter object {}".format(filter_obj))
    institute_id = institute_obj.get("_id")
    filter_dict = {"institute_id": institute_id, "category": category}

    # make up a default display name
    filter_dict["display_name"] = (
        institute_obj.get("display_name") + "-" + case_obj.get("display_name")
    )

    for (element, value) in filter_obj.lists():
        if value == [""]:
            continue
        if element in ["save_filter", "filters", "csrf_token"]:
            continue
        if element == "filter_display_name":
            # filter display_name if given
            # will appear as the only element in an array
            filter_dict["display_name"] = value[0]
            continue
        filter_dict[element] = value

    result = self.filter_collection.insert_one(filter_dict)
    filter_id = result.inserted_id

    # log event
    subject = institute_obj["display_name"]

    # link e.g. to the variants view where filter was created
    if link is None:
        variants_target_from_category = {
            "sv": "variants.sv_variants",
            "cancer": "variants.cancer_variants",
            "snv": "variants.variants",
        }
        target = variants_target_from_category.get(category)

        case_name = case_obj.get("display_name")
        # filter dict already contains institute_id=institute_id,
        link = url_for(target, case_name=case_name, **filter_dict)

    self.create_event(
        institute=institute_obj,
        case=case_obj,
        user=user_obj,
        link=link,
        category="case",
        verb="filter_stash",
        subject=subject,
        level="global",
    )
    return filter_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def setFilters(self, filters):\n self.__filters = filters", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def filters(self, filters):\n\n self._filters = filters", "def get_filters(self, saving):\n self.filter_entry_dict.clear()\n\n for entry, var in self.filter_entries_list:\n if (entry.get() != \"\") and (var.get() != \"\") and (not saving):\n self.filter_entry_dict[var.get()] = entry.get()\n elif saving and var.get() != \"\":\n self.filter_entry_dict[var.get()] = entry.get()", "def _filter_settings(self, kwargs):\n self.settings = {}\n \n for key, default in self._defaults.iteritems():\n self.settings[key] = kwargs.get(key, default)\n \n try:\n del kwargs[key]\n except KeyError:\n pass\n \n return kwargs", "def set_scanning_filter(self, **kwargs):\n for k, v in kwargs.get(\"filters\", {}).items():\n if k == \"UUIDs\":\n self._filters[k] = Variant(\"as\", v)\n elif k == \"RSSI\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Pathloss\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Transport\":\n self._filters[k] = Variant(\"s\", v)\n elif k == \"DuplicateData\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Discoverable\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Pattern\":\n self._filters[k] = Variant(\"s\", v)\n else:\n logger.warning(\"Filter '%s' is not currently supported.\" % k)\n\n if \"Transport\" not in self._filters:\n self._filters[\"Transport\"] = Variant(\"s\", \"le\")", "def addAutoSaveFilter(filter):", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def on_filter_settings_triggered(self):\n\n FilterSettings(self, self.control).show()", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def addAutoSaveRestoreFilter(filter):", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def removeAutoSaveFilter(filter):", "def filter(self, filter):\n self._filter = filter", "def filters(self):\n\t\treturn self.local_filter", "def update_filters(self, fromTime='', 
toTime='', language=''):\n if fromTime:\n self._FILTERS['fromTime'] = fromTime\n if fromTime:\n self._FILTERS['toTime'] = toTime\n if language:\n self._set_language(language)", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = appfwlearningsettings()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def filter(self, filters):", "def set_restriction_filters(self):\n self.restriction_filters[\"pk__exact\"] = self.request.user.pk", "def __init__(self) -> None:\n self._settings = {}\n\n # Load values from global_settings (only uppercase)\n self.filter_and_set(global_settings)\n\n settings_env_value: str = os.environ.get(SETTINGS_ENV)\n if settings_env_value:\n # Load values from custom settings\n try:\n module = importlib.import_module(settings_env_value)\n except ModuleNotFoundError:\n msg = \"Can't import custom settings. Is it under PYTHONPATH?\"\n raise ModuleError(msg)\n self.filter_and_set(module)", "def get_filters(self):", "def setDefaultFilter(self):\n self.logsItem.setDefaultFilter()", "def initialize_globals():\n for filter_name in filter_list_from_config:\n try:\n trade_filter = acm.FTradeSelection[filter_name]\n live_trade_filters.append(trade_filter)\n add_trades(trade_filter)\n except Exception:\n exc_type, _exc_obj, exc_tb = sys.exc_info()\n log(exc_type)\n log(_exc_obj)\n log(exc_tb.tb_lineno)", "def search_settings(self, search_settings):\n\n self._search_settings = search_settings", "def get_filter_settings(options): \n \n if options.filternames != '-':\n filter_names = options.filternames.split(',')\n else:\n hdf_in = h5py.File(options.filters, 'r')\n filter_names = sorted(hdf_in.keys())\n hdf_in.close()\n\n if options.filtercombs != '-':\n filter_combs = []\n for fc in options.filtercombs.split(':'):\n filter_combs.append(fc.split(','))\n filter_combs[-1] = [int(x) for x in filter_combs[-1]]\n else:\n filter_combs = [[x] for x in range(len(filter_names))]\n\n if options.filtertypes == '-':\n filter_types = ['any'] * len(filter_names)\n else:\n ft = options.filtertypes.split(',')\n if len(ft) == 1:\n filter_types = [ft[0]] * len(filter_names)\n else:\n assert(len(ft) == len(filter_names))\n filter_types = ft\n \n return (filter_names, filter_combs, filter_types)", "def update_filter_params(self, fh):\n (self.data_timestamp, self.framerate,\n self.l, self.d, self.gamma,\n self.eps, self.alex, self.traceswitch) = (fh.attrs['data_timestamp'], fh.attrs['framerate'],\n fh.attrs['l'], fh.attrs['d'], fh.attrs['gamma'],\n fh.attrs['eps'], fh.attrs['alex'], fh.attrs['traceswitch'])", "def std_filters():\n kwargs = {\n \"sentence_filters\":[punctuation_filter],\n \"word_filters\":[small_word_filter, stopword_filter, stemming_filter]\n }\n return kwargs", "def test_filter_settings(self):\n self.es.register_filter(foo='bar')\n self.assertTrue(callable(self.es.filter['all'][0]))\n self.es.register_filter(bar='baz')\n self.assertLength(self.es.filter['all'], 2)", "def filter_and_set(self, module: \"module\") -> None:\n for key in filter(lambda x: x.isupper(), dir(module)):\n self._settings[key] = getattr(module, key)", "def removeAutoSaveRestoreFilter(filter):", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter 
= config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)", "def allow_filtering(self):\r\n clone = copy.deepcopy(self)\r\n clone._allow_filtering = True\r\n return clone", "def _set_sub_settings(self, sub):\n\n if len(sub) == 3:\n time_filter = \"all\" \\\n if sub[1].upper() in self._filterables \\\n else None\n settings = [sub[1], sub[2], time_filter]\n if len(sub) == 4:\n settings = [sub[1], sub[2], sub[3]]\n\n return settings", "def filter(self, filter_dict):\n pass", "def filters(self, value):\n if not isinstance(value, dict):\n raise TypeError(\"input must be a dictionary\")\n\n self._filters = value", "def save_settings(self, settings):\n settings.set_value('itemlist', pack(self._parentfilter._selected_items))", "def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)", "def filters(self, **kwargs):\n return config.filters(self._host, self._session, **kwargs)", "def get_filters(self) -> dict:\n return self._filters", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def autoSaveFilter(filename):", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "async def async_set_filters(self, filters, state_mode):\n if filters not in ON_OFF_LIST:\n return\n self._filter = filters.lower()\n self._state_mode = state_mode\n await self.async_send_cmd()", "def dashboard_filters(self, dashboard_filters):\n\n self._dashboard_filters = dashboard_filters", "def policy_filter(self, policy_filter):\n self._policy_filter = policy_filter", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def initialize_filter(self):\n shape = self.filter_size + 
(self.input_shape[-1], self.channels)\n self.filter = self.filter_initializer(shape)", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def apply_grab_settings(self):\n raise NotImplementedError", "def save(self):\n saved_filter = SavedFilterIterator()\n source_field = self._source.serialized_name() + '_source'\n getattr(saved_filter, source_field).CopyFrom(self._source.save())\n saved_filter.expression = self._raw_expression\n if self._mu is not None:\n pyDict_to_protoDict(self._mu, saved_filter.mu)\n return saved_filter", "def filters(self):\n return self.__filters", "def configure_filters(app):\n\n for (name, filter) in _filters.iteritems():\n app.jinja_env.filters[name] = filter", "def filters(self):\n return {\n 'port_channels': port_channels\n }", "def set_scanning_filter(self, **kwargs):\n warn(\n \"This method will be removed in a future version of Bleak. Use BleakScanner constructor args instead.\",\n FutureWarning,\n stacklevel=2,\n )\n self._backend.set_scanning_filter(**kwargs)", "def add_filter(self, name: str, value: any):\n self.filters[name] = value", "def process_filters(self, filters, queryset, view):\n return filters", "def filters(self):\n return self.England_filter", "def settings(self, settings):\n\n self._settings = settings", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def set_git_filter_attribute(self, filtername):\n self._filter = filtername", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def load_filters(self):\n buffer_dict = dict(self.named_buffers())\n n = 0\n\n for k in self.phi_f.keys():\n if type(k) != str:\n self.phi_f[k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi1_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi2_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1", "def setFilters(self, regex=None):\n if regex is not None:\n try:\n self.__regex = re.compile(regex)\n except Exception as e:\n return\n\n self.__all_filters = (self.__regex,)\n\n self.__customFilterEnabled = any(self.__all_filters)\n self.invalidateFilter()", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def try_load_filter_opts(self):\n\t\topts = [None]*5\n\t\tif self.filters_open():\n\t\t\ttry:\n\t\t\t# possibly need to grab child of each opt for ios\n\t\t\t\topts[0] = self.driver.find_element_by_id('filter_invited')\n\t\t\t\topts[1] = self.driver.find_element_by_id('filter_inactive')\n\t\t\t\topts[2] = self.driver.find_element_by_id('filter_active')\n\t\t\t\topts[3] = self.driver.find_element_by_id('filter_removed')\n\t\t\t\topts[4] = self.driver.find_element_by_id('filter_terminated')\n\t\t\texcept 
NoSuchElementException:\n\t\t\t\tpass\n\t\tself.filter_opts = opts", "def prepend_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = [filter] + self.filters", "def reload_filters(self):\n while self.run_forever:\n time.sleep(60 * 5)\n\n active_filters = self.get_active_filters()\n self.filter_templates = self.make_filter_templates(active_filters)", "def append_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = self.filters + [filter]", "def init_filters(model: Model, settings: Model) -> None:\n filters = [\n {\"name\": \"Project\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Attachments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Priority\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Resolved\", \"filtration_type\": \"date\"},\n {\"name\": \"Labels\", \"filtration_type\": \"string\"},\n {\"name\": \"Created\", \"filtration_type\": \"date\"},\n {\"name\": \"Comments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Status\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Key\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Summary\", \"filtration_type\": \"string\"},\n {\"name\": \"Resolution\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Description\", \"filtration_type\": \"string\"},\n {\"name\": \"Components\", \"filtration_type\": \"string\"},\n ]\n for filter_ in filters:\n model.objects.create(\n name=filter_[\"name\"],\n filtration_type=filter_[\"filtration_type\"],\n settings=settings,\n )", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "def _write_filter_params(self, spec):\n spec.switch_write_focus(self.REGIONS.FILTER_PARAMS.value)\n for param in self._filter_params:\n spec.write_value(param, data_type=DataType.FLOAT_64)", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def set_module_plugin_filters(self, module_plugin_filters):\n module_plugin_filters = util.return_list(module_plugin_filters)\n self.module_plugin_filters = module_plugin_filters", "def filters(self):\n return self._filters", "def reset_params(self):\n self.blur = -1\n self.closing = -1\n self.thresh = -1", "def default_search_filters(cls):\n q = QueryDict(mutable=True)\n q.setlist('status', cls.DEFAULT_SEARCH)\n return q.urlencode()", "def _toggle_filter(self, filter, toggle):\n \n if(bool(self.filters & filter) == bool(toggle)): # Filter already correctly set\n return\n else:\n if(toggle): # Adding the filter\n self.filters |= filter # Add the filter bit\n else:\n self.filters &= Filters.AllFilters ^ filter # Remove the filter bit\n\n self._reset_view()", "def reference_filters(self, version, options):\n pass", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + 
'-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def overrides(self) -> tuple[dict[str, Any], dict[str, Any]]:\n settings = {}\n if self.actions:\n settings = self.actions.overrides\n if self.validations:\n settings |= self.validations.overrides\n\n filter_settings = {}\n if self.extra_fields:\n filter_settings = self.extra_fields.model_dump(exclude_unset=True)\n\n return settings, filter_settings", "def filter_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n\n def ext_filter(app):\n return UrlRewriteFilter(app, conf)\n return ext_filter", "def setFilterTrackingCookies(self, filterTrackingCookies):\n if filterTrackingCookies == self.__filterTrackingCookies:\n return\n \n self.__filterTrackingCookies = filterTrackingCookies\n self.__saveTimer.changeOccurred()", "def _set_filter_type(filter):\n if filter == 'nat':\n return '-N'\n if filter == 'options':\n return '-O'\n if filter == 'filter':\n return '-R'", "def set_default_filters(fprime_test_api):\n set_event_filter(fprime_test_api, \"COMMAND\", True)\n set_event_filter(fprime_test_api, \"ACTIVITY_LO\", True)\n set_event_filter(fprime_test_api, \"ACTIVITY_HI\", True)\n set_event_filter(fprime_test_api, \"WARNING_LO\", True)\n set_event_filter(fprime_test_api, \"WARNING_HI\", True)\n set_event_filter(fprime_test_api, \"DIAGNOSTIC\", False)", "def _filter_in_request(self):\n pass", "def get_filters() -> dict:\n if environment is None or not hasattr(environment, 'loader'):\n return {}\n return environment.filters", "def on_filter_instances(self):\n self._set_filter_value(\n 'filterInstances', self.filter_instances_btn.isChecked())", "def _setParam(self, callerId, key, value):\n if key not in self.FilterParameters:\n self.__docWriter.addParam(callerId, key)", "def onSettings(self):\n pass", "def set_sensitive_to_filter(self, sensitive_name, sensitive_val):\n self.name += str(sensitive_val)\n self.sensitive_filter = sensitive_val\n self.sensitive_for_metric = sensitive_name", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def reset_instances_filter(self):\n page_instances = self.page_instances()\n page_instances.field_filter_instances.value = ''\n page_instances.button_filter_instances.click()" ]
[ "0.702972", "0.6695293", "0.6549355", "0.6472333", "0.6450351", "0.6393664", "0.63646764", "0.62200606", "0.616542", "0.61460763", "0.6126508", "0.61021346", "0.6078377", "0.60579336", "0.6051252", "0.60247004", "0.60106236", "0.60073197", "0.59548503", "0.59348184", "0.59262776", "0.5872105", "0.5855159", "0.5814564", "0.579907", "0.5781005", "0.5780019", "0.5763543", "0.5756455", "0.5754173", "0.574974", "0.57414156", "0.5713749", "0.5709313", "0.5692007", "0.56865335", "0.5663815", "0.5651846", "0.5648998", "0.56483", "0.56475127", "0.5635879", "0.5613893", "0.56017023", "0.55969566", "0.5592379", "0.55797535", "0.5573734", "0.5567016", "0.55628115", "0.5556951", "0.5544379", "0.5532383", "0.5510206", "0.5495167", "0.54872566", "0.5460362", "0.5449035", "0.54473627", "0.54353094", "0.5434056", "0.5428728", "0.5419421", "0.5403461", "0.5393576", "0.53903157", "0.53839", "0.5383639", "0.5357768", "0.53542095", "0.5349062", "0.533929", "0.53388953", "0.53313744", "0.5328632", "0.53272724", "0.5324406", "0.53236306", "0.53106683", "0.53100926", "0.5301306", "0.52813387", "0.52781093", "0.5272321", "0.52685976", "0.52627015", "0.525145", "0.5234516", "0.52323323", "0.5231785", "0.5226094", "0.52236927", "0.5209257", "0.51995784", "0.51960015", "0.5194944", "0.518764", "0.51828027", "0.5160323", "0.5151224", "0.513669" ]
0.0
-1
Obtain a cursor for all filters available to an institute in a category.
def filters(self, institute_id, category="snv"):
    filters_res = self.filter_collection.find(
        {"institute_id": institute_id, "category": category}
    )
    return filters_res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select(self, eng_category):\r\n sql_select_query = \"SELECT Name, URL, Ingredients FROM \"+ str(eng_category)\r\n self.mycursor.execute(sql_select_query)\r\n records = self.mycursor.fetchall()\r\n \r\n return records", "def filter():\n return get_filter_data(db, MyTable)", "def get_filters(self):", "def search_categories(self):\n with Transaction().start(DBNAME, 1):\n categorieslist = self.Category.search(['parent', '=', 'Ingredients'])\n return tuple(i.name for i in categorieslist)", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def _filter(self, __button):\r\n# WARNING: Refactor _filter; current McCabe Complexity metric = 54.\r\n _criteria = []\r\n _inputs = []\r\n _compound = []\r\n\r\n # Read the user inputs for the different fields that can be used to\r\n # filter with.\r\n _criteria.append(self.cmbCriteriaID.get_active_text())\r\n _inputs.append(self.txtFilterID.get_text())\r\n _compound.append(self.cmbCompound1.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCategory.get_active_text())\r\n _inputs.append(self.cmbFilterCategory.get_active())\r\n _compound.append(self.cmbCompound2.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaType.get_active_text())\r\n _inputs.append(self.cmbFilterType.get_active())\r\n _compound.append(self.cmbCompound3.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaStatus.get_active_text())\r\n _inputs.append(self.cmbFilterStatus.get_active())\r\n _compound.append(self.cmbCompound4.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCriticality.get_active_text())\r\n _inputs.append(self.cmbFilterCriticality.get_active())\r\n _compound.append(self.cmbCompound5.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAge.get_active_text())\r\n _inputs.append(self.txtFilterAge.get_text())\r\n _compound.append(self.cmbCompound6.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLifeCycle.get_active_text())\r\n _inputs.append(self.cmbFilterLifeCycle.get_active())\r\n _compound.append(self.cmbCompound7.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaShortDesc.get_active_text())\r\n _inputs.append(self.txtFilterShortDesc.get_text())\r\n _compound.append(self.cmbCompound8.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLongDesc.get_active_text())\r\n _inputs.append(self.txtFilterLongDesc.get_text())\r\n _compound.append(self.cmbCompound9.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRemarks.get_active_text())\r\n _inputs.append(self.txtFilterRemarks.get_text())\r\n _compound.append(self.cmbCompound10.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAnalysis.get_active_text())\r\n _inputs.append(self.txtFilterAnalysis.get_text())\r\n _compound.append(self.cmbCompound11.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTest.get_active_text())\r\n _inputs.append(self.txtFilterTest.get_text())\r\n _compound.append(self.cmbCompound12.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTestCase.get_active_text())\r\n _inputs.append(self.txtFilterTestCase.get_text())\r\n _compound.append(self.cmbCompound13.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestBy.get_active_text())\r\n _inputs.append(self.cmbFilterRequestBy.get_active_text())\r\n 
_compound.append(self.cmbCompound14.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestDate.get_active_text())\r\n _inputs.append(self.txtFilterRequestDate.get_text())\r\n _compound.append(self.cmbCompound15.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewBy.get_active_text())\r\n _inputs.append(self.cmbFilterReviewBy.get_active_text())\r\n _compound.append(self.cmbCompound16.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewDate.get_active_text())\r\n _inputs.append(self.txtFilterReviewDate.get_text())\r\n _compound.append(self.cmbCompound17.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveBy.get_active_text())\r\n _inputs.append(self.cmbFilterApproveBy.get_active_text())\r\n _compound.append(self.cmbCompound18.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveDate.get_active_text())\r\n _inputs.append(self.txtFilterApproveDate.get_text())\r\n _compound.append(self.cmbCompound19.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseBy.get_active_text())\r\n _inputs.append(self.cmbFilterCloseBy.get_active_text())\r\n _compound.append(self.cmbCompound20.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseDate.get_active_text())\r\n _inputs.append(self.txtFilterCloseDate.get_text())\r\n _compound.append(self.cmbCompound21.get_active_text())\r\n\r\n _inputs.append(self.chkFilterAccepted.get_active())\r\n _compound.append(self.cmbCompound22.get_active_text())\r\n\r\n _inputs.append(self.chkFilterReviewed.get_active())\r\n\r\n _criteria.append(self.cmbCriteriaAssembly.get_active_text())\r\n _model = self.cmbAssembly.get_model()\r\n _row = self.cmbAssembly.get_active_iter()\r\n if _row is not None:\r\n _text = int(_model.get_value(_row, 1))\r\n else:\r\n _text = 0\r\n _inputs.append(_text)\r\n _compound.append(self.cmbCompound23.get_active_text())\r\n\r\n # Build the query from the user-provided inputs.\r\n if all(_c is None for _c in _criteria):\r\n query = None\r\n elif Configuration.RTK_MODULES[0] == 1:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id={0:d} AND \".format(\r\n self._revision_id)\r\n else:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id=0 AND \"\r\n\r\n if _criteria[0] is not None and _criteria[0] != '':\r\n query = query + \"fld_incident_id\" + _criteria[0] + _inputs[0]\r\n if _compound[0] is not None and _compound[0] != '':\r\n query = query + \" \" + _compound[0] + \" \"\r\n\r\n if _criteria[1] is not None and _criteria[1] != '':\r\n query = query + \"fld_incident_category\" + _criteria[1] + \\\r\n str(_inputs[1])\r\n if _compound[1] is not None and _compound[1] != '':\r\n query = query + \" \" + _compound[1] + \" \"\r\n\r\n if _criteria[2] is not None and _criteria[2] != '':\r\n query = query + \"fld_incident_type\" + _criteria[2] + \\\r\n str(_inputs[2])\r\n if _compound[2] is not None and _compound[2] != '':\r\n query = query + \" \" + _compound[2] + \" \"\r\n\r\n if _criteria[3] is not None and _criteria[3] != '':\r\n query = query + \"fld_status\" + _criteria[3] + str(_inputs[3])\r\n if _compound[3] is not None and _compound[3] != '':\r\n query = query + \" \" + _compound[3] + \" \"\r\n\r\n if _criteria[4] is not None and _criteria[4] != '':\r\n query = query + \"fld_criticality\" + _criteria[4] + str(_inputs[4])\r\n if _compound[4] is not None and _compound[4] != '':\r\n query = query + \" \" + _compound[4] + \" \"\r\n\r\n if _criteria[5] is not None and _criteria[5] != '':\r\n query = query + \"fld_incident_age\" + 
_criteria[5] + str(_inputs[5])\r\n if _compound[5] is not None and _compound[5] != '':\r\n query = query + \" \" + _compound[5] + \" \"\r\n\r\n if _criteria[6] is not None and _criteria[6] != '':\r\n query = query + \"fld_life_cycle\" + _criteria[6] + str(_inputs[6])\r\n if _compound[6] is not None and _compound[6] != '':\r\n query = query + \" \" + _compound[6] + \" \"\r\n\r\n if _criteria[21] is not None and _criteria[21] != '':\r\n query = query + \"fld_hardware_id\" + _criteria[21] + \\\r\n str(_inputs[23])\r\n if _compound[22] is not None and _compound[22] != '':\r\n query = query + \" \" + _compound[22] + \" \"\r\n\r\n if _criteria[7] is not None and _criteria[7] != '':\r\n query = query + \"fld_short_description \" + _criteria[7] + \\\r\n \" '%\" + _inputs[7] + \"%'\"\r\n if _compound[7] is not None and _compound[7] != '':\r\n query = query + \" \" + _compound[7] + \" \"\r\n\r\n if _criteria[8] is not None and _criteria[8] != '':\r\n query = query + \"fld_long_description \" + _criteria[8] + \\\r\n \" '%\" + _inputs[8] + \"%'\"\r\n if _compound[8] is not None and _compound[8] != '':\r\n query = query + \" \" + _compound[8] + \" \"\r\n\r\n if _criteria[9] is not None and _criteria[9] != '':\r\n query = query + \"fld_remarks \" + _criteria[9] + \\\r\n \" '%\" + _inputs[9] + \"%'\"\r\n if _compound[9] is not None and _compound[9] != '':\r\n query = query + \" \" + _compound[9] + \" \"\r\n\r\n if _criteria[10] is not None and _compound[10] != '':\r\n query = query + \"fld_analysis \" + _criteria[10] + \\\r\n \" '%\" + _inputs[10] + \"%'\"\r\n if _compound[10] is not None and _compound[10] != '':\r\n query = query + \" \" + _compound[10] + \" \"\r\n\r\n if _criteria[11] is not None and _compound[11] != '':\r\n query = query + \"fld_test_found \" + _criteria[11] + \\\r\n \" '%\" + _inputs[11] + \"%'\"\r\n if _compound[11] is not None and _compound[11] != '':\r\n query = query + \" \" + _compound[11] + \" \"\r\n\r\n if _criteria[12] is not None and _compound[12] != '':\r\n query = query + \"fld_test_case \" + _criteria[12] + \\\r\n \" '%\" + _inputs[12] + \"%'\"\r\n if _compound[12] is not None and _compound[12] != '':\r\n query = query + \" \" + _compound[12] + \" \"\r\n\r\n if _criteria[13] is not None and _compound[13] != '':\r\n query = query + \"fld_request_by\" + _criteria[13] + \\\r\n \"'\" + _inputs[13] + \"'\"\r\n if _compound[13] is not None and _compound[13] != '':\r\n query = query + \" \" + _compound[13] + \" \"\r\n\r\n if _criteria[14] is not None and _compound[14] != '':\r\n query = query + \"fld_request_date\" + _criteria[14] + \\\r\n str(datetime.strptime(_inputs[14], \"%Y-%m-%d\").toordinal())\r\n if _compound[14] is not None and _compound[14] != '':\r\n query = query + \" \" + _compound[14] + \" \"\r\n\r\n if _criteria[15] is not None and _compound[15] != '':\r\n query = query + \"fld_reviewed_by\" + _criteria[15] + \\\r\n \"'\" + _inputs[15] + \"'\"\r\n if _compound[15] is not None and _compound[15] != '':\r\n query = query + \" \" + _compound[15] + \" \"\r\n\r\n if _criteria[16] is not None and _compound[16] != '':\r\n query = query + \"fld_reviewed_date\" + _criteria[16] + \\\r\n str(datetime.strptime(_inputs[16], \"%Y-%m-%d\").toordinal())\r\n if _compound[16] is not None and _compound[16] != '':\r\n query = query + \" \" + _compound[16] + \" \"\r\n\r\n if _criteria[17] is not None and _compound[17] != '':\r\n query = query + \"fld_approved_by\" + _criteria[17] + \\\r\n \"'\" + _inputs[17] + \"'\"\r\n if _compound[17] is not None and _compound[17] != '':\r\n query = 
query + \" \" + _compound[17] + \" \"\r\n\r\n if _criteria[18] is not None and _compound[18] != '':\r\n query = query + \"fld_approved_date\" + _criteria[18] + \\\r\n str(datetime.strptime(_inputs[18], \"%Y-%m-%d\").toordinal())\r\n if _compound[18] is not None and _compound[18] != '':\r\n query = query + \" \" + _compound[18] + \" \"\r\n\r\n if _criteria[19] is not None and _compound[19] != '':\r\n query = query + \"fld_complete_by\" + _criteria[19] + \\\r\n \"'\" + _inputs[19] + \"'\"\r\n if _compound[19] is not None and _compound[19] != '':\r\n query = query + \" \" + _compound[19] + \" \"\r\n\r\n if _criteria[20] is not None and _compound[20] != '':\r\n query = query + \"fld_complete_date\" + _criteria[20] + \\\r\n str(datetime.strptime(_inputs[20], \"%Y-%m-%d\").toordinal())\r\n if _compound[20] is not None and _compound[20] != '':\r\n query = query + \" \" + _compound[20] + \" \"\r\n\r\n if _inputs[21]:\r\n query = query + \"fld_accepted=%d\" % 1\r\n if _compound[21] is not None and _compound[21] != '':\r\n query = query + \" \" + _compound[21] + \" \"\r\n\r\n if _inputs[22]:\r\n query = query + \"fld_reviewed=%d\" % 1\r\n\r\n self._modulebook.request_filter_incidents(self._revision_id, query)", "def get_all_possible_filters(item_category):\n\n\tpk_lists = []\n\n\tfor filter_category in FilterCategory.objects.filter(item_category=item_category):\n\t\tfilter_option_set = filter_category.filteroption_set.all()\n\t\ttemp_list = list(filter_option_set.values_list('pk', flat=True))\n\n\t\tpk_lists.append(temp_list)\n\n\treturn pk_lists", "def search(self, what, cat='all'):\n # Sign in:\n if self.search_auth:\n self._sign_in()\n opener = self.opener\n else:\n opener = urllib2.build_opener(urllib2.BaseHandler())\n ret = []\n page = 0\n while page < self.PAGE_NUMBER:\n results = []\n parser = self.FilelistParser(results, self.url)\n url = self.url+'/browse.php?search=%s&cat=%s&searchin=0&sort=0&page=%d'%(what, self.supported_categories[cat], page)\n f = opener.open(url)\n dat = f.read().decode('iso-8859-1', 'replace')\n results_re = re.compile(\"(?s)<div class='cblock-innercontent'>.*\")\n for match in results_re.finditer(dat):\n res_tab = match.group(0)\n parser.feed(res_tab)\n parser.close()\n break\n if len(results) <= 0:\n break\n page += 1", "def browse_categories():\n print(\"***** Find Businesses by Categories *****\")\n while True:\n print()\n category = input(\n 'Please enter a type of business (category) or type \"back\" or \"quit\": ')\n print()\n if category == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if category == \"back\":\n return\n\n # create a regex pattern for business name\n pattern = r\".*\" + re.escape(category) + r\".*\"\n regx = re.compile(pattern, re.IGNORECASE)\n\n cursor = business_col.find({\"categories\": regx})\n\n business_objects = cursor.limit(10)\n \n if cursor.count() == 0:\n print(\"No businesses found with given category.\")\n continue\n for business_object in business_objects:\n print_business(business_object)", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')", "def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories", "def filter_query(self, form: dict): #-> cursor object\n form = templates.clean_filters(form)\n bitmap_query = templates.bitmap_filter_query(form)\n if not self.client:\n self.connect()\n if bitmap_query:\n print(\"This is the bitmap filter query: \", bitmap_query)\n cursor = self.client.moviebuff.bitmap.find(bitmap_query).limit(25)\n id_list = []\n if cursor:\n for x in cursor:\n id_list.append(x[\"Imdb_Title_id\"])\n order = templates.order_by(form)[\"$orderby\"]\n return self.db.find({\"$query\": { \"Imdb_Title_id\": { \"$in\": id_list}}, \"$orderby\": order})\n\n query = templates.filter_query(form)\n print(\"This is the full filter query: \", query)\n return self.db.find(query).limit(25)", "def filters():\n states = list(storage.all('State').values())\n states.sort(key=lambda state: state.name)\n cities = list(storage.all('City').values())\n cities.sort(key=lambda city: city.name)\n amenities = list(storage.all('Amenity').values())\n amenities.sort(key=lambda amenity: amenity.name)\n\n return render_template('10-hbnb_filters.html', states=states,\n cities=cities, amenities=amenities)", "def filter(self, filters):", "def get_category_list():\n return Category.objects.filter(active=True)", "def starwars_search(self, category, attribute, filters):\n self.load_response(category)\n while self.counter != int(self.response_info['count']):\n self.attribute_search(attribute, filters)\n self.load_next_response()", "def filters(self):\n # easy enough\n return self.dcpl.getFilters()", "def filters(self):\n return self.England_filter", "def with_category(self, category: str) -> list:\n return list(self.__holder.db_tags.filter(\n lambda t: t.category == category))", "async def search_by_product_or_category(\n conn, cursor, product: str = \"\", category: str = \"\"\n) -> List[str]:\n\n if (not product) and (not category):\n filter_term = \"\"\n elif product and category:\n filter_term = (\n f\"\\n WHERE product = '{product}' AND category = '{category}'\"\n )\n elif product:\n filter_term = f\"\\n WHERE product = '{product}'\"\n else:\n filter_term = f\"\\n WHERE category = '{category}'\"\n\n statement = f\"\"\"\n SELECT product.name as product,\n product.description as description,\n product.category as category,\n supplier_product.price as price,\n supplier_product.supplier as supplier,\n supplier_product.price as price,\n product.rating as product_rating,\n supplier.rating as supplier_rating,\n ROUND(((product.rating + supplier.rating)/2),2) as combined_rating,\n product.last_updated as last_updated \n FROM product \n INNER JOIN supplier_product\n ON product.name = supplier_product.product\n INNER JOIN supplier \n ON supplier_product.supplier = supplier.name {filter_term}\n ORDER BY (product.rating + supplier.rating) DESC\n \"\"\"\n await cursor.execute(statement)\n categories = await cursor.fetchall()\n return categories", "def filters(self):\n\t\treturn self.local_filter", "def show(collection, filter = {}):\n # creates a connection with database\n result = []\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n db = myclient[\"techstart\"]\n col = 
db[collection]\n for x in col.find(filter):\n result.append(x)\n return result", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # Get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Would you like to see data for Chicago, New York City or Washington?')\n if city.lower() in CITY_DATA:\n break\n print('ERROR: City does not match. Please try again.')\n\n # Get user input for month (all, january, february, ... , june)\n while True:\n month = input(\"Type month (January, February, March, April, May or June) to filter by or type 'all' for no filter\")\n if month.lower() in MONTH_LIST or month.lower() == 'all':\n break\n print(\"ERROR: Input was not a month from January to June nor all. Please try again.\")\n\n # Get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n day = input(\"Type day of the week to filter or type 'all' for no filter\")\n if day.lower() in DAY_LIST or day.lower() == 'all':\n break\n print(\"ERROR: Input was not a day of the week nor all.\")\n\n print('-'*40)\n return city, month, day", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def searchAttributeValues( self, REQUEST=None, category=None, field=None ):\n results = []\n\n if not ( category and field ):\n return results\n\n membership = getToolByName( self, 'portal_membership', None )\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n IsManager = user.IsManager()\n IsAdmin = user.IsAdmin()\n\n prptool = getToolByName( self, 'portal_properties', None )\n interval = prptool and prptool.getProperty( 'created_search_interval' ) or 60\n\n indexes = {}\n indexes['category'] = category\n indexes['created'] = { 'query' : ( DateTime()-interval, DateTime() ), 'range' : 'min:max' }\n\n if not IsAdmin:\n indexes['Creator'] = [ uname ]\n\n found_objects = self.searchResults( meta_type='HTMLDocument', **indexes )\n\n if not found_objects:\n return results\n\n for x in 
found_objects:\n value = ( str(x['CategoryAttributes'][field]) ).strip()\n if value and value not in ['None'] and value not in results:\n results.append( value )\n\n interrupt_thread( self )\n\n results.sort()\n return results", "def get_filters():\n\n city = prompts.city_prompt.launch()\n\n _filter = prompts.filter_prompt.launch()\n\n if _filter == \"Month\":\n month = prompts.month_prompt.launch()\n day = \"All\"\n\n elif _filter == \"Day\":\n day = prompts.day_prompt.launch()\n month = \"All\"\n\n elif _filter == \"Both\":\n month = prompts.month_prompt.launch()\n day = prompts.day_prompt.launch()\n\n else:\n month, day = \"All\", \"All\"\n\n print(\"-\" * 40)\n return city, month, day", "def _filter(\n self, criteria: Q, offset: int = 0, limit: int = 10, order_by: list = ()\n ) -> ResultSet:\n conn = self.provider.get_connection()\n\n # Build the filters from the criteria\n q = elasticsearch_dsl.Q()\n if criteria.children:\n q = self._build_filters(criteria)\n\n s = (\n Search(using=conn, index=self.model_cls._index._name)\n .query(q)\n .params(version=True)\n )\n\n if order_by:\n s = s.sort(*order_by)\n\n s = s[offset : offset + limit]\n\n # Return the results\n try:\n response = s.execute()\n result = ResultSet(\n offset=offset,\n limit=limit,\n total=response.hits.total.value,\n items=response.hits,\n )\n except Exception as exc:\n logger.error(f\"Error while filtering: {exc}\")\n raise\n\n return result", "def get_queryset(self, **kwargs):\n return Entry.published.filter(categories__slug=self.kwargs['cat_slug'])", "def search(self, filters=None):\n raise NotImplementedError", "def filter():\n\n course = request.args['course-filter']\n\n # Get relevant recipes\n get_recipes = mongo.db.recipes.find({'course': {'$regex': course}})\n\n count_recipes = mongo.db.recipes.count_documents({'course':\n {'$regex': course}})\n\n if course == 'All':\n flash('Here are our all of our recipes:', 'success')\n return redirect(url_for('index'))\n # If there are no recipes with the selected course\n elif count_recipes == 0:\n flash('There are currently no ' + course + ' recipes', 'danger')\n return redirect(url_for('index'))\n else:\n flash('Here are our ' + course + ' recipes:', 'success')\n return render_template('filter.html', title=course + ' Recipes',\n recipes=get_recipes)", "def get_queryset(self):\n ##Check for the url keyword arguments\n q = self.request.QUERY_PARAMS.get('q', None)\n if q:\n return Clip.live.filter(\n Q(title__icontains=q) |\n Q(author__username__icontains=q) |\n Q(categories__slug__in=[q]) |\n Q(tags__name__in=[q]) |\n Q(description__icontains=q)\n ).order_by('-created').distinct()\n\n return Clip.live.all().order_by('-created')", "def get_filterable_queryset(self):\n queryset = super().get_filterable_queryset()\n category_names = get_category_children(self.filterable_categories)\n return queryset.filter(categories__name__in=category_names)", "def FilterItems(self):\r\n\t\treturn self._get_attribute('filterItems')", "def get_queryset(self):\n\n qs = Aid.objects \\\n .published() \\\n .open() \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors')\n\n filter_form = self.form\n results = filter_form.filter_queryset(qs)\n ordered_results = filter_form.order_queryset(results).distinct()\n return ordered_results", "async def get_categories_for_filter_menu(language: str):\n try:\n category_filter_query_result = get_db().AQLQuery(\n query=menu_queries.QUERY_CATEGORIES_FOR_LANGUAGE,\n batchSize=500,\n bindVars={\"language\": language},\n )\n 
return {\"categoryitems\": category_filter_query_result.result}\n\n except DocumentNotFoundError as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error", "def visitCriteria(self, ctx: ApiQLParser.CriteriaContext):\n return lmap(lambda c: c.accept(self), ctx.getChildren(self.filter_ignored))", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n global city\n global month\n global day\n global filter_type\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = func.city()\n\n # get user input for month (all, january, february, ... , june)\n # get user input for day of week (all, monday, tuesday, ... sunday)\n filter_list = ['month', 'day', 'both', 'none']\n while True:\n try:\n filter_type = str(input('Would you like to filter the data by month, day, both, or not at all? type \"none\"for no time filter\\n')).lower()\n if filter_type == 'month':\n month = func.month()\n day = None\n break\n\n elif filter_type == 'day':\n day = func.day()\n month = None\n break\n \n elif filter_type == 'both':\n month = func.month()\n day = func.day()\n break\n \n elif filter_type == 'none':\n month = None\n day = None\n break\n\n else:\n print('Please enter the correct filter type.')\n\n except KeyboardInterrupt:\n print('Incorrect value. This is not an option!.')\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n while True:\n city = input('Are you from Washington, New York City or Chicago: ').lower()\n if city in cities:\n break\n print('You selected: ', city)\n#fixed the missing loop and case sensitivity in 'month' and 'day' input\n while True:\n month = input('Which month would you like to filter, choose \"all\" if you do not want to filter: ').lower()\n if month in months:\n break\n print('You selected')\n\n while True:\n day = input('Which day would you like to filter, choose \"all\" if you do not want to filter: ').lower()\n if day in days:\n break\n\n print('-'*40)\n return city, month, day", "def _filter(*args, **kwargs):\n if kwargs.pop('full_output', True):\n return filter(*args, full_output=True, **kwargs)\n return IteratorContextManager(*args, parser_func=filter, **kwargs)", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington).\n city = get_city()\n # ask if they want to filter by day or month\n filter = wanna_filter()\n if filter == 'day':\n day = get_day()\n month = ''\n return city, month, day\n elif filter == 'month':\n month = get_month()\n day = ''\n return city, month, day\n else:\n day = ''\n month = ''\n print('-'*40)\n return city, month, day", "def search(self, cursor: sqlite3.Cursor, **kwargs: FilterTypes) -> List[ModelledTable]:\n\n for name, model in self.foreigners.values():\n if name in kwargs and isinstance(kwargs[name], model.record):\n kwargs[model.id_field] = getattr(kwargs[name], model.id_field)\n del kwargs[name]\n\n sql, params = self.where(self.foreigners, kwargs)\n sql = f\"SELECT {self.id_field} FROM [{self.table}] WHERE \" + sql\n\n _LOGGER.debug(sql)\n _LOGGER.debug(params)\n\n cursor.execute(sql, params)\n\n ids = [x[0] for x in cursor.fetchall()]\n\n return list(self.get_many(cursor, *ids).values())", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def _base_proxies_filter(self, category: str, filters: list) -> list:\n\n data_filtered = []\n \n if category == 'country':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=0, filters=filters)\n )\n \n elif category == 'anonymity':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=1, filters=filters)\n )\n\n elif category == 'protocol':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=2, filters=filters)\n )\n \n elif category == 'google_passed':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=3, filters=filters)\n )\n\n return data_filtered", "def cursor(self):\n cursor = Cursor(self, self.__aceQLHttpApi)\n return cursor", "def _cursor_collection(self, cursor):\n ...", "def _cursor_collection(self, cursor):\n ...", "def _filter_entries(self, entries):\n entries = super()._filter_entries(entries)\n if self._filter_categories:\n return list(filter(lambda entry:\n entry.category in self._filter_categories,\n entries))\n return entries", "def apply_filter(self, filter_arg):\n filtered_entries = self.visual.apply_filter(filter_arg, self.get_current_entries())\n # idxs = self.selector.select_by_objects(filtered_entries, yield_ones_index=True)\n self.visual.print_entries_enum(filtered_entries, None)\n # self.list(idxs)", "def filters(self):\n return self._filters", "def test_search_collection_filters():\n col = Collection(search='forest', object_type=['layer'], filters={'provider': 'gee'}, app=['gfw'])\n assert len(col) > 1", "def get_filters() -> List[Tuple[str, Callable]]:\n return [\n ('group_files', group_files),\n ('timesince', timesince),\n ('just_updated', just_updated),\n ('get_category_name', get_category_name),\n ('process_status_display', process_status_display),\n ('compilation_status_display', compilation_status_display),\n ('duration', duration),\n ('tidy_filesize', tidy_filesize),\n ('asdict', asdict),\n ('compilation_log_display', compilation_log_display)\n ]", "def prepare_filter(self, ):\n if not self._parent.connected():\n return\n papers = self._parent.model.list_papers([\"name\"])\n upps = map(lambda a: (a[\"id\"], a[\"name\"]), papers)\n accounts = self._parent.model.list_accounts([\"name\"])\n uaccs = map(lambda a: (a[\"id\"], a[\"name\"]), accounts)\n self.dialog.update_widget(count_range = 
self._parent.model.get_deals_count_range(),\n price_range = self._parent.model.get_deals_price_range(),\n comm_range = self._parent.model.get_deals_commission_range(),\n volume_range = self._parent.model.get_deals_volume_range(),\n stock_list = upps,\n accounts_list = uaccs)", "def get_queryset(self):\n coachings_pk = []\n queryset = Coaching.objects.all()\n print(self.request.query_params)\n course = self.request.query_params.get('course')\n fee = self.request.query_params.get('feet')\n course_queryset = Course.objects.filter(Q(fees__lte=int(fee))|Q(stream__contains='science'))[:5]\n if course_queryset is not None:\n for course in course_queryset:\n course_branch_caoching_pk = course.branch.coaching.id\n coachings_pk.append(course_branch_caoching_pk)\n coaching_queryset = Coaching.objects.filter(pk__in=coachings_pk)\n queryset = coaching_queryset\n \n return queryset", "def filters(self):\n return self.__filters", "def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n city = get_city_from_user()\n print('-' * 10)\n\n # get user input for month (all, january, february, ... , june)\n month = get_month_from_user()\n print('-' * 10)\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day = get_day_of_week_from_user()\n print('-' * 40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US Bikeshare data!')\n city, month, day = \"\", \"\", \"\"\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while city not in CITY_DATA.keys():\n city = input(\"Would you like to see data for Chicago, New York City, or Washington?\\n\").lower()\n\n # get user input for month (all, january, february, ... , june)\n while month not in months:\n month = input(\"Which month - January, February, March, April, May, June, or All?\\n\").lower()\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while day not in days:\n day = input(\"Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday, or All?\\n\").lower()\n\n print('-' * 40)\n return city, month, day", "def get_cursor(self, *args, **kwargs):", "def get_category_recipe(filters, db_conn, host_url):\n try:\n result = db_conn[\"recipes\"].find(\n filters).sort(\"createdOn\", -1).limit(9)\n recipe_list = map_response(result, host_url)\n\n return recipe_list\n\n except Exception as e:\n print(e)\n return {\"success\": False, \"message\": \"Error in api: \" + str(e)}", "def get_criteria_by_hostname_job_category(self, hostname: str, job: str, category: str) -> List[Dict[str, str]]:\n with self.lock:\n host_query_infos = self.host_query_info.all()\n for host in host_query_infos:\n if host['hostname'] == hostname and host['job'] == job:\n for item in host['filters']:\n if item['category'] == category:\n return item['criteria']\n return None", "def queryComponent(type=None, filter=None, all=0):", "def list_container(cls, context, filters=None,\n limit=None, marker=None,\n sort_key=None, sort_dir=None):\n dbdriver = get_instance()\n return dbdriver.list_container(\n context, filters, limit, marker, sort_key, sort_dir)", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n #Invalid input is administered to by using a while loop.\n while True:\n city=input(\"Choose a city name between Chicago, New York City or Washington:!\").lower()\n if city not in CITY_DATA:\n print(\"\\n Not a valid city\\n\")\n continue\n else:\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try\n month=str(input('Enter name of one month(from January to June) to filter by or \"all\" ,for no filter :')).lower()\n months=['january', 'february', 'march', 'april', 'may', 'june']\n if month == 'january':\n month = months[0]\n elif month == 'february':\n month = months[1]\n elif month == 'march':\n month = months[2]\n elif month == 'april':\n month = months[3]\n elif month == 'may':\n month = months[4]\n elif month == 'june':\n month = months[5]\n elif month == 'all':\n print('all')\n else:\n raise(Exception)\n\t\t\texcept Exception as error:\n print('Invalid Input!,please restart again!.')", "def get_interests(self):\n cur = self.conn.cursor(pymysql.cursors.DictCursor)\n\n cur.execute('SELECT id, name FROM Interests ORDER BY sort_order;')\n\n return CursorIterator(cur)", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n \n while True:\n city=input('Please enter the city you would want to explore! forexample washington, chicago or new york city \\n').lower()\n if city in CITY_DATA:\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n \n while True:\n month=input('Which month are you interested in (You can type january,february....june or all for none) \\n').lower()\n \n if month.isalpha():\n if month in months:\n break\n if month =='all':\n break\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n \n while True:\n day=input('Now provide the day of the week you are intrested in... 
you can user monday,truesday...sunday \\n').lower()\n if day in days:\n break\n if day=='all':\n break\n\n print('-'*40)\n return city, month, day", "async def filter(self, **kwargs):\n\n pass", "def curate_filter_info(self):\n filter_list = [\n self.sample_name, self.final_id, self.all_variant_count,\n self.filter_min_depth_count, self.filter_max_depth_count,\n self.filter_common_var_count, self.log_mut_count,\n self.cosmic_variant_counts, self.unknown_maf_count\n ]\n return filter_list", "def get_category_list(self, category):\r\n try:\r\n conn = self.create_connection()\r\n query = \"\"\"SELECT sub_category \r\n\t\t\t\t\t FROM categories\r\n\t\t\t\t\t WHERE category = '%s'\"\"\"%(category)\r\n file_list = pd.read_sql(query, conn).iloc[:, 0].values.tolist()\r\n conn.close()\r\n except (psycopg2.Error, ValueError):\r\n print(\"Error at get_category_list, check connection or query\")\r\n return file_list", "def _filter(\n self, criteria: Q, offset: int = 0, limit: int = 10, order_by: list = ()\n ) -> ResultSet:\n conn = self._get_session()\n qs = conn.query(self.model_cls)\n\n # Build the filters from the criteria\n if criteria.children:\n qs = qs.filter(self._build_filters(criteria))\n\n # Apply the order by clause if present\n order_cols = []\n for order_col in order_by:\n col = getattr(self.model_cls, order_col.lstrip(\"-\"))\n if order_col.startswith(\"-\"):\n order_cols.append(col.desc())\n else:\n order_cols.append(col)\n qs = qs.order_by(*order_cols)\n qs_without_limit = qs\n qs = qs.limit(limit).offset(offset)\n\n # Return the results\n try:\n items = qs.all()\n result = ResultSet(\n offset=offset, limit=limit, total=qs_without_limit.count(), items=items\n )\n except DatabaseError as exc:\n logger.error(f\"Error while filtering: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return result", "def get_categories():\n return session.query(Category)", "def fusion_api_get_authorization_category_actions(self, api=None, headers=None, resource_uri='', sessionID=None,):\n param = '/category-actions%s' % resource_uri\n return self.auth.get(api=api, param=param, headers=headers, sessionID=sessionID)", "def search_current_auctions(request):\n query = request.GET.get('q')\n auction = Auction.objects.all()\n\n if query:\n results = auction.filter(Q(antiques__name__icontains=query) | Q(antiques__description__icontains=query))\n\n else:\n results = Auction.objects.all()\n\n pages = pagination(request, results, num=4)\n context = {\n 'items': pages[0],\n 'page_range': pages[1]\n }\n\n return render(request, \"showallauctions.html\", context)", "def get_list_filters(self):\n # look in session for the saved search...\n filters = ListFilter()\n filters.get_list_filter(self.table)\n return filters", "def get_filters():\r\n print('Hello! Let\\'s explore some US bikeshare data!\\n')\r\n\r\n # ref https://stackabuse.com/getting-user-input-in-python/\r\n\r\n # Get user input for city (chicago, new york city, washington).\r\n cities = ['Chicago', 'New York city', 'Washington']\r\n city = get_user_input(cities,\"city\")\r\n\r\n # Get user input for month (all, january, february, ... , june)\r\n months = ['All', 'Jan', 'Feb', 'Mar', 'Apr', 'Jun']\r\n month = get_user_input(months,\"month\")\r\n\r\n # Get user input for day of week (all, monday, tuesday, ... 
sunday)\r\n days = ['All', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\r\n day = get_user_input(days,\"day\")\r\n\r\n print('-'*40)\r\n return city, month, day", "def get_queryset(self, *args, **kwargs):\n post = Post.objects.all()\n category = self.request.GET.get('category')\n print('this is the category', category)\n if category:\n query_list = post.filter(\n Q(category__icontains=category) |\n Q(category__iexact=category)\n ).distinct()\n print('the category show')\n else:\n query_list = Post.objects.all()\n return query_list", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input('Would you like to see data for Chicago, New York City, Washington?').lower()\n\n print(city)\n\n while city not in ['chicago','washington','new york city']:\n city = input('Input available name of the city').lower()\n continue\n\n\n # TO DO: get user input for month (all, january, february, ... , june)\n\n month = input('choose month from January, Febraury, March, April, May, June or all').lower()\n while month not in ['january','febraury','march','april','may','june','all'] :\n month = input('Please input available month name')\n continue\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n day = input('choose day of week or all').lower()\n while day not in ['sunday','monday','tuesday','wednesday','thursday','friday','saturday', 'all']:\n day = input('please input day of week')\n\n print('-'*40)\n return city, month, day", "def searchRecords(self, filterChoice, keyword):\r\n session = wx.GetApp().session\r\n model = getattr(db, self.modelName)\r\n\r\n result = None\r\n if filterChoice == \"Person\":\r\n qry = session.query(model)\r\n logging.debug(qry)\r\n result = qry.filter(db.Person.full_name.contains('%s' % keyword))\r\n\r\n result = result.all()\r\n\r\n logging.debug(result)\r\n return result", "def keywords_of_section(self, section, kwfilter):\n pcat = getToolByName(section, 'portal_catalog')\n cat = pcat._catalog\n path_idx = cat.indexes[self.path_index]\n tags_idx = cat.indexes[self.keyword_index]\n result = []\n # query all oids of path - low level\n pquery = {\n self.path_index: {\n 'query': '/'.join(section.getPhysicalPath()),\n 'depth': -1,\n }\n }\n kwfilter = safe_encode(kwfilter)\n # uses internal zcatalog specific details to quickly get the values.\n path_result, info = path_idx._apply_index(pquery)\n for tag in tags_idx.uniqueValues():\n if kwfilter and kwfilter not in safe_encode(tag):\n continue\n tquery = {self.keyword_index: tag}\n tags_result, info = tags_idx._apply_index(tquery)\n if intersection(path_result, tags_result):\n result.append(tag)\n # result should be sorted, because uniqueValues are.\n return safe_simplevocabulary_from_values(result)", "def get_courses_by_query(self, query: str, *filters: str, \n year=None) -> List[Course]:\n\n url = self._URL + \"search\"\n\n payload = {\n \"view\": \"xml-20200810\",\n \"filter-coursestatus-Active\": \"on\",\n \"q\": query,\n }\n payload.update({f: \"on\" for f in filters})\n if year:\n payload.update({\"academicYear\": year.replace('-', '')})\n\n res = self._session.get(url, params=payload)\n\n root = ET.fromstring(res.content)\n courses = root.findall(\".//course\")\n\n return [Course(course) for course in courses]", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = str(input(\"Please enter the city name: \\n(Chicago or New york city or Washington)\")).lower()\n while city not in CITY_DATA.keys():\n print(\"Invalid Input.\")\n city = str(input(\"Please enter the city name: (chicago or new york city or washington)\")).lower()\n\n month = str(input(\"To filter by month please enter the month name \\n(January, February, March, \"\n \"April, May, June)\\nor all for not filtering by month: \")).lower()\n while month not in months:\n print(\"Invalid Input.\")\n month = str(input(\"To filter by month please enter the month name \\n(January, February, March, \"\n \"April, May, June)\\nor all for not filtering by month: \")).lower()\n\n day = str(input(\"To filter by day please enter the day name \\n(Saturday, Sunday, Monday,\"\n \" Tuesday, Wednesday, Thursday, Friday)\\nor all for not filtering by day: \")).lower()\n while day not in days:\n print(\"Invalid Input.\")\n day = str(input(\"To filter by day please enter the day name \\n(Saturday, Sunday, Monday,\"\n \" Tuesday, Wednesday, Thursday, Friday)\\nor all for not filtering by day: \")).lower()\n\n\n print('-'*40)\n return city, month, day", "def oreDbQuery():\n # TODO: Change TABLE name to the Crop Slection table that has not yet been Created :-(\n # TODO: Currently using the old DB for Crop Lookup table\n\n c.execute('SELECT DISTINCT Crop, GrpNo, GrpName, SubGrpNo, SubGrpName, Category FROM CCA')\n\n return c.fetchall()", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input('Would you like to see data for Chicago, New York City, or Washington? ')\n city = city.lower()\n while city not in CITY_DATA:\n city = input('Not a valid name! Try again!')\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('Which month? January, February, March, April, May, or June? Type \"all\" for all months ')\n month = month.lower()\n while month not in months:\n if month == 'all':\n break\n else:\n month = input('Not a valid month! Try again!')\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day = input('Which day of week? Type \"all\" for every weekday ')\n day = day.lower()\n while day not in days:\n if day == 'all':\n break\n else:\n day = input('Not a valid day! 
Try again!')\n print('-'*40)\n return city, month, day", "def ls(filter=None):", "def get_cards_by_kwargs(self,**kwargs):\n\n card_ids = []\n\n for category,values in kwargs.items():\n\n if not category in self._category2id:\n print \"Category [{}] not found, skipping!\".format(category)\n continue\n\n try:\n card_ids.append(self._category2id[category][values].values())\n except:\n print 'No cards with constraint - {}:{}'.format(category,values)\n return None\n\n return [self._id2database[card_id] for card_id in intersect(*card_ids)]", "def step_filter(self, qs):\n return qs", "def cursor(self):\n with self.conn as c:\n yield c.cursor()", "def cursor(self):\n with self.conn as c:\n yield c.cursor()", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def get_queryset(self):\n category_qs = Category.objects \\\n .select_related('theme') \\\n .order_by('theme__name', 'name')\n\n base_qs = Aid.objects \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors') \\\n .prefetch_related(Prefetch('categories', queryset=category_qs))\n\n user = self.request.user\n if user.is_authenticated and user.is_superuser:\n qs = base_qs\n elif user.is_authenticated:\n q_published = Q(status='published')\n q_is_author = Q(author=user)\n qs = base_qs.filter(q_published | q_is_author)\n else:\n qs = base_qs.published()\n\n return qs", "def get_recipies_from_category(category_id,\n usertype=1,\n page=0,\n count=0,\n session=None):\n if session is None:\n session = requests.Session()\n payload = {'categoryId': category_id,\n 'usertype': usertype,\n 'page': page,\n 'offset': count}\n response = session.get(\n 'https://cms.sortedfood.com/apiRecipe/getFeaturedByUsertype',\n params=payload)\n if response.status_code != 200:\n raise response.HTTPError(response)\n response.encoding = 'utf-8'\n return json.loads(response.text)", "def get_all_categorizations():\n try:\n query = run_select_query('SELECT id, category_name, category_item_type, name \\\n FROM categorization \\\n JOIN item ON (categorization.item_id = item.id)')\n return [{'id': obj[0], 'category_name': obj[1], 'category_item_type': obj[2].capitalize(), 'item_name': obj[3]} for obj in query]\n except (Exception, psycopg2.Error) as error:\n return {\"status\": \"error\", \"error\": error}", "def apply_filters(filters, items):\n return scom.apply_filters(filters, items)", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n cities = ('chicago', 'new york', 'washington')\n while True:\n city = input(\"Which city would you like to look at? Chicago, New York, or Washington? 
\\n\").lower()\n\n if city in cities:\n break\n\n # get user input for month (all, january, february, ... , june)\n months = ('all', 'january', 'february', 'march', 'april', 'may', 'june')\n while True:\n month = (input(\"Which month would you like to look at? You can choose january through june. Or type 'all' for all of them\\n\")).lower()\n\n if month in months:\n break\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n days = ('all', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday')\n while True:\n day = (input(\"Which day would you like to select? Type in the name of the day or all to select all days. \\n\")).lower()\n\n if day in days:\n break\n\n print('-'*40)\n return city, month, day", "def misc_search(self, kwargs):\n attr = kwargs[\"attributes\"]\n filter_ = kwargs[\"filter\"]\n\n try:\n if attr and attr != \"ALL\":\n results = self.engine.query(filter_, attr.split(\",\"))\n else:\n results = self.engine.query(filter_)\n self.display(results, True)\n except PyAsn1UnicodeDecodeError as e:\n error(f\"Decoding error with the filter: {e}\")\n except Exception as e:\n if e.__str__() == \"\":\n error(\"An exception occurred with the provided filter\")\n else:\n error(e)", "def get_context_data(self, **kwargs):\n\n context = super(FilteredActionListView, self).get_context_data(**kwargs)\n\n #-- Process categories\n try:\n query = self.request.GET['q'].strip()\n context['filter_query'] = query\n\n except KeyError:\n #OK: q is not among GET parameters\n pass\n\n #-- Process categories\n try:\n cat_pks = self.request.GET['cat_pks'].split(SEP)\n context['filter_categories'] = ActionCategory.objects.filter(pk__in=cat_pks)\n\n except KeyError:\n #OK: cat_pks is not among GET parameters\n pass\n\n #-- Process geonames\n try:\n geo_pks = self.request.GET['geo_pks'].split(SEP)\n context['filter_geonames'] = Geoname.objects.filter(pk__in=geo_pks)\n\n except KeyError:\n #OK: geo_pks is not among GET parameters\n pass\n\n #-- Process politicians\n try:\n pol_pks = self.request.GET['pol_pks'].split(SEP)\n context['filter_politicians'] = Politician.objects.filter(pk__in=pol_pks)\n\n except KeyError:\n #OK: pol_pks is not among GET parameters\n pass\n\n return context", "def getCbsdRecords(self, filters=[]):\n return self._getRecords('cbsd', filters)" ]
[ "0.5364717", "0.53382677", "0.5266764", "0.5199028", "0.51203734", "0.51203734", "0.5113741", "0.508854", "0.5086552", "0.5083298", "0.50145316", "0.49857625", "0.4980039", "0.49589002", "0.49563545", "0.49544093", "0.49255437", "0.48955005", "0.48900995", "0.48809904", "0.48734722", "0.4855942", "0.4836454", "0.47952694", "0.47769836", "0.47742456", "0.47742456", "0.47673994", "0.47592837", "0.47563037", "0.47553512", "0.47549483", "0.4751879", "0.4751167", "0.47438857", "0.47326368", "0.47292548", "0.47204143", "0.47194308", "0.47160903", "0.47089213", "0.4708227", "0.4696775", "0.46846676", "0.46810636", "0.4676743", "0.46734795", "0.4672842", "0.4672842", "0.46652552", "0.46634167", "0.46621373", "0.46557885", "0.46542", "0.46416008", "0.46276447", "0.46203867", "0.46195072", "0.4619198", "0.46189383", "0.46097034", "0.4609644", "0.46077517", "0.4601761", "0.46010998", "0.45955658", "0.4594301", "0.45937595", "0.4587869", "0.45856208", "0.45815498", "0.45761612", "0.45755225", "0.4574312", "0.45720813", "0.45710596", "0.45708507", "0.45699662", "0.45688158", "0.45685032", "0.45678896", "0.456153", "0.45587447", "0.45549273", "0.45424792", "0.45262957", "0.4523604", "0.45232776", "0.45168203", "0.45168203", "0.45155048", "0.451208", "0.45108694", "0.45096964", "0.4508295", "0.45000342", "0.44998157", "0.44939363", "0.44934198", "0.44895983" ]
0.6988995
0
Synchronize IDA's cursor with GDB
def j(*args):
    try:
        pc = int(gdb.selected_frame().pc())
        pwndbg.ida.Jump(pc)
    except Exception:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_pdb():\r\n import ctypes\r\n ctypes.windll.kernel32.AllocConsole()\r\n import sys\r\n sys.stdout = open('CONOUT$', 'wt')\r\n sys.stdin = open('CONIN$', 'rt')\r\n import pdb\r\n pdb.set_trace()", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def set_trace():\n import pdb\n import sys\n stdout = sys.stdout\n sys.stdout = sys.__stdout__\n pdb.Pdb().set_trace(sys._getframe().f_back)", "def run_ipdb(_step):\r\n import ipdb\r\n ipdb.set_trace()\r\n assert True", "def enter_pdb():\n import sys, pdb\n sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__\n pdb.set_trace()", "def gdb(*args):\n _gdb_python_call_gen('gdb', *args)()", "def debug():\n # written before I knew about the pdb module\n caller = currentframe().f_back\n method_name = caller.f_code.co_name\n line_no = getframeinfo(caller).lineno\n print(method_name + \": line \" + str(line_no))\n code.interact(local=dict(globals(), **caller.f_locals))", "def dbtrace_ui():\n\n pass", "def pdb(item, item2=None):\n import pdb # noqa\n pdb.set_trace() # noqa", "def set_trace():\r\n # without this in iPython debugger can generate strange characters.\r\n from IPython.core.debugger import Pdb\r\n Pdb().set_trace(sys._getframe().f_back)", "def _debug_trace():\n from PyQt4.QtCore import pyqtRemoveInputHook\n from pdb import set_trace\n pyqtRemoveInputHook()\n set_trace()", "def set_trace():\n Bdb().set_trace()", "def cursor_placement_thread(self):\r\n while self.editing:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n curses.curs_set(2)\r\n self.win.touchwin()\r\n self.win.refresh()\r\n time.sleep(0.1)\r\n curses.curs_set(0)", "def rpdb_set_trace(log=None):\n import rpdb\n import subprocess\n\n ip=subprocess.check_output([\"hostname\", \"-i\"]).strip()\n port=4444\n print \"connect to rpdb remotely with: nc %s %d # Control-C to exit nc\" % (ip, port)\n if log:\n log.warn(\"connect to rpdb remotely with: nc %s %d # Control-C to exit nc\" % (ip, port))\n debugger = rpdb.Rpdb(ip, port)\n debugger.set_trace()", "def cursor_set():\n print(\"\\033[0;0H\")", "def do_debug(self, arg):\n orig_trace = sys.gettrace()\n if orig_trace:\n sys.settrace(None)\n globals = self.curframe.f_globals\n locals = self.curframe_locals\n Config = self.ConfigFactory\n\n class PdbppWithConfig(self.__class__):\n def __init__(self_withcfg, *args, **kwargs):\n kwargs.setdefault(\"Config\", Config)\n super(PdbppWithConfig, self_withcfg).__init__(*args, **kwargs)\n\n # Backport of fix for bpo-31078 (not yet merged).\n self_withcfg.use_rawinput = self.use_rawinput\n\n local.GLOBAL_PDB = self_withcfg\n local.GLOBAL_PDB._use_global_pdb_for_class = self.__class__\n\n prev_pdb = local.GLOBAL_PDB\n p = PdbppWithConfig(self.completekey, self.stdin, self.stdout)\n p._prompt = \"({}) \".format(self._prompt.strip())\n self.message(\"ENTERING RECURSIVE DEBUGGER\")\n self._flush_sticky_messages()\n try:\n with self._custom_completer():\n sys.call_tracing(p.run, (arg, globals, locals))\n except Exception:\n exc_info = sys.exc_info()[:2]\n self.error(traceback.format_exception_only(*exc_info)[-1].strip())\n finally:\n local.GLOBAL_PDB = prev_pdb\n self.message(\"LEAVING RECURSIVE DEBUGGER\")\n\n if orig_trace:\n sys.settrace(orig_trace)\n self.lastcmd = p.lastcmd", "def debugger(self, force=False):\r\n from IPython.utils.warn import error\r\n if not (force or self.call_pdb):\r\n return\r\n\r\n if not hasattr(sys, 'last_traceback'):\r\n error('No traceback has been produced, nothing to debug.')\r\n return\r\n\r\n from pudb import pm\r\n\r\n with 
self.readline_no_record:\r\n pm()", "def test(self):\n self.gdb.b(\"main:start\")\n self.gdb.c()\n self.gdb.p(\"i=123\")\n self.gdb.c(wait=False)\n time.sleep(0.1)\n output = self.gdb.interrupt()\n assert \"main\" in output\n assertGreater(self.gdb.p(\"j\"), 10)\n self.gdb.p(\"i=0\")\n self.exit()", "def resetCursor():\n print(\"\\u001b[?0l\", end='')", "def setDefaultCursorPosition(self):\n self.srcEditor.setFocus()\n self.srcEditor.setCursorPosition(0,0)", "def change_cursor(self, cursor):\n self.setCursor(cursor)", "def test(self):\n self.gdb.c(wait=False)\n time.sleep(0.5)\n self.gdb.interrupt()\n output = self.gdb.command(\"p/x *(int*)(((char*)&data)-0x80000000)\")\n assertIn(\"0xbead\", output)", "def cursor():\n dbh = handle()\n return dbh.cursor()", "def debug(sig, frame):\n d={'_frame':frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message =\"Signal received : entering python shell.Traceback:\"\n message += ''.join(traceback.format_stack(frame))\n i.interact(message)", "def debug(sig, frame):\n d={'_frame':frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message = \"Signal received : entering python shell.\\nTraceback:\\n\"\n message += ''.join(traceback.format_stack(frame))\n i.interact(message)", "def _set_cursor(self, cursor):\n self._cursor = cursor", "def pdb_option(args, run):\n run.pdb = True", "def switch_to_ipython_env():\n # Save GDB's excepthook\n saved_excepthook = sys.excepthook\n # Switch to default stdout/stderr\n with pwndbg.lib.stdio.stdio:\n yield\n # Restore Python's default ps1, ps2, and excepthook for GDB's `pi` command\n sys.ps1 = \">>> \"\n sys.ps2 = \"... 
\"\n sys.excepthook = saved_excepthook", "def msg_console_switched(self, msg):\r\n #update the paused/line number markers\r\n self.frame.notebook.UpdatePauseMarkers()\r\n\r\n #update the bp markers in the editor pages\r\n pages = self.frame.notebook.GetAllPages()\r\n for page in pages:\r\n page.UpdateBreakpointSymbols()", "def debugger(self):\n\n if not self.rc.pdb:\n return\n pdb.pm()", "def test_cle_gdb():\n mappath = os.path.join(test_location, \"../test_data/test_gdb_plugin/procmap\")\n p = angr.Project(binpath, load_options={\"gdb_map\":mappath})\n check_addrs(p)", "def _D(stdscr):\n curses.nocbreak()\n stdscr.keypad(0)\n curses.echo()\n curses.endwin()\n import pdb; pdb.set_trace()", "def attach_gdb(p, commands=None):\n val = \"\"\"\n \"\"\" if commands is None else commands\n return gdb.attach(p, val)", "def updateDisplay(self):\n\t\tregion = braille.handler.mainBuffer.regions[-1] if braille.handler.mainBuffer.regions else None\n\t\tif isinstance(region, braille.TextInfoRegion):\n\t\t\tbraille.handler._doCursorMove(region)", "def append_cursor_enter_callback(self):", "def wait_cursor(win):\n win.app.setOverrideCursor(gui.QCursor(core.Qt.WaitCursor))\n yield\n win.app.restoreOverrideCursor()", "def pm(conn):\n #pdb.post_mortem(conn.root.getconn()._last_traceback)\n redir = redirected_stdio(conn)\n try:\n conn.modules.pdb.post_mortem(conn.root.getconn()._last_traceback)\n finally:\n redir.restore()", "def current_line_preserved():\n\n current_line = get_current_line_number() + 1\n yield\n vim.command('{0}'.format(current_line))", "def __setCursor(self, id=None):\n if self.__currentCursor != id: # Avoid redundant calls\n if id:\n self.drawingSurface.SetCursor(wx.StockCursor(id))\n else:\n self.drawingSurface.SetCursor(wx.NullCursor)\n self.__currentCursor = id", "def on_stop(self, event=None):\n try:\n f = gdb.selected_frame()\n except gdb.error:\n # If we get an error here, just return\n return\n\n cmds = Cmds()\n\n # Main job of the plug-in: Open the debugged file in a split and focus\n # vim on the correct line.\n sal = f.find_sal()\n if sal and sal.symtab:\n filename = sal.symtab.filename\n cmds.append(Cmd.focus_on(self.code_window))\n\n if P.exists(filename):\n if self.current_filename != filename:\n cmds.append(Cmd.edit_file(filename))\n self.current_filename = filename\n\n # Go to line and center\n cmds.append(Cmd.center_on_line(sal.line))\n buf = self.code_window.buffer\n buf.clear_highlight(self.hl_source)\n buf.add_highlight(\"NvgdbCurrent\", sal.line - 1, 0, -1, src_id=self.hl_source)\n\n # Allow every extension to register commands\n for ext in self.extensions:\n cmds += ext.on_stop(self, event)\n\n # Focus on the main window\n cmds.append(Cmd.focus_on(self.main_window))\n\n cmds.run(self.nvim)\n return cmds", "def enable_gdb(node, separate_window=True, auto_start=True):\n Helpers.__warn_if_not_empty(node)\n if separate_window:\n node.prefix = \"xterm -e gdb {}--args\".format(\"-ex run \" if auto_start else \"\")\n else:\n node.prefix = \"gdb {}--args\".format(\"-ex run \" if auto_start else \"\")", "def after_cursor_next(self, cursor):\n pass", "def console():\n repl(click.get_current_context())", "def dispatch_line(self, frame):\n if self.stop_here(frame) or self.break_here(frame):\n self.user_line(frame)\n if self.quitting: raise BdbQuit\n return self.trace_dispatch", "def set_cursor(self, cursor):\n for step in self.steps:\n step[1].set_cursor(cursor)\n return self", "def set_step(self):\n super(Pdb, self).set_step()\n if hasattr(self, \"_set_trace_use_next\"):\n del 
self._set_trace_use_next\n self.set_next(self._via_set_trace_frame)", "def gdb_init(*args):\n _gdb_python_call_gen('gdb_init', *args)()", "def excepthook(self, type, value, tb):\n \n self.InteractiveTB(type, value, tb, tb_offset=0)\n if self.InteractiveTB.call_pdb and self.has_readline:\n self.readline.set_completer(self.Completer.complete)", "def cursor(self, cursor):\n\n self._cursor = cursor", "def set_cursor(self, row, col):\n self._vim.current.window.cursor = (row, col)", "def test_qt_cursor(self):\n text_edit = QtWidgets.QPlainTextEdit()\n ring = QtKillRing(text_edit)\n\n ring.kill('foo')\n ring.kill('bar')\n ring.yank()\n text_edit.moveCursor(QtGui.QTextCursor.Left)\n ring.rotate()\n self.assertEqual(text_edit.toPlainText(), 'bar')", "def idb_excepthook(type, value, tb):\n if hasattr(sys, \"ps1\") or not sys.stderr.isatty():\n sys.__excepthook__(type, value, tb)\n else:\n traceback.print_exception(type, value, tb)\n print\n pdb.pm()", "def user_line(self, frame):\r\n if \"__exc_tuple__\" in frame.f_locals:\r\n del frame.f_locals['__exc_tuple__']\r\n\r\n if self._wait_for_mainpyfile:\r\n if (self.mainpyfile != self.canonic(frame.f_code.co_filename)\r\n or frame.f_lineno <= 0):\r\n return\r\n self._wait_for_mainpyfile = False\r\n self.bottom_frame = frame\r\n\r\n if self.get_break(self.canonic(frame.f_code.co_filename), frame.f_lineno):\r\n self.current_bp = (\r\n self.canonic(frame.f_code.co_filename), frame.f_lineno)\r\n else:\r\n self.current_bp = None\r\n self.ui.update_breakpoints()\r\n\r\n self.interaction(frame)", "def resetCursor(self):\n self.personalDataList.viewport().setCursor(QtCore.Qt.ArrowCursor)\n self.personalDataList.areaClicked.disconnect()", "def interact(self, prompt='debug> '):\r\n msg = 'Entering Octave Debug Prompt...\\n%s' % prompt\r\n self.stdout.write(msg)\r\n while 1:\r\n inp_func = input if not PY2 else raw_input\r\n try:\r\n inp = inp_func() + '\\n'\r\n except EOFError:\r\n return\r\n if inp in ['exit\\n', 'quit\\n', 'dbcont\\n', 'dbquit\\n']:\r\n inp = 'return\\n'\r\n self.write('disp(char(3));' + inp)\r\n if inp == 'return\\n':\r\n self.write('return\\n')\r\n self.write('clear _\\n')\r\n self.readline()\r\n self.readline()\r\n if not pty is None:\r\n self.readline()\r\n self.write('disp(char(3))\\n')\r\n return\r\n self.expect('\\x03')\r\n self.stdout.write(self.expect(prompt))", "def shel(line):\n get_ipython().run_line_magic('autocall', '1')\n get_ipython().run_line_magic('rehashx', '')", "def show_cursor():\n ret = mouse.SDL_ShowCursor(SDL_ENABLE)\n if ret < 0:\n raise_sdl_err(\"showing the mouse cursor\")", "def hideCursor():\n print(\"\\u001b[?25l\", end='')", "def set_cursor(obj: QObject, cursor: QCursor = Qt.PointingHandCursor) -> None:\n obj.setCursor(QCursor(cursor))", "def on_debug(word, word_eol, userdata):\n event_text = LOG_FORMAT.format('DBG', 'network', 'channel', 'rank', 'nickname', 'phrase')\n context = highlights_tab()\n context.prnt(event_text)\n return hexchat.EAT_ALL", "def cursor_set(self, yes: bool = True) -> None:\n unicurses.curs_set(False)", "def do_grip():\n rospy.sleep(1.0)\n assert right_gripper.is_ready()\n right_gripper.open()\n rospy.sleep(2.0)\n assert right_gripper.is_ready()\n right_gripper.close()\n for _ in range(3):\n if right_gripper.is_gripping() and not right_gripper.get_force() > 0:\n break\n right_gripper.open()\n rospy.sleep(2)\n right_gripper.close()", "def _update_cursor(self) -> None:\n # get the brush size (get a local reference in case another process\n # changes it between the different accesses in this 
method)\n brush_size = self.brush_size\n # if there is not update, return\n if not self.is_cursor_change:\n return\n # otherwise dequeue the update\n self.is_cursor_change = False\n # make a static border ring for the cursor\n ring = make_ring(brush_size - 1, brush_size)\n cursor = make_cursor(ring, self._brush_border_color)\n # make a circle with the current color\n brush_circle = make_circle(brush_size) - ring\n cursor = cursor + make_cursor(brush_circle, self._color)\n # create the pyglet cursor object and set it\n mouse = pyglet_cursor(cursor)\n self._view.set_cursor(mouse)", "def move3dCursor(p = (0,0,0)):\n bpy.context.scene.cursor_location = p\n # bpy.context.space_data.cursor_location = p", "def test_controller() -> None:\n\n # Initialize object that manages gdb subprocess\n gdbmi = GdbController()\n\n c_hello_world_binary = _get_c_program(\"hello\", \"pygdbmiapp.a\")\n\n if USING_WINDOWS:\n c_hello_world_binary = c_hello_world_binary.replace(\"\\\\\", \"/\")\n # Load the binary and its symbols in the gdb subprocess\n responses = gdbmi.write(\n \"-file-exec-and-symbols %s\" % c_hello_world_binary, timeout_sec=1\n )\n\n # Verify output was parsed into a list of responses\n assert len(responses) != 0\n response = responses[0]\n assert set(response.keys()) == {\"message\", \"type\", \"payload\", \"stream\", \"token\"}\n\n assert response[\"message\"] == \"thread-group-added\"\n assert response[\"type\"] == \"notify\"\n assert response[\"payload\"] == {\"id\": \"i1\"}\n assert response[\"stream\"] == \"stdout\"\n assert response[\"token\"] is None\n\n responses = gdbmi.write([\"-file-list-exec-source-files\", \"-break-insert main\"])\n assert len(responses) != 0\n\n responses = gdbmi.write([\"-exec-run\", \"-exec-continue\"], timeout_sec=3)\n\n # Test GdbTimeoutError exception\n with pytest.raises(GdbTimeoutError):\n gdbmi.get_gdb_response(timeout_sec=0)\n\n # Close gdb subprocess\n gdbmi.exit()\n assert gdbmi.gdb_process is None\n\n # Test NoGdbProcessError exception\n got_no_process_exception = False\n try:\n responses = gdbmi.write(\"-file-exec-and-symbols %s\" % c_hello_world_binary)\n except OSError:\n got_no_process_exception = True\n assert got_no_process_exception is True\n\n # Respawn and test signal handling\n gdbmi.spawn_new_gdb_subprocess()\n responses = gdbmi.write(\n \"-file-exec-and-symbols %s\" % c_hello_world_binary, timeout_sec=1\n )\n responses = gdbmi.write([\"-break-insert main\", \"-exec-run\"])", "def bp_ins(filename, start, end):\n with open(filename, 'r') as f:\n lines = f.readlines()\n lines.insert(start-1, \"\")\n lines.insert(end+1, \"\")\n lines.insert(0, \"\")\n lines[start-1] = 'ipdb.set_trace()\\n'\n lines[end+1] = 'ipdb.set_trace()\\n'\n lines[0] = \"import ipdb\\n\"\n with open(f\"break_{filename}\", 'w+') as f:\n f.writelines(lines)", "def setup_debugging():\n import sys\n sys.path.append('/root/pycharm-debug-py3k.egg')\n import pydevd\n pydevd.settrace('192.168.4.47', port=5422, stdoutToServer=True, stderrToServer=True, suspend=False)", "def set_trace(self, frame=None):\n if getattr(local, \"_pdbpp_completing\", False):\n # Handle set_trace being called during completion, e.g. 
with\n # fancycompleter's attr_matches.\n return\n if self.disabled:\n return\n\n if frame is None:\n frame = sys._getframe().f_back\n self._via_set_trace_frame = frame\n self._stopped_for_set_trace = False\n\n self.start_filename = frame.f_code.co_filename\n self.start_lineno = frame.f_lineno\n\n return super(Pdb, self).set_trace(frame)", "def set_cursor_values(self, pairs: Mapping[str, str]) -> None:", "def debug():", "def _patch_readline_for_pyrepl(self):\n uses_pyrepl = self.fancycompleter.config.readline != sys.modules[\"readline\"]\n\n if not uses_pyrepl:\n yield\n return\n\n # Make pdb.Pdb.complete use pyrepl's readline.\n orig_readline = sys.modules[\"readline\"]\n sys.modules[\"readline\"] = self.fancycompleter.config.readline\n try:\n yield\n finally:\n sys.modules[\"readline\"] = orig_readline", "def loop(gdb_pty):\n try:\n msg = ''\n slave_closed = False\n gdb_pty.stty_raw()\n try:\n while asyncore.socket_map and not got_sigchld:\n asyncore.poll(timeout=debugger.LOOP_TIMEOUT)\n if gdb_pty.stdin_dsptch.close_tty and not slave_closed:\n slave_closed = True\n os.close(gdb_pty.slave_fd)\n except asyncore.ExitNow, err:\n msg = err\n if got_sigchld:\n msg = '\\n[terminal emulator is terminating]'\n os.wait()\n finally:\n gdb_pty.close()\n if msg:\n print >> sys.stderr, msg\n info('========================================')", "def main():\r\n\r\n debug_tb = []\r\n def curses_loop(stdscr):\r\n \"\"\"Only the code inside this function runs within the curses wrapper\"\"\"\r\n\r\n # this function may under no circumstancs raise an exception, so I'm\r\n # wrapping everything into try/except (should actually never happen\r\n # anyways but when it happens during coding or debugging it would\r\n # leave the terminal in an unusable state and this must be avoded).\r\n # We have a list debug_tb[] where we can append tracebacks and\r\n # after curses uninitialized properly and the terminal is restored\r\n # we can print them.\r\n try:\r\n init_colors()\r\n gox = goxapi.Gox(secret, config)\r\n\r\n logwriter = LogWriter(gox)\r\n printhook = PrintHook(gox)\r\n\r\n conwin = WinConsole(stdscr, gox)\r\n bookwin = WinOrderBook(stdscr, gox)\r\n statuswin = WinStatus(stdscr, gox)\r\n chartwin = WinChart(stdscr, gox)\r\n\r\n strategy_manager = StrategyManager(gox, strat_mod_list)\r\n\r\n gox.start()\r\n while True:\r\n key = stdscr.getch()\r\n if key == ord(\"q\"):\r\n break\r\n elif key == curses.KEY_F4:\r\n DlgNewOrderBid(stdscr, gox).modal()\r\n elif key == curses.KEY_F5:\r\n DlgNewOrderAsk(stdscr, gox).modal()\r\n elif key == curses.KEY_F6:\r\n DlgCancelOrders(stdscr, gox).modal()\r\n elif key == curses.KEY_RESIZE:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n stdscr.erase()\r\n stdscr.refresh()\r\n conwin.resize()\r\n bookwin.resize()\r\n chartwin.resize()\r\n statuswin.resize()\r\n elif key == ord(\"l\"):\r\n strategy_manager.reload()\r\n\r\n # which chart to show on the right side\r\n elif key == ord(\"H\"):\r\n set_ini(gox, \"display_right\", \"history_chart\",\r\n gox.history.signal_changed, gox.history, None)\r\n elif key == ord(\"D\"):\r\n set_ini(gox, \"display_right\", \"depth_chart\",\r\n gox.orderbook.signal_changed, gox.orderbook, None)\r\n\r\n # depth chart step\r\n elif key == ord(\",\"): # zoom out\r\n toggle_depth_group(gox, +1)\r\n elif key == ord(\".\"): # zoom in\r\n toggle_depth_group(gox, -1)\r\n\r\n # orderbook grouping step\r\n elif key == ord(\"-\"): # zoom out (larger step)\r\n toggle_orderbook_group(gox, +1)\r\n elif key == ord(\"+\"): # zoom in (smaller 
step)\r\n toggle_orderbook_group(gox, -1)\r\n\r\n elif key == ord(\"S\"):\r\n toggle_orderbook_sum(gox)\r\n\r\n elif key == ord(\"T\"):\r\n toggle_depth_sum(gox)\r\n\r\n # lowercase keys go to the strategy module\r\n elif key >= ord(\"a\") and key <= ord(\"z\"):\r\n gox.signal_keypress(gox, (key))\r\n else:\r\n gox.debug(\"key pressed: key=%i\" % key)\r\n\r\n except KeyboardInterrupt:\r\n # Ctrl+C has been pressed\r\n pass\r\n\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # we are here because shutdown was requested.\r\n #\r\n # Before we do anything we dump stacktraces of all currently running\r\n # threads to a separate logfile because this helps debugging freezes\r\n # and deadlocks that might occur if things went totally wrong.\r\n with open(\"goxtool.stacktrace.log\", \"w\") as stacklog:\r\n stacklog.write(dump_all_stacks())\r\n\r\n # we need the signal lock to be able to shut down. And we cannot\r\n # wait for any frozen slot to return, so try really hard to get\r\n # the lock and if that fails then unlock it forcefully.\r\n try_get_lock_or_break_open()\r\n\r\n # Now trying to shutdown everything in an orderly manner.it in the\r\n # Since we are still inside curses but we don't know whether\r\n # the printhook or the logwriter was initialized properly already\r\n # or whether it crashed earlier we cannot print here and we also\r\n # cannot log, so we put all tracebacks into the debug_tb list to\r\n # print them later once the terminal is properly restored again.\r\n try:\r\n strategy_manager.unload()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n gox.stop()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n printhook.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n try:\r\n logwriter.close()\r\n except Exception:\r\n debug_tb.append(traceback.format_exc())\r\n\r\n # curses_loop() ends here, we must reach this point under all circumstances.\r\n # Now curses will restore the terminal back to cooked (normal) mode.\r\n\r\n\r\n # Here it begins. 
The very first thing is to always set US or GB locale\r\n # to have always the same well defined behavior for number formatting.\r\n for loc in [\"en_US.UTF8\", \"en_GB.UTF8\", \"en_EN\", \"en_GB\", \"C\"]:\r\n try:\r\n locale.setlocale(locale.LC_NUMERIC, loc)\r\n break\r\n except locale.Error:\r\n continue\r\n\r\n # before we can finally start the curses UI we might need to do some user\r\n # interaction on the command line, regarding the encrypted secret\r\n argp = argparse.ArgumentParser(description='MtGox live market data monitor'\r\n + ' and trading bot experimentation framework')\r\n argp.add_argument('--add-secret', action=\"store_true\",\r\n help=\"prompt for API secret, encrypt it and then exit\")\r\n argp.add_argument('--strategy', action=\"store\", default=\"strategy.py\",\r\n help=\"name of strategy module files, comma separated list, default=strategy.py\")\r\n argp.add_argument('--protocol', action=\"store\", default=\"\",\r\n help=\"force protocol (socketio or websocket), ignore setting in .ini\")\r\n argp.add_argument('--no-fulldepth', action=\"store_true\", default=False,\r\n help=\"do not download full depth (useful for debugging)\")\r\n argp.add_argument('--no-depth', action=\"store_true\", default=False,\r\n help=\"do not request depth messages (implies no-fulldeph), useful for low traffic\")\r\n argp.add_argument('--no-lag', action=\"store_true\", default=False,\r\n help=\"do not request order-lag updates, useful for low traffic\")\r\n argp.add_argument('--no-history', action=\"store_true\", default=False,\r\n help=\"do not download full history (useful for debugging)\")\r\n argp.add_argument('--use-http', action=\"store_true\", default=False,\r\n help=\"use http api for trading (more reliable, recommended\")\r\n argp.add_argument('--no-http', action=\"store_true\", default=False,\r\n help=\"use streaming api for trading (problematic when streaming api disconnects often)\")\r\n argp.add_argument('--password', action=\"store\", default=None,\r\n help=\"password for decryption of stored key. This is a dangerous option \"\r\n +\"because the password might end up being stored in the history file \"\r\n +\"of your shell, for example in ~/.bash_history. 
Use this only when \"\r\n +\"starting it from within a script and then of course you need to \"\r\n +\"keep this start script in a secure place!\")\r\n args = argp.parse_args()\r\n\r\n config = goxapi.GoxConfig(\"goxtool.ini\")\r\n config.init_defaults(INI_DEFAULTS)\r\n secret = goxapi.Secret(config)\r\n secret.password_from_commandline_option = args.password\r\n if args.add_secret:\r\n # prompt for secret, encrypt, write to .ini and then exit the program\r\n secret.prompt_encrypt()\r\n else:\r\n strat_mod_list = args.strategy.split(\",\")\r\n goxapi.FORCE_PROTOCOL = args.protocol\r\n goxapi.FORCE_NO_FULLDEPTH = args.no_fulldepth\r\n goxapi.FORCE_NO_DEPTH = args.no_depth\r\n goxapi.FORCE_NO_LAG = args.no_lag\r\n goxapi.FORCE_NO_HISTORY = args.no_history\r\n goxapi.FORCE_HTTP_API = args.use_http\r\n goxapi.FORCE_NO_HTTP_API = args.no_http\r\n if goxapi.FORCE_NO_DEPTH:\r\n goxapi.FORCE_NO_FULLDEPTH = True\r\n\r\n # if its ok then we can finally enter the curses main loop\r\n if secret.prompt_decrypt() != secret.S_FAIL_FATAL:\r\n\r\n ###\r\n #\r\n # now going to enter cbreak mode and start the curses loop...\r\n curses.wrapper(curses_loop)\r\n # curses ended, terminal is back in normal (cooked) mode\r\n #\r\n ###\r\n\r\n if len(debug_tb):\r\n print \"\\n\\n*** error(s) in curses_loop() that caused unclean shutdown:\\n\"\r\n for trb in debug_tb:\r\n print trb\r\n else:\r\n print\r\n print \"*******************************************************\"\r\n print \"* Please donate: 1C8aDabADaYvTKvCAG1htqYcEgpAhkeYoW *\"\r\n print \"*******************************************************\"", "def _cursor_namespace(self):\n ...", "def idle():", "def get_cursor(self, *args, **kwargs):", "def snap():\n pass", "def set_cursor_position(self, x: int, y: int) -> None:\n self.screen.move(y, x)", "def set_trace(stop=True, **kwargs):\n Qdb(**kwargs).set_trace(sys._getframe().f_back, stop=stop)\n # We use f_back so that we start in the caller of this function.", "def sync() -> None:", "def postcmd(self, stop, line):\n stop = super(Pdb, self).postcmd(stop, line)\n if self.sticky:\n if stop and not self.commands_defining:\n self._sticky_handle_cls()\n else:\n self._flush_sticky_messages()\n return stop", "def _DumpDebugPid(cls, log_level, pid):\n pid = str(pid)\n commands = (\n ('pstree', '-Apals', pid),\n ('lsof', '-p', pid),\n )\n for cmd in commands:\n cls._DebugRunCommand(cmd, debug_level=log_level, error_code_ok=True,\n log_output=True)\n\n stdin = '\\n'.join(['echo \\\\n>>> %s\\\\n\\n%s' % (x, x)\n for x in cls.GDB_COMMANDS])\n cmd = ('gdb', '--nx', '-q', '-p', pid, '-ex', 'set prompt',)\n cls._DebugRunCommand(cmd, debug_level=log_level, error_code_ok=True,\n log_output=True, input=stdin)", "def cmd_j(self):\n node = self.start\n while node is not None:\n if node == self.cursor:\n if node.next is not None:\n self.cursor = node.next\n break\n node = node.next\n self.get_text()", "def show_cursor():\n props = WindowProperties()\n props.setCursorHidden(False)\n path = sys.path[0]\n # set the filename to the mouse cursor\n x11 = path+\"assets/gui/Cursor.x11\"\n win = path+\"/\"+\"assets/gui/Cursor.ico\"\n if sys.platform.startswith(\"linux\"):\n props.setCursorFilename(x11)\n else:\n props.setCursorFilename(win)\n base.win.requestProperties(props)", "def _(event):\n event.cli.push_focus(SYSTEM_BUFFER)", "def up(n=1):\n f = gdb.selected_frame()\n\n for i in range(n):\n o = f.older()\n if o:\n o.select()\n\n bt = pwndbg.commands.context.context_backtrace(with_banner=False)\n print('\\n'.join(bt))\n\n j()", "def 
int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)", "def set_display_start_line(line):\n send_command(0x40 | line)", "def setup(self):\n\n try:\n with terminal.cbreak(), terminal.hidden_cursor():\n yield\n finally:\n print(terminal.clear)\n print(terminal.exit_fullscreen)", "def update_launcher(self):\n if not self.misc.bufwinnr(self.name):\n self.open_launcher()\n\n self.mapper.clear()\n self.clear_highlighting()\n self.misc.go_to_win(self.misc.bufwinnr(self.name))\n self.misc.set_buffer(None)\n\n buffer_list = sorted(self.buffers_with_matches())\n if not self.view_buffer:\n self.view_buffer = self.curr_buf.number\n\n i = buffer_list.index(self.view_buffer)\n buf_prev = buffer_list[-1 if not i else i - 1]\n buf_next = buffer_list[0 if i == len(buffer_list) - 1 else i + 1]\n\n vim.command(\"setlocal stl=\\ \\ <-\\ {0}\\ \\ [{1}]\\ \\ {2}\\ ->\\ \\ \".format(\n os.path.split(self.misc.bufname(buf_prev))[1].replace(' ', '\\\\'),\n os.path.split(self.misc.bufname(self.view_buffer))[1].replace(' ', '\\\\'),\n os.path.split(self.misc.bufname(buf_next))[1].replace(' ', '\\\\')))\n\n # self.matches = {'bufname': [(linenr, col, line), ...], ...}\n if self.find_new_matches:\n if not self.cache:\n self.search(self.input_so_far)\n self.cache = list(self.matches)\n\n _matches = self.matches[self.view_buffer]\n if _matches:\n if self.view_buffer == self.curr_buf.number:\n pos = bisect.bisect_left(_matches, self.curr_buf_pos)\n _matches.insert(pos, self.curr_buf_pos)\n else:\n _matches = self.matches[self.view_buffer]\n\n if _matches:\n self.misc.set_buffer(\n [self.render_line(m, j) for j, m in enumerate(_matches)])\n\n # set the position to the current line\n if self.find_new_matches:\n if self.view_buffer == self.curr_buf.number:\n self.launcher_curr_pos = pos\n else:\n self.launcher_curr_pos = 0\n\n if self.launcher_curr_pos is not None:\n length = len(vim.current.buffer)\n if self.launcher_curr_pos >= length:\n self.launcher_curr_pos = length - 1\n vim.current.window.cursor = (self.launcher_curr_pos + 1, 1)\n\n self.render_curr_line()\n self.highlight()\n\n # adjust the window height according to the total\n # number of matches\n n = len(_matches)\n if n > self.max_height:\n vim.current.window.height = self.max_height\n else:\n vim.current.window.height = n\n\n vim.command(\"normal! 
zz\")\n\n else:\n vim.command('syntax clear')\n self.misc.set_buffer([' nothing found...'])\n vim.current.window.height = 1\n self.launcher_curr_pos = 0", "def changeCursor(self):\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.doBackup(filePath)\n self.personalDataList.viewport().setCursor(QtCore.Qt.CrossCursor)\n self.personalDataList.areaClicked.connect(self.hideOnePersonalData)\n self.pDataCancelButton.show()\n self.pDataCommitButton.show()", "def pdb_view(request):\n import pdb; pdb.set_trace()\n return HttpResponse(\"This works.\")", "def toggle_remote_debug():\n import sys\n import os\n\n debug_on = len(sys.argv) >= 2 and '--remote-debug' in sys.argv[1]\n\n if debug_on:\n egg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"pycharm-debug-py3k.egg\"))\n sys.path.append(egg_path)\n import pydevd\n pydevd.settrace('localhost', port=9090)\n\n yield\n\n if debug_on:\n import pydevd\n pydevd.stoptrace()", "def _flush():\n libtcod.console_flush()", "def sync():\n sync_ssda()", "def set_cursor(self,x,y):\n if 1 <= x <= 20 and y in [1,2]:\n self.send(\"\\x1f\\x24%c%c\" % (x,y))\n else:\n raise ValueError('cursor position must be between 1,20 and 1,2')", "def before_cursor_next(self, cursor):\n pass", "def setEditCursor(self, event):\n self.editMode = True\n self.updateCursor(\"X_cursor\")\n self.changeColor(self.lastChanged, self.colors['pentomino'])\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if not (0 <= x < self.rows and 0 <= y < self.cols):\n return\n if not self.gridBusy[x][y]:\n return\n assert len(self.history) >= self.gridBusy[x][y]\n self.lastChanged = self.gridBusy[x][y]\n self.changeColor(self.lastChanged, self.colors['pent_edit'])", "def on_cursor(self, enable: bool) -> None:\n self.on_cursor = enable", "def wdb_f(self, arg):\n\n global rpdb_started\n if not arg.strip():\n print __doc__\n return\n \n if arg.strip() == 'pass':\n passwd = raw_input('Enter new winpdb session password: ')\n ip.db['winpdb_pass'] = passwd\n print \"Winpdb password changed\"\n if rpdb_started:\n print \"You need to restart IPython to use the new password\"\n return \n \n path = os.path.abspath(arg)\n if not os.path.isfile(path):\n raise UsageError(\"%%wdb: file %s does not exist\" % path)\n if not rpdb_started:\n passwd = ip.db.get('winpdb_pass', None)\n if passwd is None:\n import textwrap\n print textwrap.dedent(\"\"\"\\\n Winpdb sessions need a password that you use for attaching the external\n winpdb session. IPython will remember this. You can change the password later \n by '%wpdb pass'\n \"\"\")\n passwd = raw_input('Enter new winpdb session password: ')\n ip.db['winpdb_pass'] = passwd\n \n print \"Starting rpdb2 in IPython process\"\n rpdb2.start_embedded_debugger(passwd, timeout = 0)\n rpdb_started = True\n \n rpdb2.set_temp_breakpoint(path)\n print 'It is time to attach with WinPdb (launch WinPdb if needed, File -> Attach)'\n ip.magic('%run ' + arg)" ]
[ "0.618987", "0.61721236", "0.5961415", "0.5940629", "0.5920072", "0.59150904", "0.58346677", "0.5826086", "0.58243257", "0.5765665", "0.56860536", "0.56628925", "0.56359243", "0.5516654", "0.5512976", "0.55124795", "0.5501486", "0.5466048", "0.5450095", "0.5408604", "0.5396289", "0.5353856", "0.53416777", "0.5324916", "0.5322405", "0.53153783", "0.5214469", "0.5210162", "0.5206785", "0.51857877", "0.5179796", "0.51792556", "0.51761496", "0.5148287", "0.51479983", "0.5127167", "0.51218396", "0.5117617", "0.50860965", "0.50849116", "0.5062221", "0.50562215", "0.501886", "0.4987934", "0.49862128", "0.4977919", "0.49740803", "0.4960452", "0.4954252", "0.49524248", "0.4948661", "0.49456072", "0.49379212", "0.49372065", "0.49176174", "0.49119234", "0.4909725", "0.49092683", "0.49028626", "0.4892424", "0.48807412", "0.48715085", "0.48635507", "0.48585624", "0.48552302", "0.48527396", "0.48381233", "0.48362854", "0.48340863", "0.48332852", "0.48326075", "0.48313257", "0.4819117", "0.4817702", "0.48156902", "0.48093194", "0.47973192", "0.4794958", "0.4789874", "0.4788717", "0.4786335", "0.47859406", "0.47791523", "0.47709206", "0.47634754", "0.47531274", "0.47523534", "0.47490826", "0.47413063", "0.4738959", "0.47332412", "0.4730095", "0.47287813", "0.47273663", "0.472232", "0.4722105", "0.4720349", "0.47101462", "0.47089884", "0.47017384" ]
0.48803896
61
Select and print stack frame that called this one. An argument says how many frames up to go.
def up(n=1):
    f = gdb.selected_frame()

    for i in range(n):
        o = f.older()
        if o:
            o.select()

    bt = pwndbg.commands.context.context_backtrace(with_banner=False)
    print('\n'.join(bt))

    j()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_frame(self, arg):\n if not arg:\n # Just display the frame, without handling sticky.\n self.print_stack_entry(self.stack[self.curindex])\n return\n\n try:\n arg = int(arg)\n except (ValueError, TypeError):\n print(\n '*** Expected a number, got \"{0}\"'.format(arg), file=self.stdout)\n return\n if abs(arg) >= len(self.stack):\n print('*** Out of range', file=self.stdout)\n return\n if arg >= 0:\n self.curindex = arg\n else:\n self.curindex = len(self.stack) + arg\n self.curframe = self.stack[self.curindex][0]\n self.curframe_locals = self.curframe.f_locals\n self.print_current_stack_entry()\n self.lineno = None", "def _select_frame(self, number):\n assert 0 <= number < len(self.stack), (number, len(self.stack))\n self.curindex = number\n self.curframe = self.stack[self.curindex][0]\n self.curframe_locals = self.curframe.f_locals\n self.print_current_stack_entry()\n self.lineno = None", "def record_python_call_stack(self, frames_to_skip: int) -> infra.Stack:\n frames_to_skip += 1 # Skip this function.\n stack = utils.python_call_stack(frames_to_skip=frames_to_skip)\n self.with_stack(stack)\n if len(stack.frames) > 0:\n self.with_location(stack.frames[0].location)\n return stack", "def _getframe(depth=None): # real signature unknown; restored from __doc__\n pass", "def record_cpp_call_stack(self, frames_to_skip: int) -> infra.Stack:\n # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info.\n # No need to skip this function because python frame is not recorded\n # in cpp call stack.\n stack = _cpp_call_stack(frames_to_skip=frames_to_skip)\n stack.message = \"C++ call stack\"\n self.with_stack(stack)\n return stack", "def inspect_frame(self, frame):\n while frame:\n self.inspect_single_frame(frame)\n frame = frame.f_back", "def calling_stack_info(print_res=True, code_context=1):\n\n start_frame = inspect.currentframe().f_back\n\n fil = generate_frame_list_info(start_frame, code_context=code_context)\n\n if print_res:\n # noinspection PyUnresolvedReferences\n print(fil.tb_txt)\n return fil", "def callstack_push(*frame):\n callstack_now().append(frame)", "def stack(context=1):\r\n return getouterframes(sys._getframe(1), context)", "def _cpp_call_stack(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack:\n # NOTE: Cannot use `@_beartype.beartype`. 
It somehow erases the cpp stack frame info.\n frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split(\"\\n\")\n frame_messages = []\n for frame in frames:\n segments = frame.split(\":\", 1)\n if len(segments) == 2:\n frame_messages.append(segments[1].strip())\n else:\n frame_messages.append(\"<unknown frame>\")\n return infra.Stack(\n frames=[\n infra.StackFrame(location=infra.Location(message=message))\n for message in frame_messages\n ]\n )", "def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])", "def printframe(frame,endframe):\n line = \"\\r timeframe: {:d} / {:d}\".format(frame, endframe)\n #print(line),\n sys.stdout.write(line)\n sys.stdout.flush()", "def currentframe(_no_of_go_up_level):\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[_no_of_go_up_level - 1].tb_frame.f_back", "def do_top(self, arg):\n if self.curindex == 0:\n self.error('Oldest frame')\n return\n self._select_frame(0)", "def down(n=1):\n f = gdb.selected_frame()\n\n for i in range(n):\n o = f.newer()\n if o:\n o.select()\n\n bt = pwndbg.commands.context.context_backtrace(with_banner=False)\n print('\\n'.join(bt))\n\n j()", "def spew(self):\n for frame in self.frames:\n print frame.func, frame", "def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)", "def probe_stack(depth = 10):\n if depth == 0:\n return\n probe_stack(depth - 1)", "def trace_function(frame, event, arg):\n co = frame.f_code\n func_name = co.co_name\n if func_name == 'write':\n # Ignore write() calls from print statements\n return\n filename = co.co_filename\n if event == 'call':\n # decend into the stack...\n return trace_function\n elif event == 'return':\n if isinstance(arg, basestring) and 'inputlocator' in filename.lower() and not func_name.startswith('_'):\n results_set.add((func_name, arg))\n # print('%s => %s' % (func_name, arg))\n return", "def print_frame(self, name, frame, on_svr=False):\n\n name = \"print{}: {}\".format(self.step, name)\n\n # print using svr\n if on_svr:\n svr.debug(name, frame)\n\n # print using openCV\n else: \n self.debug_stream(name, frame)\n\n # increment step counter\n self.step += 1", "def currentframe():\n return sys._getframe(3)", "def frame(self):", "def base_trace(self, frame, event, arg):\n\n # print(\"Tracing %s %s %s (%s))\" % (event, \"<File %s, Line %s>\" % (frame.f_code.co_filename, frame.f_lineno), str(arg), str(id(threading.current_thread()))))\n\n # if true, breakpoints will be checked\n test_breakpoints = True\n\n # check for steppings\n if self.stepping != SteppingMode.STEP_NO_STEP:\n # print(\"Tracing for %s %s %s %s (%s))\" % (str(self.stepping), event, \"<File %s, Line %s>\" % (frame.f_code.co_filename, frame.f_lineno), str(arg), str(id(threading.current_thread()))))\n\n # single execution step, to move out of return/call frames into line frames\n if self.stepping == SteppingMode.STEP_SINGLE_EXEC:\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_NO_STEP\n self.break_pause = False\n self.cont = False\n handler.pause_debugging()\n\n # step INTO and call happens on same level as we are, we are in\n # just move one step to line\n if self.stepping == SteppingMode.STEP_INTO and self.active_frame.f_back is self.stored_frames[1] and event == \"call\":\n # this will exit because call is unhandled!\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_SINGLE_EXEC\n self.pause_reason = \"stepIn\"\n\n # step INTO but there is nothing to go in\n # so 
only move as step\n if self.stepping == SteppingMode.STEP_INTO and self.active_frame is self.stored_frames[1] and event != \"return\":\n self.stepping = SteppingMode.STEP_NEXT\n\n # same as above but we are returning, so do single step to move out\n if self.stepping == SteppingMode.STEP_INTO and self.active_frame is self.stored_frames[1] and event != \"return\":\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_SINGLE_EXEC\n self.pause_reason = \"step\"\n\n # step OUT and return happens, just move one step to line\n if self.stepping == SteppingMode.STEP_OUT and self.active_frame is self.stored_frames[1] and event == \"return\":\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_SINGLE_EXEC\n self.pause_reason = \"stepOut\"\n return # exit evaluation\n\n # next will always break if this is line\n if self.stepping == SteppingMode.STEP_NEXT and self.active_frame is self.stored_frames[1] and event != \"call\":\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_NO_STEP\n self.break_pause = False\n self.pause_reason = \"step\"\n self.cont = False\n handler.pause_debugging()\n\n if event == \"exception\" or event == \"call\":\n return # TODO: exceptions, calls\n\n if test_breakpoints:\n # due to lock we move triggered breakpoint to here\n breaking_on = None\n\n # check breakpoints under lock\n with self.bkp_lock:\n for breakpoint in self.active_breakpoints:\n if breakpoint.applies(frame):\n breaking_on = breakpoint\n break\n if breaking_on is not None:\n print(\"Broke at %s %s %s (%s))\" % (event, \"<File %s, Line %s>\" % (frame.f_code.co_filename, frame.f_lineno), str(arg), str(id(threading.current_thread()))))\n self.break_code(breaking_on) # sets this to blocking\n\n # check for external requested pause\n if self.break_pause:\n self.break_pause = False\n self.pause_reason = \"pause\"\n self.cont = False\n handler.pause_debugging()\n\n while not self.cont:\n # spinlock when we are waiting for debugger\n time.sleep(0.1)", "def print_callsite_location():\n fi = inspect.getouterframes( inspect.currentframe() )[2]\n print(\"{path}:{line} {fname}\".format(\n line=fi.lineno, path=fi.filename, fname=fi.function))", "def _debug_stack(self):\n debug(\"current stack: %s\" % self.calc.stack)", "def currentframe():\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[2].tb_frame.f_back", "def trace_event(self, frame, event, arg):\n\n self.active_frame = frame\n self.active_call = frame\n\n if event == \"call\":\n frame.f_trace = self.trace_line\n\n self.base_trace(frame, event, arg)", "def caller_name(self, skip=6):\r\n stack = inspect.stack()\r\n start = 0 + skip\r\n if len(stack) < start + 1:\r\n return ''\r\n parentframe = stack[start][0] \r\n\r\n name = []\r\n module = inspect.getmodule(parentframe)\r\n # `modname` can be None when frame is executed directly in console\r\n # TODO(techtonik): consider using __main__\r\n if module:\r\n name.append(module.__name__)\r\n # detect classname\r\n if 'self' in parentframe.f_locals:\r\n # I don't know any way to detect call from the object method\r\n # XXX: there seems to be no way to detect static method call - it will\r\n # be just a function call\r\n name.append(parentframe.f_locals['self'].__class__.__name__)\r\n codename = parentframe.f_code.co_name\r\n if codename != '<module>': # top level usually\r\n name.append( codename ) # function or a method\r\n\r\n ## Avoid circular refs and frame leaks\r\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\r\n del parentframe, stack\r\n\r\n 
return \".\".join(name)", "def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)", "def _current_frames(): # real signature unknown; restored from __doc__\n return {}", "def show_frame(self, *args):\n current_pg = args[0]\n frame = self.frames[current_pg]\n frame.tkraise()\n\n if current_pg == PageTen:\n graham_picks, lynch_picks = Complete_Interface.completing_evalution()\n self.last_pg_results(graham_picks, lynch_picks, *args)", "def callstack_now():\n return checkpoints[-1]", "def dispatch_frame(self, frame):", "def currentframe():\n try:\n raise Exception\n except:\n return sys.exc_info()[2].tb_frame.f_back", "def mock_frame(stack):\n return inspect.stack()[0]", "def mock_frame(stack):\n return inspect.stack()[0]", "def get_stack_frames(self, threadId=0, startFrame=0, levels=0, format=None):\n\n # format is ignored, TODO?\n # threadId is ignored since renpy is single threaded for stuff we need\n\n clevel = 0\n slevel = 0 if startFrame is None else startFrame\n elevel = None if levels is None or levels == 0 else levels\n\n frames = []\n cframe = self.active_frame\n while cframe is not None:\n if clevel >= slevel:\n finfo = {}\n\n finfo[\"id\"] = clevel\n finfo[\"name\"] = cframe.f_code.co_name + self.format_method_signature(cframe.f_locals, cframe.f_code)\n finfo[\"source\"] = {\"path\": cframe.f_code.co_filename}\n finfo[\"line\"] = cframe.f_lineno\n finfo[\"presentationHint\"] = \"normal\"\n finfo[\"column\"] = 0\n\n dis_info = {}\n finfo[\"subsource\"] = dis_info\n\n disassembled = dis(cframe.f_code, cframe.f_lasti)\n dis_info[\"sources\"] = [{\"text\": self.format_disassembly(cframe.f_lineno, *de), \"line\": de[1], \"source\": finfo[\"source\"]} for de in disassembled]\n ord = 0\n for de in disassembled:\n if de[0]:\n break\n ord += 1\n finfo[\"subsourceElement\"] = ord\n\n frames.append(finfo)\n clevel += 1\n if elevel is not None and clevel >= elevel:\n break\n cframe = cframe.f_back\n\n return frames", "def user_call(self, frame, argument_list):\n pass", "def goToFirstFrame():\n nuke.frame(int(nuke.root()[\"first_frame\"].getValue()))", "def do_bottom(self, arg):\n if self.curindex + 1 == len(self.stack):\n self.error('Newest frame')\n return\n self._select_frame(len(self.stack) - 1)", "def display_frame(frame: Frame, delay: Optional[int] = 1) -> None:\n # print('frame {}'.format(frame.data.frameID))\n \n # get a copy of the frame data\n _frames.append(frame)", "def assign_token(self, frame):\n \tprint(str(frame))", "def get_frame(self, ind):\n pass", "def annotate_stacks(self):\n curthread = gdb.selected_thread()\n try:\n for thread in gdb.selected_inferior().threads():\n thread.switch()\n\n # This is different depending on gdb version\n try:\n frame = gdb.newest_frame()\n stackpointer = frame.read_register(\"sp\")\n except:\n regname, as_hex, as_int = gdb.execute(\"info register sp\", False, True).split()\n 
stackpointer = int(as_hex, 16)\n memrange = self.get_range(stackpointer)\n tid = thread.ptid[1] if thread.ptid[1] else thread.ptid[2]\n if memrange is None:\n print(\"Did not find stack of thread %d\" % tid)\n continue\n memrange.settype(MemoryType.Stack, \"Stack of thread %d(TID %d)\" % (thread.num, tid))\n finally:\n curthread.switch()", "def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. 
Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack", "def refresh_stack(self):\n self.stack, _ = self.compute_stack(self.fullstack)\n # find the current frame in the new stack\n for i, (frame, _) in enumerate(self.stack):\n if frame is self.curframe:\n self.curindex = i\n break\n else:\n self.curindex = len(self.stack)-1\n self.curframe = self.stack[-1][0]\n self.print_current_stack_entry()", "def debug(sig, frame):\n d={'_frame':frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message =\"Signal received : entering python shell.Traceback:\"\n message += ''.join(traceback.format_stack(frame))\n i.interact(message)", "def frame(self):\n self.run_command('frame')", "def trace(self, frame, event, arg):\n if event == \"call\":\n if frame.f_code.co_filename.startswith(\"<memory/\"):\n return self.tracerobot\n else:\n return None\n return trace", "def start_stack(StackId=None):\n pass", "def debug_caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def frames():\n raise RuntimeError('Must be implemented by subclasses.')", "def frame_idx(self) -> int:\n pass", "def trace_dispatch(self, frame, event, arg):\n if self.quitting:\n return # None\n if event == 'line':\n return self.dispatch_line(frame)\n if event == 'call':\n return self.dispatch_call(frame, arg)\n if event == 'return':\n return self.dispatch_return(frame, arg)\n if event == 'exception':\n return self.dispatch_exception(frame, arg)\n if event == 'c_call':\n return self.trace_dispatch\n if event == 'c_exception':\n return self.trace_dispatch\n if event == 'c_return':\n return self.trace_dispatch\n print('bdb.Bdb.dispatch: unknown debugging event:', repr(event))\n return self.trace_dispatch", "def print_frames(frames):\n for i, frame in enumerate(frames):\n clear_output(wait=True)\n print(frame['frame'])\n print(f\"Episode: {frame['episode']}\")\n print(f\"Timestep: {i + 1}\")\n print(f\"State: {frame['state']}\")\n print(f\"Previous action: {frame['action']}\")\n if frame['action'] == 0:\n print(\"Action is: south\")\n if frame['action'] == 1:\n print(\"Action is: north\")\n if frame['action'] == 2:\n print(\"Action is: east\")\n if frame['action'] == 3:\n print(\"Action is: west\")\n if frame['action'] == 4:\n print(\"Action is: pickup passenger 1 \") \n if frame['action'] == 5:\n print(\"Action is: dropoff passenger 1\")\n if frame['action'] == 6:\n print(\"Action is: pickup passenger 2\")\n if frame['action'] == 7:\n print(\"Action is: dropoff passenger 2\")\n print(f\"Reward: {frame['reward']}\")\n print(f\"Total Reward: {frame['total reward']}\")\n time.sleep(.5)", "def visualizar(self):\n 
print(self.stack)", "def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s", "def dump_stacks(self):\n\n dump = []\n\n # threads\n threads = dict([(th.ident, th.name) for th in threading.enumerate()])\n\n for thread, frame in sys._current_frames().items():\n if thread not in threads:\n continue\n dump.append(\"Thread 0x%x (%s)\\n\" % (thread, threads[thread]))\n dump.append(\"\".join(traceback.format_stack(frame)))\n dump.append(\"\\n\")\n\n return \"\".join(dump)", "def __init__(self):\n st = inspect.stack()\n frames = [Snapframe(tup) for tup in st[3:]]\n frames.reverse()\n self.frames=frames", "def _exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. 
Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack", "def curframe(self):\n return self._stack[self._curframe_index][0]", "def supertrace(max_len=160):\r\n tb = sys.exc_info()[2]\r\n while True:\r\n if not tb.tb_next:\r\n break\r\n tb = tb.tb_next\r\n stack = []\r\n frame = tb.tb_frame\r\n while frame:\r\n stack.append(f)\r\n frame = frame.f_back\r\n stack.reverse()\r\n # First print the regular traceback\r\n traceback.print_exc()\r\n\r\n print \"Locals by frame, innermost last\"\r\n for frame in stack:\r\n print\r\n print \"Frame %s in %s at line %s\" % (frame.f_code.co_name,\r\n frame.f_code.co_filename,\r\n frame.f_lineno)\r\n for key, value in frame.f_locals.items():\r\n print (\"\\t%20s = \" % smart_unicode(key, errors='ignore')),\r\n # We have to be careful not to cause a new error in our error\r\n # printer! Calling str() on an unknown object could cause an\r\n # error.\r\n try:\r\n s = smart_unicode(value, errors='ignore')\r\n if max_len is not None:\r\n s = s[:max_len]\r\n print s\r\n except:\r\n print \"<ERROR WHILE PRINTING VALUE>\"", "def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0] \n \n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def print_display(stack, count = 0):\n \n #The starting index of the print of \"stack\"\n start_index = count\n \n #Creating a stopper variable (AKA \"you have hit the 20 item mark\")\n \n stopper = stopper_function(stack, count)\n \n \n #The loop responsible for priting the items to screen \n while count <= (len(stack)-1):\n \n print \"\\nindex:\", count\n print \"name of Item:\", stack[count].name, \"\\n\"\n \n if count == stopper:\n \"\"\"\n if count == (len(stack)-1):\n \n break\n \n else:\n \n \"\"\"\n response = user_input(start_index, stopper, stack)\n \n #reseting variables\n start_index = count\n stopper = stopper_function(stack, count)\n \n if response == \"done\": #exits out of the print_to_screen loop and \n #enters you back into the main program.\n break\n \n else:\n pass\n \n else:\n pass\n \n count += 1\n \n #print \"print_display works!\" \n return \"done\"", "def dump_stacktraces():\n lines = []\n for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212\n lines.append(\"\\n######### ProcessID=%s, ThreadID=%s #########\" % (\n os.getpid(), thread_id\n ))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n lines.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n lines.append(\" %s\" % (line.strip()))\n lines.append(\"#############################################\\n\\n\")\n\n print('\\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout)", "def getStackPosition(self):\r\n return self.callstack.getStack()", "def 
showPrevFrame(self):\n if(self.hasPrevFrame()):\n self.activeFrames.pop()\n self.activeFrames[-1].tkraise()\n else:\n self.showFrame(\"frame_start\")", "def _repr_(self):\n try:\n num = len(self)\n except TypeError:\n num = \"unknown number of\"\n return \"Animation with %s frames\"%num", "def printCallTraceRecursive(self, callStack, processedFuncIdxs=set()):\n assert callStack\n\n funcIdx = callStack[-1]\n depth = len(callStack) - 1\n print('--' * depth + str(self.getParentBaseFiles(funcIdx)) + ' ' + self.idx2Funcs[funcIdx])\n if funcIdx in processedFuncIdxs:\n # Base case 1: This function's call hierarchy has been processed (memoization)\n print('--' * (depth + 1) + '... (Truncated: Sub-hierarchy processed before)')\n callStack.pop(-1)\n return\n else:\n processedFuncIdxs.add(funcIdx)\n\n calleeList = self.caller2callee[funcIdx]\n for calleeIdx in calleeList:\n if calleeIdx not in callStack:\n callStack.append(calleeIdx)\n self.printCallTraceRecursive(callStack, processedFuncIdxs)\n else:\n # Base case 2: Recursion cycle detected\n # TODO: This base case may be redundant and never be reached because of base case 1. Can consider removing.\n print('--' * (depth + 1) + str(self.getParentBaseFiles(calleeIdx)) + ' ' + self.idx2Funcs[calleeIdx] + '(recursion)')\n callStack.pop(-1)\n return\n\n # Base case 3: Finished printing all callees\n callStack.pop(-1)\n return", "def epu_signal_stack_debug(sig, frame):\n d = {'_frame': frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n message = \"Signal recieved : entering python shell.\\nTraceback:\\n\"\n message += ''.join(traceback.format_stack(frame))\n log.info(message)\n\n message = dumpstacks()\n log.info(message)", "def j(*args):\n try:\n pc = int(gdb.selected_frame().pc())\n pwndbg.ida.Jump(pc)\n except Exception:\n pass", "def trace(func):\n @wraps(func)\n def tracer(*args, **kwargs):\n name = func.__name__\n stack_size = int(len(inspect.stack(0)) / 2) # @wraps(func) is also increasing the size\n indent = stack_size*'\\t'\n print(f'{indent} > Entering \"{name}\": args: {args}')\n result = func(*args, **kwargs)\n print(f'{indent} < Leaving \"{name}\"')\n return result\n\n return tracer", "def tail_call_optimized(g):\n def func(*args, **kwargs):\n f = sys._getframe() # Top frame\n # if two and three depth frames exist and if the code at the top\n # recursion and the three depth frame use the same code \n if f.f_back and f.f_back.f_back and f.f_back.f_back.f_back \\\n and f.f_back.f_back.f_back.f_code == f.f_code:\n # Break the recursion\n raise TailRecurseException(args, kwargs)\n else:\n # Here you run the frame in a try catch setup\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException as e:\n args = e.args\n kwargs = e.kwargs\n func.__doc__ = g.__doc__\n return func", "def capture_stack(self):\n if not hasattr(self._thread_data, 'capture_stack'):\n self._thread_data.capture_stack = []\n return self._thread_data.capture_stack", "def _sourceFrame(self):\n try:\n raise Exception('catch me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry", "def dispatch_call(self, frame, arg):\n # XXX 'arg' is no longer used\n if self.botframe is None:\n # First call of dispatch since reset()\n self.botframe = frame.f_back # (CT) Note that this may also be None!\n return self.trace_dispatch\n if not (self.stop_here(frame) or self.break_anywhere(frame)):\n # No 
need to trace this function\n return # None\n # Ignore call events in generator except when stepping.\n if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:\n return self.trace_dispatch\n self.user_call(frame, arg)\n if self.quitting: raise BdbQuit\n return self.trace_dispatch", "def change_frame(self, frame):\r\n pass", "def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def get_frame(*args):\n return _ida_frame.get_frame(*args)", "def show_frame(self, page_name):\n frame = self.frames[page_name]\n frame.tkraise()", "def show_frame(self, page_name):\n frame = self.frames[page_name]\n frame.tkraise()", "def stack():\n return currentframe().f_back.f_locals.setdefault(SN, [])", "def callee(calls):\n calls.append(1)", "def trace(self, *args, **kwargs): # real signature unknown\n pass", "def getCallerParams(self,frameLevel=1):\n # frameLevel=0 is always getCallerParams. Caller should be level 1, but sometimes level 1 is still in Debug. This causes many dirty hacks.\n levelsToAdd=frameLevel-1\n #debugDir=dir(self)\n #debugDir.remove('__init__') # without removing __init__ was debug unusable in any __init__. Following line is temporary unslashed only\n debugDir=['allowed', 'allowedLevels', 'caller', 'callerLocals', 'callerName', 'dprint', 'getCallerName', 'getCallerParams', 'printHeader', 'restricted', 'settings']\n while sys._getframe(frameLevel).f_code.co_name in debugDir: # restrict returning functions from Debug instance -- dirty hack\n # but causes trouble for init which is in every class. property debugDir hacks this issue.\n if frameLevel>1: print '%i: %s'%(frameLevel,sys._getframe(frameLevel).f_code.co_name)\n frameLevel+=1\n frameLevel+=levelsToAdd # another hack to get another frame\n self.caller=sys._getframe(frameLevel)\n self.callerLocals=self.caller.f_locals\n try:\n if self.callerLocals.has_key('self'):\n #debug.dprint(print str(self.callerLocals['self'].__class__).split(' ')[1],4)\n self.callerName=(\n str(self.callerLocals['self']).split(' ')[0].replace('<__main__.','')+\n '.'+self.caller.f_code.co_name)\n # 026 #if self.callerLocals.has_key('self'): del self.callerLocals['self'] # 025 Fix - caused errors in multithreadng.\n else: self.callerName=self.caller.f_code.co_name\n except KeyError, errorInstance:\n #026 #self.headerLogger.error(\"Caught KeyError. Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.exception(\"Caught KeyError. 
Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.debug(\"callerLocals is %s\"%(str(self.callerLocals)))\n return (self.callerName,self.callerLocals)", "def tail_call_optimized(g):\n def func(*args, **kwargs):\n f = sys._getframe()\n if f.f_back and f.f_back.f_back \\\n and f.f_back.f_back.f_code == f.f_code:\n # 抛出异常\n raise TailRecurseException(args, kwargs)\n else:\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException, e:\n # 捕获异常,重新调用栈\n args = e.args\n kwargs = e.kwargs\n func.__doc__ = g.__doc__\n return func", "def showframe(self, frame):\n self.frames[frame].show()", "def set_current_frame(self, entry):\n input_frame = int(self.current_frame_entry.get())\n self.show_frame(frame_number=input_frame)", "def do_display_stack_ascii(self, address):\n if self.reader.exception is None:\n print(\"Minidump has no exception info\")\n return\n if len(address) == 0:\n address = None\n else:\n address = self.ParseAddressExpr(address)\n self.padawan.PrintStackTraceMessage(address)", "def trace_line(self, frame, event, arg):\n\n self.active_frame = frame\n\n self.base_trace(frame, event, arg)", "def set_trace(self, frame=None):\n if frame is None:\n frame = sys._getframe().f_back\n self.reset()\n while frame:\n frame.f_trace = self.trace_dispatch\n self.botframe = frame\n frame = frame.f_back\n self.set_step()\n sys.settrace(self.trace_dispatch)", "def call_chain_to_next_log_calls_fn(cls):\n curr_frame = sys._getframe(2) # caller-of-caller's frame\n\n call_list = []\n prev_indent_level = -1\n\n found = False\n found_enabled = False\n hit_bottom = False # break both loops: reached <module>\n while not found_enabled and not hit_bottom:\n while 1: # until found a deco'd fn or <module> reached\n curr_funcname = curr_frame.f_code.co_name\n if curr_funcname == '_deco_base_f_wrapper_':\n # Previous was decorated inner fn, fixup; overwrite '_deco_base_f_wrapper_'\n # with name of wrapped function\n inner_fn = curr_frame.f_locals['f']\n call_list[-1] = inner_fn.__name__ # ~ placeholder\n\n wrapper_frame = curr_frame\n found = True\n break # inner loop\n\n call_list.append(curr_funcname)\n\n if curr_funcname == '<module>':\n hit_bottom = True\n break # inner loop\n\n globs = curr_frame.f_back.f_globals\n curr_fn = None\n if curr_funcname in globs:\n wrapper_frame = curr_frame.f_back\n curr_fn = globs[curr_funcname]\n # If curr_funcname is a decorated inner function,\n # then it's not in globs. If it's called from outside\n # its enclosing function, its caller is '_deco_base_f_wrapper_'\n # so we'll see that on next iteration.\n else:\n try:\n # if it's a decorated inner function that's called\n # by its enclosing function, detect that:\n locls = curr_frame.f_back.f_back.f_locals\n except AttributeError: # \"never happens\"\n # print(\"**** %s not found (inner fn?)\" % curr_funcname) # <<<DEBUG>>>\n pass\n else:\n wrapper_frame = curr_frame.f_back\n if curr_funcname in locls:\n curr_fn = locls[curr_funcname]\n # print(\"**** %s found in locls = curr_frame.f_back.f_back.f_locals, \"\n # \"curr_frame.f_back.f_back.f_code.co_name = %s\"\n # % (curr_funcname, curr_frame.f_back.f_back.f_locals)) # <<<DEBUG>>>\n if hasattr(curr_fn, cls._sentinels['SENTINEL_ATTR']):\n found = True\n break # inner loop\n\n curr_frame = curr_frame.f_back\n\n # If found, then call_list[-1] is log_calls-wrapped\n if found:\n # Look in stack frame (!) 
for (0.2.4) STACKFRAME_HACK_DICT_NAME\n # and use its values\n # _enabled, _log_call_numbers, _active_call_number, _extra_indent_level, _prefixed_fname\n if wrapper_frame.f_locals.get(STACKFRAME_HACK_DICT_NAME):\n active_call_items = wrapper_frame.f_locals[STACKFRAME_HACK_DICT_NAME]\n enabled = active_call_items['_enabled'] # it's >= 0\n log_call_numbers = active_call_items['_log_call_numbers']\n active_call_number = active_call_items['_active_call_number']\n call_list[-1] = active_call_items['_prefixed_fname'] # Hack alert (Pt 3)\n\n # only change prev_indent_level once, for nearest deco'd fn\n if prev_indent_level < 0:\n prev_indent_level = active_call_items['_extra_indent_level']\n\n if enabled and log_call_numbers:\n call_list[-1] += (' [%d]' % active_call_number)\n found_enabled = enabled # done with outer loop too if enabled\n else: # bypassed\n enabled = False\n\n if not enabled:\n curr_frame = curr_frame.f_back\n else: # not found\n # if not found, truncate call_list to first element.\n hit_bottom = True\n\n if hit_bottom:\n call_list = call_list[:1]\n return call_list, prev_indent_level", "def show_frame(self, framekey):\r\n frame = self.frames[framekey]\r\n frame.tkraise()\r\n\r\n def get_page(self, classname):\r\n \"\"\"\r\n Take the name f the page and return the objet page\r\n \"\"\"\r\n return self.frames[classname]", "def trace(self):\n\n print_on(\n \"TRACE --- {} {} {} | {} {} {} | \".format(\n *self.format_iterable(\n self.program_pointer,\n self.stack_pointer,\n self.flags,\n self.read_register(self.program_pointer),\n self.read_register(self.program_pointer + 1),\n self.read_register(self.program_pointer + 2),\n )\n )\n )\n\n print_on(\" \".join(self.format_iterable(*self.register)))\n\n print()\n\n return", "def get_frame(self, i):\n return self.get_traceback(i).tb_frame", "def findCaller(self, stack_info=False):\n \n _frame_object = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X: Frames.\n if (_frame_object is not None):\n _frame_object = _frame_object.f_back\n \n rv = (\"(unknown file)\", 0, \"(unknown function)\", None)\n while hasattr(_frame_object, 'f_code'):\n _code_object = _frame_object.f_code\n filename = os.path.normcase(_code_object.co_filename)\n \n _next = _frame_object.f_back\n # noinspection PyProtectedMember,PyUnresolvedReferences\n if (filename == logging._srcfile):\n _frame_object = _next\n continue\n \n if (_next and hasattr(_next, 'f_code')):\n _parent_code = _next.f_code\n if (_parent_code.co_name == LOGGING_WRAPPER_NAME):\n _frame_object = _next.f_back\n continue\n \n _stack_info = None\n if (stack_info):\n _str_io = StringIO()\n _str_io.write('Stack (most recent call last):\\n')\n traceback.print_stack(_frame_object, file=_str_io)\n _stack_info = _str_io.getvalue()\n if (_stack_info[-1] == '\\n'):\n _stack_info = _stack_info[:-1]\n _str_io.close()\n \n rv = (_code_object.co_filename, _frame_object.f_lineno, _code_object.co_name, _stack_info)\n break\n return rv", "def test_strframe():\n obj = pmisc.strframe\n\n def check_basic_frame(lines):\n fname = pmisc.normalize_windows_fname(os.path.realpath(__file__))\n assert lines[0].startswith(\"\\x1b[33mFrame object ID: 0x\")\n assert lines[1].startswith(\n \"File name......: {0}\".format(fname.replace(\".pyc\", \".py\"))\n )\n assert lines[2].startswith(\"Line number....: \")\n assert lines[3] == \"Function name..: test_strframe\"\n assert lines[4] == r\"Context........: [' fobj = inspect.stack()[0]\\n']\"\n assert lines[5] == 
\"Index..........: 0\"\n\n fobj = inspect.stack()[0]\n lines = obj(fobj).split(\"\\n\")\n check_basic_frame(lines)\n assert len(lines) == 6\n lines = [\n line\n for num, line in enumerate(obj(fobj, extended=True).split(\"\\n\"))\n if (num < 6) or line.startswith(\"f_\")\n ]\n check_basic_frame(lines)\n assert lines[6].startswith(\"f_back ID......: 0x\")\n assert lines[7].startswith(\"f_builtins.....: {\")\n assert lines[8].startswith(\"f_code.........: \" \"<code object test_strframe at \")\n assert lines[9].startswith(\"f_globals......: {\")\n assert lines[10].startswith(\"f_lasti........: \")\n assert lines[11].startswith(\"f_lineno.......: \")\n assert lines[12].startswith(\"f_locals.......: {\")\n if sys.hexversion < 0x03000000:\n assert lines[13] == \"f_restricted...: False\"\n assert lines[14].startswith(\"f_trace........: \")\n assert len(lines) == 15\n else:\n assert lines[13].startswith(\"f_trace........: \")\n assert len(lines) == 14", "def find_traceback_start(self):\n ### FILL IN ###", "def _original(self, fname):\n # callframe is oringally -1\n frame = self._plugin_callframe.setdefault(fname, -1)\n frame += 1\n self._plugin_callframe[fname] = frame\n # print(cls._plugin_stacks)\n return cls._plugin_stacks[fname][frame]", "def _request_frame(self):\n self._send_command('GET_FRAME')" ]
[ "0.73913187", "0.68238056", "0.6653886", "0.65766746", "0.6376097", "0.63759094", "0.6302292", "0.627555", "0.625705", "0.6184294", "0.6113973", "0.60998887", "0.6060529", "0.60272187", "0.5915152", "0.5864522", "0.5844503", "0.57457596", "0.5740299", "0.5735211", "0.5721572", "0.5721546", "0.5716846", "0.5677243", "0.56228405", "0.55696106", "0.55607605", "0.5555542", "0.55502766", "0.55316067", "0.55308574", "0.5529111", "0.55122", "0.5484262", "0.5478999", "0.5478999", "0.5454557", "0.5430116", "0.5424547", "0.53990585", "0.53966486", "0.53858393", "0.53664386", "0.5364383", "0.53604496", "0.53573817", "0.5352921", "0.5348026", "0.53091234", "0.5299095", "0.52939105", "0.526882", "0.5247046", "0.52463216", "0.52462363", "0.52444136", "0.5230084", "0.522671", "0.52168506", "0.5216387", "0.5212007", "0.52115446", "0.5206", "0.5196168", "0.51857984", "0.5184029", "0.5167333", "0.5163059", "0.515495", "0.51517063", "0.51428354", "0.51386887", "0.51214653", "0.51167536", "0.51141", "0.51063234", "0.51062965", "0.5102316", "0.50993294", "0.5090933", "0.5090933", "0.5090516", "0.5086902", "0.5074492", "0.50735074", "0.50703895", "0.50605965", "0.5052656", "0.504938", "0.50486064", "0.50448936", "0.5034986", "0.50345236", "0.5033488", "0.503318", "0.50283474", "0.50149596", "0.5008033", "0.50055027", "0.49991512" ]
0.668176
2
Select and print stack frame called by this one. An argument says how many frames down to go.
def down(n=1):
    f = gdb.selected_frame()

    for i in range(n):
        o = f.newer()
        if o:
            o.select()

    bt = pwndbg.commands.context.context_backtrace(with_banner=False)
    print('\n'.join(bt))

    j()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_frame(self, arg):\n if not arg:\n # Just display the frame, without handling sticky.\n self.print_stack_entry(self.stack[self.curindex])\n return\n\n try:\n arg = int(arg)\n except (ValueError, TypeError):\n print(\n '*** Expected a number, got \"{0}\"'.format(arg), file=self.stdout)\n return\n if abs(arg) >= len(self.stack):\n print('*** Out of range', file=self.stdout)\n return\n if arg >= 0:\n self.curindex = arg\n else:\n self.curindex = len(self.stack) + arg\n self.curframe = self.stack[self.curindex][0]\n self.curframe_locals = self.curframe.f_locals\n self.print_current_stack_entry()\n self.lineno = None", "def _select_frame(self, number):\n assert 0 <= number < len(self.stack), (number, len(self.stack))\n self.curindex = number\n self.curframe = self.stack[self.curindex][0]\n self.curframe_locals = self.curframe.f_locals\n self.print_current_stack_entry()\n self.lineno = None", "def up(n=1):\n f = gdb.selected_frame()\n\n for i in range(n):\n o = f.older()\n if o:\n o.select()\n\n bt = pwndbg.commands.context.context_backtrace(with_banner=False)\n print('\\n'.join(bt))\n\n j()", "def inspect_frame(self, frame):\n while frame:\n self.inspect_single_frame(frame)\n frame = frame.f_back", "def _getframe(depth=None): # real signature unknown; restored from __doc__\n pass", "def record_python_call_stack(self, frames_to_skip: int) -> infra.Stack:\n frames_to_skip += 1 # Skip this function.\n stack = utils.python_call_stack(frames_to_skip=frames_to_skip)\n self.with_stack(stack)\n if len(stack.frames) > 0:\n self.with_location(stack.frames[0].location)\n return stack", "def record_cpp_call_stack(self, frames_to_skip: int) -> infra.Stack:\n # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info.\n # No need to skip this function because python frame is not recorded\n # in cpp call stack.\n stack = _cpp_call_stack(frames_to_skip=frames_to_skip)\n stack.message = \"C++ call stack\"\n self.with_stack(stack)\n return stack", "def calling_stack_info(print_res=True, code_context=1):\n\n start_frame = inspect.currentframe().f_back\n\n fil = generate_frame_list_info(start_frame, code_context=code_context)\n\n if print_res:\n # noinspection PyUnresolvedReferences\n print(fil.tb_txt)\n return fil", "def stack(context=1):\r\n return getouterframes(sys._getframe(1), context)", "def printframe(frame,endframe):\n line = \"\\r timeframe: {:d} / {:d}\".format(frame, endframe)\n #print(line),\n sys.stdout.write(line)\n sys.stdout.flush()", "def _cpp_call_stack(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack:\n # NOTE: Cannot use `@_beartype.beartype`. 
It somehow erases the cpp stack frame info.\n frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split(\"\\n\")\n frame_messages = []\n for frame in frames:\n segments = frame.split(\":\", 1)\n if len(segments) == 2:\n frame_messages.append(segments[1].strip())\n else:\n frame_messages.append(\"<unknown frame>\")\n return infra.Stack(\n frames=[\n infra.StackFrame(location=infra.Location(message=message))\n for message in frame_messages\n ]\n )", "def currentframe(_no_of_go_up_level):\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[_no_of_go_up_level - 1].tb_frame.f_back", "def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])", "def callstack_push(*frame):\n callstack_now().append(frame)", "def spew(self):\n for frame in self.frames:\n print frame.func, frame", "def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)", "def do_top(self, arg):\n if self.curindex == 0:\n self.error('Oldest frame')\n return\n self._select_frame(0)", "def probe_stack(depth = 10):\n if depth == 0:\n return\n probe_stack(depth - 1)", "def base_trace(self, frame, event, arg):\n\n # print(\"Tracing %s %s %s (%s))\" % (event, \"<File %s, Line %s>\" % (frame.f_code.co_filename, frame.f_lineno), str(arg), str(id(threading.current_thread()))))\n\n # if true, breakpoints will be checked\n test_breakpoints = True\n\n # check for steppings\n if self.stepping != SteppingMode.STEP_NO_STEP:\n # print(\"Tracing for %s %s %s %s (%s))\" % (str(self.stepping), event, \"<File %s, Line %s>\" % (frame.f_code.co_filename, frame.f_lineno), str(arg), str(id(threading.current_thread()))))\n\n # single execution step, to move out of return/call frames into line frames\n if self.stepping == SteppingMode.STEP_SINGLE_EXEC:\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_NO_STEP\n self.break_pause = False\n self.cont = False\n handler.pause_debugging()\n\n # step INTO and call happens on same level as we are, we are in\n # just move one step to line\n if self.stepping == SteppingMode.STEP_INTO and self.active_frame.f_back is self.stored_frames[1] and event == \"call\":\n # this will exit because call is unhandled!\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_SINGLE_EXEC\n self.pause_reason = \"stepIn\"\n\n # step INTO but there is nothing to go in\n # so only move as step\n if self.stepping == SteppingMode.STEP_INTO and self.active_frame is self.stored_frames[1] and event != \"return\":\n self.stepping = SteppingMode.STEP_NEXT\n\n # same as above but we are returning, so do single step to move out\n if self.stepping == SteppingMode.STEP_INTO and self.active_frame is self.stored_frames[1] and event != \"return\":\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_SINGLE_EXEC\n self.pause_reason = \"step\"\n\n # step OUT and return happens, just move one step to line\n if self.stepping == SteppingMode.STEP_OUT and self.active_frame is self.stored_frames[1] and event == \"return\":\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_SINGLE_EXEC\n self.pause_reason = \"stepOut\"\n return # exit evaluation\n\n # next will always break if this is line\n if self.stepping == SteppingMode.STEP_NEXT and self.active_frame is self.stored_frames[1] and event != \"call\":\n test_breakpoints = False\n self.stepping = SteppingMode.STEP_NO_STEP\n self.break_pause = False\n self.pause_reason = \"step\"\n self.cont = False\n handler.pause_debugging()\n\n if event == 
\"exception\" or event == \"call\":\n return # TODO: exceptions, calls\n\n if test_breakpoints:\n # due to lock we move triggered breakpoint to here\n breaking_on = None\n\n # check breakpoints under lock\n with self.bkp_lock:\n for breakpoint in self.active_breakpoints:\n if breakpoint.applies(frame):\n breaking_on = breakpoint\n break\n if breaking_on is not None:\n print(\"Broke at %s %s %s (%s))\" % (event, \"<File %s, Line %s>\" % (frame.f_code.co_filename, frame.f_lineno), str(arg), str(id(threading.current_thread()))))\n self.break_code(breaking_on) # sets this to blocking\n\n # check for external requested pause\n if self.break_pause:\n self.break_pause = False\n self.pause_reason = \"pause\"\n self.cont = False\n handler.pause_debugging()\n\n while not self.cont:\n # spinlock when we are waiting for debugger\n time.sleep(0.1)", "def print_frame(self, name, frame, on_svr=False):\n\n name = \"print{}: {}\".format(self.step, name)\n\n # print using svr\n if on_svr:\n svr.debug(name, frame)\n\n # print using openCV\n else: \n self.debug_stream(name, frame)\n\n # increment step counter\n self.step += 1", "def _debug_stack(self):\n debug(\"current stack: %s\" % self.calc.stack)", "def frame(self):", "def trace_function(frame, event, arg):\n co = frame.f_code\n func_name = co.co_name\n if func_name == 'write':\n # Ignore write() calls from print statements\n return\n filename = co.co_filename\n if event == 'call':\n # decend into the stack...\n return trace_function\n elif event == 'return':\n if isinstance(arg, basestring) and 'inputlocator' in filename.lower() and not func_name.startswith('_'):\n results_set.add((func_name, arg))\n # print('%s => %s' % (func_name, arg))\n return", "def get_stack_frames(self, threadId=0, startFrame=0, levels=0, format=None):\n\n # format is ignored, TODO?\n # threadId is ignored since renpy is single threaded for stuff we need\n\n clevel = 0\n slevel = 0 if startFrame is None else startFrame\n elevel = None if levels is None or levels == 0 else levels\n\n frames = []\n cframe = self.active_frame\n while cframe is not None:\n if clevel >= slevel:\n finfo = {}\n\n finfo[\"id\"] = clevel\n finfo[\"name\"] = cframe.f_code.co_name + self.format_method_signature(cframe.f_locals, cframe.f_code)\n finfo[\"source\"] = {\"path\": cframe.f_code.co_filename}\n finfo[\"line\"] = cframe.f_lineno\n finfo[\"presentationHint\"] = \"normal\"\n finfo[\"column\"] = 0\n\n dis_info = {}\n finfo[\"subsource\"] = dis_info\n\n disassembled = dis(cframe.f_code, cframe.f_lasti)\n dis_info[\"sources\"] = [{\"text\": self.format_disassembly(cframe.f_lineno, *de), \"line\": de[1], \"source\": finfo[\"source\"]} for de in disassembled]\n ord = 0\n for de in disassembled:\n if de[0]:\n break\n ord += 1\n finfo[\"subsourceElement\"] = ord\n\n frames.append(finfo)\n clevel += 1\n if elevel is not None and clevel >= elevel:\n break\n cframe = cframe.f_back\n\n return frames", "def print_callsite_location():\n fi = inspect.getouterframes( inspect.currentframe() )[2]\n print(\"{path}:{line} {fname}\".format(\n line=fi.lineno, path=fi.filename, fname=fi.function))", "def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except 
IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)", "def currentframe():\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[2].tb_frame.f_back", "def currentframe():\n return sys._getframe(3)", "def dispatch_frame(self, frame):", "def trace_event(self, frame, event, arg):\n\n self.active_frame = frame\n self.active_call = frame\n\n if event == \"call\":\n frame.f_trace = self.trace_line\n\n self.base_trace(frame, event, arg)", "def show_frame(self, *args):\n current_pg = args[0]\n frame = self.frames[current_pg]\n frame.tkraise()\n\n if current_pg == PageTen:\n graham_picks, lynch_picks = Complete_Interface.completing_evalution()\n self.last_pg_results(graham_picks, lynch_picks, *args)", "def do_bottom(self, arg):\n if self.curindex + 1 == len(self.stack):\n self.error('Newest frame')\n return\n self._select_frame(len(self.stack) - 1)", "def display_frame(frame: Frame, delay: Optional[int] = 1) -> None:\n # print('frame {}'.format(frame.data.frameID))\n \n # get a copy of the frame data\n _frames.append(frame)", "def debug(sig, frame):\n d={'_frame':frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message =\"Signal received : entering python shell.Traceback:\"\n message += ''.join(traceback.format_stack(frame))\n i.interact(message)", "def visualizar(self):\n print(self.stack)", "def currentframe():\n try:\n raise Exception\n except:\n return sys.exc_info()[2].tb_frame.f_back", "def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. 
Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack", "def _current_frames(): # real signature unknown; restored from __doc__\n return {}", "def refresh_stack(self):\n self.stack, _ = self.compute_stack(self.fullstack)\n # find the current frame in the new stack\n for i, (frame, _) in enumerate(self.stack):\n if frame is self.curframe:\n self.curindex = i\n break\n else:\n self.curindex = len(self.stack)-1\n self.curframe = self.stack[-1][0]\n self.print_current_stack_entry()", "def dump_stacktraces():\n lines = []\n for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212\n lines.append(\"\\n######### ProcessID=%s, ThreadID=%s #########\" % (\n os.getpid(), thread_id\n ))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n lines.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n lines.append(\" %s\" % (line.strip()))\n lines.append(\"#############################################\\n\\n\")\n\n print('\\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout)", "def frame(self):\n self.run_command('frame')", "def frames():\n raise RuntimeError('Must be implemented by subclasses.')", "def assign_token(self, frame):\n \tprint(str(frame))", "def get_frame(self, ind):\n pass", "def callstack_now():\n return checkpoints[-1]", "def annotate_stacks(self):\n curthread = gdb.selected_thread()\n try:\n for thread in gdb.selected_inferior().threads():\n thread.switch()\n\n # This is different depending on gdb version\n try:\n frame = gdb.newest_frame()\n stackpointer = frame.read_register(\"sp\")\n except:\n regname, as_hex, as_int = gdb.execute(\"info register sp\", False, True).split()\n stackpointer = int(as_hex, 16)\n memrange = self.get_range(stackpointer)\n tid = thread.ptid[1] if thread.ptid[1] else thread.ptid[2]\n if memrange is None:\n print(\"Did not find stack of thread %d\" % tid)\n continue\n memrange.settype(MemoryType.Stack, \"Stack of thread %d(TID %d)\" % (thread.num, tid))\n finally:\n curthread.switch()", "def print_frames(frames):\n for i, frame in enumerate(frames):\n clear_output(wait=True)\n print(frame['frame'])\n print(f\"Episode: {frame['episode']}\")\n print(f\"Timestep: {i + 1}\")\n print(f\"State: {frame['state']}\")\n print(f\"Previous action: {frame['action']}\")\n if frame['action'] == 0:\n print(\"Action is: south\")\n if frame['action'] == 1:\n print(\"Action is: north\")\n if frame['action'] == 2:\n print(\"Action is: east\")\n if frame['action'] == 3:\n print(\"Action is: west\")\n if frame['action'] == 4:\n print(\"Action is: pickup passenger 1 \") \n if frame['action'] == 5:\n 
print(\"Action is: dropoff passenger 1\")\n if frame['action'] == 6:\n print(\"Action is: pickup passenger 2\")\n if frame['action'] == 7:\n print(\"Action is: dropoff passenger 2\")\n print(f\"Reward: {frame['reward']}\")\n print(f\"Total Reward: {frame['total reward']}\")\n time.sleep(.5)", "def dump_stacks(self):\n\n dump = []\n\n # threads\n threads = dict([(th.ident, th.name) for th in threading.enumerate()])\n\n for thread, frame in sys._current_frames().items():\n if thread not in threads:\n continue\n dump.append(\"Thread 0x%x (%s)\\n\" % (thread, threads[thread]))\n dump.append(\"\".join(traceback.format_stack(frame)))\n dump.append(\"\\n\")\n\n return \"\".join(dump)", "def supertrace(max_len=160):\r\n tb = sys.exc_info()[2]\r\n while True:\r\n if not tb.tb_next:\r\n break\r\n tb = tb.tb_next\r\n stack = []\r\n frame = tb.tb_frame\r\n while frame:\r\n stack.append(f)\r\n frame = frame.f_back\r\n stack.reverse()\r\n # First print the regular traceback\r\n traceback.print_exc()\r\n\r\n print \"Locals by frame, innermost last\"\r\n for frame in stack:\r\n print\r\n print \"Frame %s in %s at line %s\" % (frame.f_code.co_name,\r\n frame.f_code.co_filename,\r\n frame.f_lineno)\r\n for key, value in frame.f_locals.items():\r\n print (\"\\t%20s = \" % smart_unicode(key, errors='ignore')),\r\n # We have to be careful not to cause a new error in our error\r\n # printer! Calling str() on an unknown object could cause an\r\n # error.\r\n try:\r\n s = smart_unicode(value, errors='ignore')\r\n if max_len is not None:\r\n s = s[:max_len]\r\n print s\r\n except:\r\n print \"<ERROR WHILE PRINTING VALUE>\"", "def caller_name(self, skip=6):\r\n stack = inspect.stack()\r\n start = 0 + skip\r\n if len(stack) < start + 1:\r\n return ''\r\n parentframe = stack[start][0] \r\n\r\n name = []\r\n module = inspect.getmodule(parentframe)\r\n # `modname` can be None when frame is executed directly in console\r\n # TODO(techtonik): consider using __main__\r\n if module:\r\n name.append(module.__name__)\r\n # detect classname\r\n if 'self' in parentframe.f_locals:\r\n # I don't know any way to detect call from the object method\r\n # XXX: there seems to be no way to detect static method call - it will\r\n # be just a function call\r\n name.append(parentframe.f_locals['self'].__class__.__name__)\r\n codename = parentframe.f_code.co_name\r\n if codename != '<module>': # top level usually\r\n name.append( codename ) # function or a method\r\n\r\n ## Avoid circular refs and frame leaks\r\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\r\n del parentframe, stack\r\n\r\n return \".\".join(name)", "def __init__(self):\n st = inspect.stack()\n frames = [Snapframe(tup) for tup in st[3:]]\n frames.reverse()\n self.frames=frames", "def j(*args):\n try:\n pc = int(gdb.selected_frame().pc())\n pwndbg.ida.Jump(pc)\n except Exception:\n pass", "def _exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = 
\"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack", "def mock_frame(stack):\n return inspect.stack()[0]", "def mock_frame(stack):\n return inspect.stack()[0]", "def epu_signal_stack_debug(sig, frame):\n d = {'_frame': frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n message = \"Signal recieved : entering python shell.\\nTraceback:\\n\"\n message += ''.join(traceback.format_stack(frame))\n log.info(message)\n\n message = dumpstacks()\n log.info(message)", "def trace_dispatch(self, frame, event, arg):\n if self.quitting:\n return # None\n if event == 'line':\n return self.dispatch_line(frame)\n if event == 'call':\n return self.dispatch_call(frame, arg)\n if event == 'return':\n return self.dispatch_return(frame, arg)\n if event == 'exception':\n return self.dispatch_exception(frame, arg)\n if event == 'c_call':\n return self.trace_dispatch\n if event == 'c_exception':\n return self.trace_dispatch\n if event == 'c_return':\n return self.trace_dispatch\n print('bdb.Bdb.dispatch: unknown debugging event:', repr(event))\n return self.trace_dispatch", "def user_call(self, frame, argument_list):\n pass", "def print_display(stack, count = 0):\n \n #The starting index of the print of \"stack\"\n start_index = count\n \n #Creating a stopper variable (AKA \"you have hit the 20 item mark\")\n \n stopper = stopper_function(stack, count)\n \n \n #The loop responsible for priting the items to screen \n while count <= (len(stack)-1):\n \n print \"\\nindex:\", count\n print \"name of Item:\", stack[count].name, \"\\n\"\n \n if count == stopper:\n \"\"\"\n if count == (len(stack)-1):\n \n break\n \n else:\n \n \"\"\"\n response = user_input(start_index, stopper, stack)\n \n #reseting variables\n start_index = count\n stopper = stopper_function(stack, count)\n \n if response == \"done\": #exits out of the print_to_screen loop and \n #enters you back into the main program.\n break\n \n else:\n pass\n \n else:\n pass\n \n count += 1\n \n #print \"print_display works!\" \n return \"done\"", "def trace(self):\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.memory_read(self.pc),\n self.memory_read(self.pc + 1),\n self.memory_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" 
% self.registers[i], end='')\n\n print()", "def _exit_print(self):\n if self.cur_frame >= self.config.MAX_FRAMES:\n self.stopped = True", "def do_display_stack_ascii(self, address):\n if self.reader.exception is None:\n print(\"Minidump has no exception info\")\n return\n if len(address) == 0:\n address = None\n else:\n address = self.ParseAddressExpr(address)\n self.padawan.PrintStackTraceMessage(address)", "def trace(self, frame, event, arg):\n if event == \"call\":\n if frame.f_code.co_filename.startswith(\"<memory/\"):\n return self.tracerobot\n else:\n return None\n return trace", "def capture_stack(self):\n if not hasattr(self._thread_data, 'capture_stack'):\n self._thread_data.capture_stack = []\n return self._thread_data.capture_stack", "def tail_call_optimized(g):\n def func(*args, **kwargs):\n f = sys._getframe() # Top frame\n # if two and three depth frames exist and if the code at the top\n # recursion and the three depth frame use the same code \n if f.f_back and f.f_back.f_back and f.f_back.f_back.f_back \\\n and f.f_back.f_back.f_back.f_code == f.f_code:\n # Break the recursion\n raise TailRecurseException(args, kwargs)\n else:\n # Here you run the frame in a try catch setup\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException as e:\n args = e.args\n kwargs = e.kwargs\n func.__doc__ = g.__doc__\n return func", "def debug_caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def goToFirstFrame():\n nuke.frame(int(nuke.root()[\"first_frame\"].getValue()))", "def trace(self):\n\n print_on(\n \"TRACE --- {} {} {} | {} {} {} | \".format(\n *self.format_iterable(\n self.program_pointer,\n self.stack_pointer,\n self.flags,\n self.read_register(self.program_pointer),\n self.read_register(self.program_pointer + 1),\n self.read_register(self.program_pointer + 2),\n )\n )\n )\n\n print_on(\" \".join(self.format_iterable(*self.register)))\n\n print()\n\n return", "def trace(self):\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.read_from_memory(self.pc),\n self.read_from_memory(self.pc + 1),\n self.read_from_memory(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.register[i], end='')\n\n print()", "def _repr_(self):\n try:\n num = len(self)\n except TypeError:\n num = \"unknown number of\"\n return \"Animation with %s frames\"%num", "def _sourceFrame(self):\n try:\n raise Exception('catch me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry", "def printCallTraceRecursive(self, callStack, processedFuncIdxs=set()):\n assert callStack\n\n funcIdx = callStack[-1]\n depth = len(callStack) - 1\n print('--' * depth + str(self.getParentBaseFiles(funcIdx)) + ' ' + 
self.idx2Funcs[funcIdx])\n if funcIdx in processedFuncIdxs:\n # Base case 1: This function's call hierarchy has been processed (memoization)\n print('--' * (depth + 1) + '... (Truncated: Sub-hierarchy processed before)')\n callStack.pop(-1)\n return\n else:\n processedFuncIdxs.add(funcIdx)\n\n calleeList = self.caller2callee[funcIdx]\n for calleeIdx in calleeList:\n if calleeIdx not in callStack:\n callStack.append(calleeIdx)\n self.printCallTraceRecursive(callStack, processedFuncIdxs)\n else:\n # Base case 2: Recursion cycle detected\n # TODO: This base case may be redundant and never be reached because of base case 1. Can consider removing.\n print('--' * (depth + 1) + str(self.getParentBaseFiles(calleeIdx)) + ' ' + self.idx2Funcs[calleeIdx] + '(recursion)')\n callStack.pop(-1)\n return\n\n # Base case 3: Finished printing all callees\n callStack.pop(-1)\n return", "def curframe(self):\n return self._stack[self._curframe_index][0]", "def trace(self, *args, **kwargs): # real signature unknown\n pass", "def tail_call_optimized(g):\n def func(*args, **kwargs):\n f = sys._getframe()\n if f.f_back and f.f_back.f_back \\\n and f.f_back.f_back.f_code == f.f_code:\n # 抛出异常\n raise TailRecurseException(args, kwargs)\n else:\n while 1:\n try:\n return g(*args, **kwargs)\n except TailRecurseException, e:\n # 捕获异常,重新调用栈\n args = e.args\n kwargs = e.kwargs\n func.__doc__ = g.__doc__\n return func", "def trace(func):\n @wraps(func)\n def tracer(*args, **kwargs):\n name = func.__name__\n stack_size = int(len(inspect.stack(0)) / 2) # @wraps(func) is also increasing the size\n indent = stack_size*'\\t'\n print(f'{indent} > Entering \"{name}\": args: {args}')\n result = func(*args, **kwargs)\n print(f'{indent} < Leaving \"{name}\"')\n return result\n\n return tracer", "def change_frame(self, frame):\r\n pass", "def show_stack(self) -> None:\n print(\"Show stack: \")\n ok = 1\n for i in reversed(self.items):\n print(i)\n ok = 0\n if ok:\n print(\"The stack is empty!\")\n print(\"\\n\")", "def set_trace(self, frame=None):\n if frame is None:\n frame = sys._getframe().f_back\n self.reset()\n while frame:\n frame.f_trace = self.trace_dispatch\n self.botframe = frame\n frame = frame.f_back\n self.set_step()\n sys.settrace(self.trace_dispatch)", "def showPrevFrame(self):\n if(self.hasPrevFrame()):\n self.activeFrames.pop()\n self.activeFrames[-1].tkraise()\n else:\n self.showFrame(\"frame_start\")", "def trace_line(self, frame, event, arg):\n\n self.active_frame = frame\n\n self.base_trace(frame, event, arg)", "def frame_idx(self) -> int:\n pass", "def start_stack(StackId=None):\n pass", "def close_frames(depth: int) -> None:\n d = current_depth - depth\n for i in range(d):\n j = depth + d - (i + 1) # Current depth\n line = f'· {\"│ \" * j}└{\"┄\" * 3}' # * depth_widths[j]\n print(c.BRIGHT_WHITE + line.ljust(0, '━') + c.ENDC) # 80 also work", "def debug(sig, frame):\n d={'_frame':frame} # Allow access to frame object.\n d.update(frame.f_globals) # Unless shadowed by global\n d.update(frame.f_locals)\n\n i = code.InteractiveConsole(d)\n message = \"Signal received : entering python shell.\\nTraceback:\\n\"\n message += ''.join(traceback.format_stack(frame))\n i.interact(message)", "def get_frame(self, i):\n return self.get_traceback(i).tb_frame", "def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # 
memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s", "def showframe(self, frame):\n self.frames[frame].show()", "def show_frame(self, page_name):\n frame = self.frames[page_name]\n frame.tkraise()", "def show_frame(self, page_name):\n frame = self.frames[page_name]\n frame.tkraise()", "def get_frame(*args):\n return _ida_frame.get_frame(*args)", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def tprint(self, cmd, end='\\n'):\n if ENABLE_DEBUG:\n stackIndex = 0\n for index, stackFrame in enumerate(stack()):\n caller = getframeinfo(stackFrame[0])\n if caller.filename == fullPath:\n stackIndex = index\n break \n caller = getframeinfo(stack()[stackIndex][0])\n self.fileHandle.write(\"# \" + targetFile + \":\" + str(caller.lineno) + '\\n')\n self.tprint_raw(cmd, end)", "def print_frame(frame):\n if 'DHCP' in frame:\n bootp_fields = frame[BOOTP].fields\n dhcp_options = frame[DHCP].options\n type_value = dhcp_options[0][1]\n type_name = scapy.layers.dhcp.DHCPTypes[type_value]\n\n print(\"\\n\\nFRAME: {}\".format(frame.summary()))\n print(\"TYPE: DHCP-{}\".format(type_name))\n if type_value in DHCPSRVR_PACKETS:\n print(\"OPTIONS:\")\n check_bootp, check_dhcp = parse_bootp_fields(bootp_fields), parse_dhcp_opt(dhcp_options)\n if check_bootp is True or check_dhcp is True:\n log_frame(frame)\n print(\"LOGGED: ./{}\".format(PCAP_LOG))", "def draw(self, screen):\n # pylint: disable=invalid-name\n x = 0\n y = 0\n\n for node in self.call_stack:\n self._draw_text(screen, node.name, (x, y))\n x += 12\n y += 12", "def recursiveTraceJumptablese(ea, function=False):", "def DumpStackTracebacks():\n results = []\n id_name_map = {}\n for thread in threading.enumerate():\n id_name_map[thread.ident] = thread.name\n\n results.append(\n '*****\\n'\n '*\\n'\n '* Dumping debug information.\\n'\n '*\\n'\n '*****\\n')\n # pylint: disable=protected-access\n for thread_id, stack in sys._current_frames().items():\n results.append('Thread %s (id=%d):\\n' %\n (id_name_map.get(thread_id, 'unnamed-%d' % thread_id),\n thread_id))\n for filename, line_no, function_name, text in (\n traceback.extract_stack(stack)):\n # Same format as the usual Python stack trace, but indented\n # twice\n results.append(' File: \"%s\", line %d, in %s\\n' % (\n filename, line_no, function_name))\n if text:\n results.append(' %s\\n' % text.strip())\n\n results.append('***** End of debug information.\\n')\n\n return ''.join(results)", "def test_strframe():\n obj = pmisc.strframe\n\n def check_basic_frame(lines):\n fname = pmisc.normalize_windows_fname(os.path.realpath(__file__))\n assert lines[0].startswith(\"\\x1b[33mFrame object ID: 0x\")\n assert lines[1].startswith(\n \"File name......: {0}\".format(fname.replace(\".pyc\", \".py\"))\n )\n assert lines[2].startswith(\"Line number....: \")\n assert lines[3] == \"Function name..: test_strframe\"\n assert lines[4] == r\"Context........: [' fobj = inspect.stack()[0]\\n']\"\n assert lines[5] == \"Index..........: 0\"\n\n fobj = inspect.stack()[0]\n lines = obj(fobj).split(\"\\n\")\n check_basic_frame(lines)\n assert len(lines) == 6\n lines = [\n line\n for num, line in enumerate(obj(fobj, 
extended=True).split(\"\\n\"))\n if (num < 6) or line.startswith(\"f_\")\n ]\n check_basic_frame(lines)\n assert lines[6].startswith(\"f_back ID......: 0x\")\n assert lines[7].startswith(\"f_builtins.....: {\")\n assert lines[8].startswith(\"f_code.........: \" \"<code object test_strframe at \")\n assert lines[9].startswith(\"f_globals......: {\")\n assert lines[10].startswith(\"f_lasti........: \")\n assert lines[11].startswith(\"f_lineno.......: \")\n assert lines[12].startswith(\"f_locals.......: {\")\n if sys.hexversion < 0x03000000:\n assert lines[13] == \"f_restricted...: False\"\n assert lines[14].startswith(\"f_trace........: \")\n assert len(lines) == 15\n else:\n assert lines[13].startswith(\"f_trace........: \")\n assert len(lines) == 14", "def set_trace(self, frame=None):\r\n if frame is None:\r\n frame = sys._getframe().f_back\r\n # See pudb issue #52. If this works well enough we should upstream to\r\n # stdlib bdb.py.\r\n #self.reset()\r\n while frame:\r\n frame.f_trace = self.trace_dispatch\r\n self.botframe = frame\r\n frame = frame.f_back\r\n self.set_step()\r\n sys.settrace(self.trace_dispatch)", "def test_func_stack(self):\n cmd = \"deref $_stack()\"\n self.assertFailIfInactiveSession(gdb_run_cmd(cmd))\n res = gdb_start_silent_cmd(cmd)\n self.assertNoException(res)\n if is_64b():\n self.assertRegex(res, r\"\\+0x0*20: *0x0000000000000000\\n\")\n else:\n self.assertRegex(res, r\"\\+0x0.*20: *0x00000000\\n\")" ]
[ "0.74253505", "0.6774561", "0.6765047", "0.6575413", "0.65482455", "0.6473951", "0.630916", "0.6290664", "0.6155866", "0.6132151", "0.6068907", "0.60600847", "0.60045964", "0.5957227", "0.5937258", "0.59002703", "0.5870945", "0.58447737", "0.58314973", "0.5798143", "0.57620335", "0.56988525", "0.5584277", "0.5583397", "0.5547701", "0.55173206", "0.5514798", "0.55101913", "0.5474967", "0.5474739", "0.5468681", "0.5464733", "0.54352343", "0.54268134", "0.54205894", "0.54184425", "0.54066175", "0.53935844", "0.53933775", "0.53577304", "0.53524977", "0.53515464", "0.53478295", "0.5347167", "0.5346546", "0.5334472", "0.5324545", "0.53228873", "0.53149974", "0.5313097", "0.52862585", "0.52644306", "0.5255449", "0.525433", "0.525433", "0.52394396", "0.5237685", "0.52108985", "0.5203508", "0.519079", "0.5189169", "0.51844", "0.5183692", "0.51613754", "0.51317066", "0.51294696", "0.5120309", "0.51125544", "0.510915", "0.5105502", "0.5098274", "0.509016", "0.50868434", "0.5080997", "0.50795704", "0.50763386", "0.5075865", "0.5075768", "0.5055067", "0.50468063", "0.5044681", "0.5043695", "0.5041323", "0.50350285", "0.50305337", "0.5029684", "0.5027248", "0.5024051", "0.5022176", "0.5022176", "0.50160396", "0.5014771", "0.50125486", "0.5012219", "0.5008089", "0.5006604", "0.5005069", "0.5001487", "0.5000688", "0.49997783" ]
0.6162764
8
Copy `in_tree` to `out_tree`, checking selection(in_tree) for each event.
def tree_copy_selection(in_tree, out_tree, selection):
    for entry in in_tree:
        if selection(entry):
            out_tree.Fill()
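A minimal usage sketch for the function above, assuming PyROOT; the input file name, the tree name "events", and the pt branch used in the selection lambda are hypothetical placeholders, not part of the original entry. CloneTree(0) produces an empty output tree that shares the input tree's branch addresses, so out_tree.Fill() records whichever entry the iteration has currently loaded.

import ROOT

# open the (hypothetical) input file and fetch its tree
in_file = ROOT.TFile.Open("input.root")
in_tree = in_file.Get("events")

# the output file must be open before cloning so the clone attaches to it
out_file = ROOT.TFile("selected.root", "RECREATE")
out_tree = in_tree.CloneTree(0)  # same branches, zero entries

# keep only entries passing the (assumed) pt threshold
tree_copy_selection(in_tree, out_tree, lambda e: e.pt > 20.0)

out_file.Write()
out_file.Close()
in_file.Close()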
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_tree_checker(src, dst):\n copy_tree(src, dst)\n return True", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def _write (self, in_tree, dest):\n\t\t## Preparation:\n\t\tself._src_tree = in_tree\n\t\tself._dest_strm = dest\n\t\t## Main:\n\t\troot = in_tree.root\n\t\tif (not root):\n\t\t\troot = in_tree.get_centroid_nodes()[0]\n\t\tself._writeNode (root)", "def copy_tree(self, infile, outfile,\n preserve_mode=1, preserve_times=1, preserve_symlinks=0,\n level=1, condition=None):\n return copy_tree(\n infile, outfile,\n preserve_mode,preserve_times,preserve_symlinks,\n not self.force,\n dry_run=self.dry_run,\n condition=condition)", "def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whether tree is rooted\n root = node if pos == 'root' else node.parent if pos == 'basal' else None\n rooted = None if pos == 'derived' else (\n True if len(root.children) == 2 else False)\n\n if rooted:\n if pos == 'root':\n raise ValueError('Cannot walk from root of a rooted tree.')\n elif pos == 'basal':\n sibling = [x for x in node.siblings()][0]\n\n # direction of walking\n move = (('bottom' if src is sibling else 'top' if src in children\n else 'n/a') if rooted and pos == 'basal'\n else ('down' if src is parent else 'up' if 
src in children\n else 'n/a'))\n if move == 'n/a':\n raise ValueError('Source and node are not neighbors.')\n\n # create a new node\n res = TreeNode(node.name)\n\n # determine length of the new node\n res.length = (node.length if move == 'down'\n else src.length + node.length if move == 'bottom'\n else src.length) # up or top\n\n # determine support of the new node\n res.support = (node.support if move in ('down', 'bottom')\n else src.support)\n\n # append children except for src (if applies)\n res.extend([walk_copy(c, node) for c in children if c is not src])\n\n # append parent if walking up (except at root)\n if move == 'up' and pos != 'root':\n res.append(walk_copy(parent, node))\n\n # append sibling if walking from one basal node to another\n if move == 'top':\n res.append(walk_copy(sibling, node))\n\n return res", "def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):\n for entry in in_tree:\n key_value = getattr(entry, key)\n if not key_value in keys:\n out_tree.Fill()\n keys.add(key_value)", "def _tree_update(self, new_tree: Tree, event: Event):\n raise NotImplementedError()", "def convertTreeToCoveringTree( self, tree ):\n\n self.debug( \"convertTreeToCoveringTree: tree at start\" )\n if E.getLogLevel() >= 2: self.printTree( tree )\n \n ntree = self.addChildren( tree )\n \n #######\n # descend tree and add new domains\n # if domain has only a single child: delete the child and\n # rewire\n for t in ntree:\n info, children = t\n \n if info:\n node, parent, level, ranges = info\n \n if len(children) == 1:\n ntree[children[0]][0] = None\n ntree[node][1] = ntree[children[0]][1]\n \n #######\n # build new tree with new node identifiers\n current_node = 0\n covering_tree = []\n \n levels = map( lambda x: [], [0] * len(tree))\n \n for t in ntree:\n info, children = t\n \n if not info: continue\n node, parent, level, ranges = info\n \n if len(children) == 2:\n \n # add new node to tree, rename parent in children and\n # set borders\n leftchild = children[0]\n rightchild = children[1] \n \n # change left child\n lnode, lparent, llevel, lranges = ntree[leftchild][0]\n rnode, rparent, rlevel, rranges = ntree[rightchild][0] \n \n if ranges:\n lranges, rranges = self.getCoveringRanges( lranges, rranges, ranges )\n else:\n continue\n \n # change left child\n ntree[leftchild][0]= (None, current_node, level + 1, lranges) \n \n # change right child \n # cnode, cparent, clevel, cranges = ntree[rightchild][0]\n ntree[rightchild][0]= (None, current_node, level + 1, rranges )\n \n covering_tree.append( [level, parent, 0, 0, ranges] )\n levels[level].append( current_node )\n \n current_node += 1\n \n max_range = covering_tree[0][4][0][1]\n \n self.debug( \"convertTreeToCoveringTree: tree before removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n ###################################\n ## remove small fragments\n ## has to be done per level in order to be consistent\n ## done here and not during matrix decomposition, so that\n ## matrix needs not to be permuted more than once.\n for l in range(0, len(levels)):\n if len(levels[l]) == 0: break\n # collect all domains per level in a list of the form\n # (from, to, node)\n ranges = []\n for node in levels[l]:\n ranges += map(lambda x: (x[0], x[1], node), covering_tree[node][4])\n covering_tree[node][4] = []\n \n # and remove small fragments\n new_ranges = self.removeSmallRanges( ranges )\n \n # and put back into tree if there is more than one range\n for (xfrom, xto, node) in new_ranges:\n 
covering_tree[node][4].append( (xfrom, xto) )\n \n ###################################\n ## delete nodes with empty ranges or only a single child.\n ## renumber nodes so that there are no gaps\n\n self.debug( \"convertTreeToCoveringTree: after removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n return self.collapseTree( covering_tree )", "def test_copy_button_clicked_with_no_selection_on_to_task_tree_view(self):\n # select one task in from_task_tree_view\n\n # Select Task4 in from_task_tree_view\n selection_model = self.dialog.from_task_tree_view.selectionModel()\n model = self.dialog.from_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.from_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.from_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n self.assertEqual(PatchedMessageBox.called_function, '')\n\n # now try to copy it\n QTest.mouseClick(self.dialog.copy_push_button, Qt.LeftButton)\n\n self.assertEqual(PatchedMessageBox.called_function, 'critical')\n self.assertEqual(PatchedMessageBox.title, 'Error')\n self.assertEqual(PatchedMessageBox.message,\n 'Please select a task from <b>To Task</b> list')", "def selection_correction_method2(tree, scale, h_in, h_out):\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n RT = event.DD_Rise[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n energy_S15 = event.DD_AmplADU[S15_ch]\n if cut[0]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n energy = energy_S15*scale\n h_in.Fill(energy)\n cut[0]=1\n if cut[1]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and ((onset>=15 and onset<=36) or (onset>=50 and onset<=110)):\n energy = energy_S15*scale\n h_out.Fill(energy)\n cut[1]=1", "def command_copytree(args):\n for srcdir in args.srcdirs:\n basename = os.path.basename(srcdir)\n destdir2 = os.path.normpath(os.path.join(args.destdir, basename))\n if os.path.exists(destdir2):\n shutil.rmtree(destdir2)\n sys.stdout.write(\"copytree: %s => %s\\n\" % (srcdir, destdir2))\n shutil.copytree(srcdir, destdir2)\n return 0", "def test_on_copy_not_on_root():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(5)\n builder.add_child(6, move=True)\n\n _ = builder.build()\n builder.add_child(7)\n\n t = builder.build()\n assert_tree_structure(t, {(): 0, (0, ): 5, (1, ): 6, (1, 0): 7})", "def makeTree(self):\n return makeTree(self.events,self.outTree)", "def tree_removeDeadBranches():\n nonlocal d_tree\n d_tree = { k : v for k, v in d_tree.items() if v}\n # By creating a new binding for 'd_tree', we have effectively\n # severed the connection back to the original dictionary.\n # We now need to copy this d_tree to the self.d_inputTree\n # self.d_outputTree structures\n self.d_inputTree = d_tree\n self.d_outputTree = self.d_inputTree.copy()", "def cpr(src, dst):\n shutil.copytree(src, dst)", "def copytree(src, dest):\n shutil.copytree(src, dest)\n restorecon(dest, recursive=True)", "def selection_correction_method1(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n 
RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break\n return h_in, h_out", "def copy_subtree(src, dst):\n for src_f in os.listdir(src):\n src_path = os.path.join(src, src_f)\n if os.path.isdir(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copytree(src_path, dst_path)\n else:\n ProcessJson.copy_subtree(src_path, dst_path)\n elif os.path.isfile(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copy(src_path, dst_path)", "def selection_correction_method1_v2(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break", "def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or 
zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree", "def test_copy_button_clicked_with_same_task_is_selected_in_both_sides(self):\n # select one task in from_task_tree_view\n\n # Select Task4 in from_task_tree_view\n selection_model = self.dialog.from_task_tree_view.selectionModel()\n model = self.dialog.from_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.from_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.from_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n # Select Task4 in to_task_tree_view\n selection_model = self.dialog.to_task_tree_view.selectionModel()\n model = self.dialog.to_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.to_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.to_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n self.assertEqual(PatchedMessageBox.called_function, '')\n\n # now try to copy it\n QTest.mouseClick(self.dialog.copy_push_button, Qt.LeftButton)\n\n self.assertEqual(PatchedMessageBox.called_function, 'critical')\n self.assertEqual(PatchedMessageBox.title, 'Error')\n self.assertEqual(PatchedMessageBox.message,\n 'Please select two different tasks')", "def range(self, event):\r\n \r\n p = (event.x, self.toCartesian(event.y))\r\n \r\n if self.selectedRegion is None:\r\n self.selectedStart = Region(p[X],p[Y], p[X],p[Y])\r\n self.selectedRegion = self.selectedStart.unionPoint(p)\r\n \r\n self.paint()\r\n \r\n # return (node,sub-tree) where sub-tree is True if draining entire tree\r\n # rooted at node. 
Draw these as shaded red rectangle to identify whole\r\n # sub-tree is selected.\r\n for pair in self.tree.range(self.selectedRegion):\r\n p = pair[0].point\r\n \r\n if pair[1]:\r\n self.canvas.create_rectangle(pair[0].region.x_min, self.toTk(pair[0].region.y_min), \r\n pair[0].region.x_max, self.toTk(pair[0].region.y_max),\r\n fill='Red', stipple='gray12')\r\n else:\r\n self.canvas.create_rectangle(p[X] - BoxSize, self.toTk(p[Y]) - BoxSize, \r\n p[X] + BoxSize, self.toTk(p[Y]) + BoxSize, fill='Red')\r\n\r\n self.queryRect = self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min), \r\n self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max), \r\n outline='Red', dash=(2, 4))", "def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)", "def test_after_creation_copy():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(2, move=True)\n builder.add_child(13)\n builder.move_to_parent()\n builder.add_child(7)\n\n t1 = builder.build()\n\n builder.move_to_root()\n builder.set_data(4)\n builder.add_child(3, move=True)\n builder.add_child(15)\n\n t2 = builder.build()\n\n assert t2 is not t1\n assert t2[0] is not t1[0]\n assert t2[0][0] is not t1[0][0]\n assert t2[1] is not t1[1]\n\n assert t2.data == 4\n assert t2[0].data == 2\n assert t2[0][0].data == 13\n assert t2[1].data == 7\n assert t2[2].data == 3\n assert t2[2][0].data == 15\n\n assert len(t2) == 3\n assert len(t2[0]) == 1\n assert len(t2[1]) == 0\n assert len(t2[2]) == 1", "def onSelectionChanging(self, event):\n\t\tif self.ignore:\n\t\t\tevent.Skip()\n\t\t\treturn\n\t\tif not self.multiSelect and not self.programmatic:\n\t\t if platform.system() not in [\"Darwin\", \"Linux\"]: \n\t\t\t self.tree.UnselectAll()\n\t\titem = event.GetItem()\n\t\tif not item.IsOk():\n\t\t\tLogging.info(\"Item %s is not ok\" % str(item), kw = \"io\")\n\t\t\treturn\n\t\t\t\t\n\t\tobj = self.tree.GetPyData(item)\n\t\tif obj == \"1\":\n\t\t\t#self.tree.UnselectItem(item)\n\t\t\tevent.Veto()\n\t\t\treturn\n\t\telif obj == \"2\":\n\t\t\t# Select it's children\n\t\t\tself.ignore = 1\n\t\t\tself.tree.UnselectItem(item)\n\t\t\tcitem, cookie = self.tree.GetFirstChild(item)\n\t\t\twhile citem.IsOk():\n\t\t\t\tif not self.tree.IsSelected(citem):\n\t\t\t\t\tself.tree.ToggleItemSelection(citem)\n\t\t\t\tcitem = self.tree.GetNextSibling(citem) \n\t\t\tevent.Veto()\n\t\t\tself.ignore = 0", "def copy_tree(t):\n return tree(label(t), [copy_tree(b) for b in branches(t)])", "def copySpecial():\n depNode = nuke.dependencies(nuke.selectedNode())\n dependNode = nuke.dependentNodes(nuke.INPUTS or nuke.HIDDEN_INPUTS or nuke.EXPRESSIONS, [nuke.selectedNode()])\n i = 0\n if dependNode[0].Class() in ['Scene', 'MergeGeo']:\n i = nuke.inputs(dependNode[0])+1\n\n nuke.nodeCopy(nukescripts.cut_paste_file())\n\n for node in nuke.allNodes():\n node['selected'].setValue(0)\n\n nuke.nodePaste(nukescripts.cut_paste_file())\n\n newNode = nuke.selectedNode()\n newNode.setInput(0, depNode[0])\n dependNode[0].setInput(i+1, newNode)", "def visit(self):\n self.tree = self.recursive_visit(self.tree)\n # assert self.current_line == self.tree.absolute_bounding_box.bottom_right.line", "def see(self, cut):\n newptree = PTree()\n newptree._root = self._root.see(cut)\n return newptree", "def analyze(self, event): \n trgObjects = Collection(event,self.trgColl)\n if 
self.trgMuMinPt!=None and self.trgMuMinPt>0:\n trgObjIdx = [ idx for idx,trg in enumerate(trgObjects) if getattr(trg,\"pt\")>self.trgMuMinPt and getattr(trg,self.trgBranch)==1]\n \n else:\n trgObjIdx = [ idx for idx,trg in enumerate(trgObjects) if getattr(trg,self.trgBranch)==1]\n \n \n if len(trgObjIdx)==0 and self.skipNoTrgEvt: \n return False\n\n passedPath= [ path for path in self.selectionPathList if getattr(event,path)]\n if len(self.selectionPathList)>0 and len(passedPath)==0:\n if self.skipNoTrgEvt:\n return False\n trgObjIdx=[]\n if len(trgObjIdx)==0:\n for br in self.branches:\n self.out.fillBranch(\"%s_%s\"%(self.outputColl,br),[])\n for col in self.recoColl:\n self.out.fillBranch(\"%s_isTrg\"%(col),0)\n if self.skipProbe or self.skipTag:\n return False\n else:\n Bmu_fired=0\n # print trgObjIdx\n for idx,col in zip(self.recoIdx,self.recoColl):\n out=getattr(event,idx)\n if out in trgObjIdx:\n self.out.fillBranch(\"%s_isTrg\"%(col),1)\n Bmu_fired+=1\n else:\n self.out.fillBranch(\"%s_isTrg\"%(col),0)\n\n if Bmu_fired==0 and self.skipProbe: \n return False \n if Bmu_fired>0 and Bmu_fired==len(trgObjIdx) and self.skipTag:\n return False\n \n for br in self.branches:\n out=[ getattr(trgObjects[idx],br) for idx in trgObjIdx ]\n self.out.fillBranch(\"%s_%s\"%(self.outputColl,br),out)\n return True", "def copy(self):\n new_tree = Tree(support_label=self._support_label, remove_name_quotes=self._remove_name_quotes)\n new_tree.name = self.name\n new_tree._is_cladogram = self._is_cladogram\n new_tree._cladogram_branch = self._cladogram_branch\n new_tree._node_id_template = self._node_id_template\n new_tree._node_ids = self._node_ids.copy()\n new_tree._node_id_index = self._node_id_index\n new_tree.root = self.root.copy(new_tree)\n self.copy_nodes(self.root, new_tree.root, new_tree)\n new_tree.process_tree_nodes()\n return new_tree", "def copy(self, new_tree):\n new_node = new_tree.new_tree_node(parent=self.parent, node_id=self.id)\n new_node.name = self.name\n new_node.branch = self.branch\n new_node.support = self.support\n new_node.support_type = self.support_type\n new_node.comment = self.comment\n new_node.children = self.children[::]\n new_node._been_processed = self._been_processed\n return new_node", "def prune_tree( cls, tree, begin_index, end_index ):\n \n begin_path = tree.leaf_treeposition(begin_index)\n end_path = tree.leaf_treeposition(end_index)\n\n current_node = tree[begin_path[:-1]]\n end_node = tree[end_path[:-1]]\n \n new_tree = ParentedTree('(' + tree.node + ')')\n ## Initialize new tree\n l = []\n current_new = new_tree\n current_old = tree\n for i in xrange(len(begin_path)-1):\n if type(current_old[begin_path[i]]) != str:\n current_new.insert(0, ParentedTree('('+current_old[begin_path[i]].node +')'))\n current_new = current_new[0]\n current_old = current_old[begin_path[i]]\n \n while current_old != end_node:\n if not (type(current_old[0]) == str or type(current_old[0]) == unicode):\n current_old = current_old[0]\n current_new.insert( 0, ParentedTree('('+current_old.node +')'))\n current_new = current_new[0]\n else:\n current_new.insert(0, current_old[0])\n while len(current_old.parent()) == current_old.parent_index() + 1:\n current_old = current_old.parent()\n current_new = current_new.parent()\n\n current_old = current_old.parent()[current_old.parent_index() + 1]\n current_new.parent().insert( current_new.parent_index() + 1,\n ParentedTree('('+current_old.node +')'))\n \n current_new = current_new.parent()[current_new.parent_index() + 1]\n current_new.insert(0, 
current_old[0])\n# print current_new\n return new_tree", "def _play_new_event_on_tree(self, event: Event, matches: OutputStream):\n raise NotImplementedError()", "def copy_tree(src, dst, verbose=False):\n\n print('Copying {} to {}'.format(src, dst))\n for src_dir, sub_dirs, basenames in tf.io.gfile.walk(src):\n rel_dir = os.path.relpath(src_dir, src)\n dst_dir = os.path.join(dst, rel_dir)\n for sub_dir in sorted(sub_dirs):\n path = os.path.join(dst, rel_dir, sub_dir)\n print('Make dir {}'.format(path))\n tf.io.gfile.makedirs(path)\n if basenames:\n print('Copying {} files from {} to {}'.format(\n len(basenames), src_dir, dst_dir))\n for basename in basenames:\n src_path = os.path.join(src_dir, basename)\n dst_path = os.path.join(dst_dir, basename)\n if verbose:\n print('Copying {} to {}'.format(src_path, dst_path))\n tf.io.gfile.copy(src_path, dst_path)", "def vs_create_tree(event):\n get_vs(event['c']).create_tree()", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n parser = E.OptionParser( version = \"%prog version: $Id: tree2tree.py 2782 2009-09-10 11:40:29Z andreas $\",\n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"-d\", \"--value\", dest=\"value\", type=\"float\",\n help=\"normalizing value.\" )\n parser.add_option(\"-m\", \"--method\", dest=\"methods\", type=\"string\",\n help=\"\"\"methods to apply [normalize|divide-by-tree|divide-by-tree|rename|set-uniform-branch-length|extract-with-pattern|build-map|remove-pattern|unroot|midpoint-root|balanced-root|add-node-names\"\"\" )\n parser.add_option(\"-2\", \"--filename-tree2\", dest=\"filename_tree2\", type=\"string\",\n help=\"filename with second tree.\" )\n parser.add_option(\"-o\", \"--outgroup\", dest=\"outgroup\", type=\"string\",\n help=\"reroot with outgroup before processing.\")\n parser.add_option(\"-p\", \"--parameters\", dest=\"parameters\", type=\"string\",\n help=\"parameters for methods.\")\n parser.add_option(\"-e\", \"--template-identifier\", dest=\"template_identifier\", type=\"string\",\n help=\"\"\"template identifier [%default]. A %i is replaced by the position\n of the sequence in the file.\"\"\" )\n parser.add_option(\"-i\", \"--invert-map\", dest=\"invert_map\", action=\"store_true\",\n help=\"\"\"invert map.\"\"\")\n parser.add_option(\"-f\", \"--filter\", dest=\"filter\", type=\"choice\",\n choices=(\"max-branch-length\",),\n help=\"filter trees\")\n parser.add_option(\"--output-format\", dest=\"output_format\", type=\"choice\",\n choices=( \"nh\", \"nhx\" ),\n help=(\"output format for trees.\"))\n parser.add_option(\"-b\", \"--no-branch-lengths\", dest=\"with_branchlengths\", action=\"store_false\",\n help=\"\"\"do not write branchlengths. 
Per default, 0 branch lengths are added.\"\"\")\n\n parser.set_defaults(\n value = 0,\n methods = \"\",\n filename_tree2 = None,\n outgroup = None,\n parameters = \"\",\n template_identifier=\"ID%06i\",\n write_map = False,\n invert_map = False,\n filter = None,\n output_format = \"nh\",\n with_branchlengths = True,\n )\n\n (options, args) = E.Start( parser, add_pipe_options = True )\n\n options.methods = options.methods.split(\",\")\n options.parameters = options.parameters.split(\",\") \n\n other_trees = []\n ## read other trees\n if options.filename_tree2:\n other_nexus = TreeTools.Newick2Nexus( open(options.filename_tree2, \"r\") )\n if len(other_nexus.trees) > 0:\n other_trees = other_nexus.trees\n else:\n other_tree = other_nexus.trees[0]\n other_trees = [other_tree]\n\n lines = sys.stdin.readlines()\n\n ntotal, nskipped, ntree = 0, 0, 0\n\n if options.filter:\n\n nexus = TreeTools.Newick2Nexus( lines )\n \n new_trees = []\n\n value = float(options.parameters[0])\n del options.parameters[0]\n\n ## decision functions: return true, if tree\n ## is to be skipped\n if options.filter == \"max-branch-length\":\n f = lambda x: x >= value\n\n for tree in nexus.trees:\n ntotal += 1\n\n for id, node in tree.chain.items():\n if f(node.data.branchlength):\n nskipped += 1\n break\n else:\n new_trees.append(tree)\n ntree += 1\n \n nexus.trees = new_trees\n\n options.stdout.write(TreeTools.Nexus2Newick( nexus, with_names = True ) + \"\\n\" )\n \n else:\n\n ## iterate over chunks\n chunks = filter( lambda x: lines[x][0] == \">\", range(len(lines)))\n\n map_old2new = {}\n\n if chunks:\n for c in range(len(chunks)-1):\n a, b = chunks[c], chunks[c+1]\n options.stdout.write( lines[a] )\n a += 1\n Process( lines[a:b], other_trees, options, map_old2new,ntree )\n\n options.stdout.write( lines[chunks[-1]] )\n t, s, ntree = Process( lines[chunks[-1]+1:], other_trees, options, map_old2new, ntree )\n ntotal += t\n nskipped += s\n else:\n ntotal, nskipped, ntree = Process( lines, other_trees, options, map_old2new,ntree )\n\n if options.write_map:\n p = options.parameters[0]\n if p:\n outfile = open(p, \"w\")\n else:\n outfile = options.stdout\n\n outfile.write(\"old\\tnew\\n\") \n for old_id, new_id in map_old2new.items():\n outfile.write(\"%s\\t%s\\n\" % (old_id, new_id) )\n if p:\n outfile.close()\n\n if options.loglevel >= 1:\n options.stdlog.write( \"# ntotal=%i, nskipped=%i\\n\" % (ntotal, nskipped))\n \n E.Stop()", "def __branch_out(self, pos, tree) -> List[Tuple[int, int]]:\n moves = self.__new_branches(self.__free_spots(pos, tree))\n for x, y in moves:\n self.__activate(x, y, tree)\n return moves", "def testExtractSpanningTree(self):\n prevNewick1 = NXNewick().writeString(self.mcTree1)\n # Check a dead-simple spanning tree with 3 closely related leaves.\n spanHCB = self.mcTree1.extractSpanningTree([\"HUMAN\", \"CHIMP\", \"BABOON\"])\n # Check that the existing tree hasn't been modified (OK, a bit\n # silly, but just in case).\n self.assertEqual(NXNewick().writeString(self.mcTree1), prevNewick1)\n # Check the actual spanning tree.\n self.assertEqual(NXNewick().writeString(spanHCB), \"((HUMAN:0.006969,CHIMP:0.009727)Anc7:0.025291,BABOON:0.044568)Anc3;\")\n\n # Now test a more complicated tree, where we should remove as\n # many of the ancestors as possible (they will add extra\n # losses for no reason!).\n spanHCC = self.mcTree1.extractSpanningTree([\"HUMAN\", \"CHIMP\", \"CAT\"])\n self.assertEqual(NXNewick().writeString(self.mcTree1), prevNewick1)\n self.assertEqual(NXNewick().writeString(spanHCC), 
\"((HUMAN:0.006969,CHIMP:0.009727)Anc7:0.158551,CAT:0.197381)Anc0;\")", "def do(self, cut):\n newptree = PTree()\n newptree._root = self._root.do(cut)\n return newptree", "def test_copy_button_is_working_properly(self):\n # Select Task4 in from_task_tree_view\n selection_model = self.dialog.from_task_tree_view.selectionModel()\n model = self.dialog.from_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.from_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.from_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n # Select Task8 in to_task_tree_view\n selection_model = self.dialog.to_task_tree_view.selectionModel()\n model = self.dialog.to_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.to_task_tree_view.expand(project1_item.index())\n\n task2_item = project1_item.child(1, 0)\n self.dialog.to_task_tree_view.expand(task2_item.index())\n\n task8_item = task2_item.child(1, 0)\n\n selection_model.select(\n task8_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n # before copying anything\n # check if there are no versions under Task8\n self.assertTrue(self.test_task8.versions == [])\n self.assertEqual(len(self.test_task4.versions), 9)\n\n take_name_count = DBSession\\\n .query(distinct(Version.take_name))\\\n .filter(Version.task == self.test_task4)\\\n .count()\n self.assertEqual(take_name_count, 3)\n\n # for testing purposes set question result to QtGui.QMessageBox.Yes\n PatchedMessageBox.Yes = self.original_message_box.Yes\n PatchedMessageBox.No = self.original_message_box.No\n PatchedMessageBox.question_return_value = self.original_message_box.Yes\n\n # copy it\n QTest.mouseClick(self.dialog.copy_push_button, Qt.LeftButton)\n\n # and expect as many versions under task8 as the task4 take_names\n # check if it still has 9 versions\n self.assertEqual(len(self.test_task4.versions), 9)\n self.assertEqual(\n len(self.test_task8.versions),\n take_name_count\n )\n\n # check if files are copied there\n for version in self.test_task8.versions:\n self.assertTrue(os.path.exists(version.absolute_full_path))", "def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")", "def collapseTree(self, tree):\n \n self.setChildren(tree)\n map_old2new = {}\n \n index = 1\n new_tree = []\n \n ## write root\n new_tree.append( [0, 0, 0, 0, tree[0][4]] )\n map_old2new[0] = 0\n \n ## PrintTree(tree)\n \n for old_node in range(1, len(tree)):\n level, parent, left_child, right_child, ranges = tree[old_node]\n \n ## if only a single child of a parent, skip this 
node\n ## if ranges are empty: skip this node\n if tree[parent][2] == 0 or tree[parent][3] == 0 or len(ranges) == 0:\n map_old2new[old_node] = map_old2new[parent]\n continue\n \n map_old2new[old_node] = index\n \n ## add to new tree\n new_tree.append( [new_tree[map_old2new[parent]][0] + 1, map_old2new[parent], 0, 0, ranges] )\n \n index += 1\n \n ## PrintTree( new_tree )\n \n ## PrintTree( new_tree )\n ## print \"#########\"\n \n return new_tree", "def copy_tree(src, dst):\n if not os.path.isdir(src):\n raise Exception, \\\n \"cannot copy tree '%s': not a directory\" % src\n try:\n names = os.listdir(src)\n except os.error, (_, errstr):\n raise Exception, \\\n \"error listing files in '%s': %s\" % (src, errstr)\n\n makedirs(dst)\n\n outputs = []\n\n for n in names:\n src_name = os.path.join(src, n)\n dst_name = os.path.join(dst, n)\n\n if os.path.islink(src_name):\n link_dest = os.readlink(src_name)\n \n os.symlink(link_dest, dst_name)\n outputs.append(dst_name)\n\n elif os.path.isdir(src_name):\n outputs.extend(copy_tree(src_name, dst_name))\n else:\n copyfile(src_name, dst_name)\n \n outputs.append(dst_name)\n\n return outputs", "def rf_treeMode(self, selTree, treeDict):\n for node in treeDict['tree']['_order']:\n newItem = TreeNode(**treeDict['tree'][node])\n if len(node.split('/')) == 1:\n self.addTopLevelItem(newItem)\n else:\n parent = self._getItemFromTreePath('/'.join(node.split('/')[:-1]))\n parent.addChild(newItem)\n if getattr(newItem, 'nodeType') == 'shotNode':\n newItem._itemPath = node\n newItem._dataPath = os.path.join(self.pm._treePath, selTree)\n for fld in node.split('/'):\n newItem._dataPath = os.path.join(newItem._dataPath, fld)\n newItem._dataPath = pFile.conformPath(newItem._dataPath)\n newItem._dataFile = \"%s.py\" % newItem._dataPath\n for step in treeDict['steps']:\n newStep = TreeNode(nodeType='step', nodeLabel=step, nodeName=step)\n newStep._tree = selTree\n newStep._step = step\n newStep._dataPath = newItem._dataPath\n newStep._ltPath = pFile.conformPath(os.path.join(newStep._dataPath, 'lt', step))\n newStep._dataFile = newItem._dataFile\n newItem.addChild(newStep)", "def menu_save_copy(self, event=None):\n if self.app.children:\n self.app.childActive.saveCopy()", "def copy_tree(source_directory, target_directory):\n try:\n shutil.copytree(source_directory, target_directory)\n except Exception as e:\n print('Error found: {}'.format(e))", "def events_merging(file_names, tree_name):\n\n branches_to_read = get_all_branch_names(file_names[0], tree_name)\n branches = { branch_name: [] for branch_name in branches_to_read }\n for file_name in file_names:\n tree = uproot.open(file_name + \":\" + tree_name)\n for branch_name in branches_to_read:\n branches[branch_name].append(tree[branch_name].array())\n\n tree_branches = { k: ak.concatenate(v, axis=0) for k, v in branches.items() }\n\n return tree_branches", "def update_in_out1(filename):\r\n import shutil\r\n\r\n with open(filepath(filename, 'Edges'), 'r',\r\n encoding='utf8') as edge_file:\r\n edge_reader = csv.reader(edge_file, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n\r\n # edges = [l for l in edge_reader] # List of lists\r\n \r\n for predecessor, successor in edge_reader:\r\n chk_append_in_out1(successor, predecessor, 'Predecessors')\r\n chk_append_in_out1(predecessor, successor, 'Successors')\r\n\r\n listtocheck = os.listdir(os.path.abspath(\r\n '/home/cyneo/Work/Scans/Processed Data/Word Dictionary/')\r\n )\r\n\r\n for item in listtocheck:\r\n filename = os.path.abspath(\r\n 
'/home/cyneo/Work/Scans/Processed Data/Word Dictionary/' + item)\r\n tempfile = os.path.abspath(\r\n '/home/cyneo/Work/Scans/Processed Data/Word Dictionary/'\r\n + 'tmp ' + item)\r\n\r\n with open(filename, 'r', encoding='utf8') as word_file:\r\n file_reader = csv.reader(word_file, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n list_of_things = [thing[0] for thing in file_reader]\r\n set_of_things = set(list_of_things)\r\n \r\n with open(tempfile, 'w', encoding='utf8') as temp_file:\r\n temp_writer = csv.writer(temp_file, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n for item in set_of_things:\r\n temp_writer.writerow([item])\r\n \r\n shutil.move(tempfile, filename)", "def clean():\n new_tree = None", "def copytree(src, dst, overwrite=False, changed_only=True):\n assert os.path.isdir(src), \\\n (\"Source path `%s` does not name an existing directory\" % src)\n errors = []\n if not os.path.exists(dst):\n os.makedirs(dst)\n for name in os.listdir(src):\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n try:\n if os.path.isdir(srcname):\n errors.extend(\n copytree(srcname, dstname, overwrite, changed_only))\n else:\n copyfile(srcname, dstname)\n except (IOError, os.error) as why:\n errors.append((srcname, dstname, why))\n return errors", "def copytree2(src, dst, symlinks=False, ignore=None):\n\tnames = os.listdir(src)\n\tcpy_err={'flag':True, 'error':None}\n\tif ignore is not None:\n\t\tignored_names = ignore(src, names)\n\telse:\n\t\tignored_names = set()\n\n\tif not os.path.exists(dst):\n\t\ttry:\n\t\t\tos.makedirs(dst)\n\t\texcept:\n\t\t\tcpy_err= {'flag':False, 'error':\"Impossible to create a directory, PLATO is accountered a problem, contact the administrator\"}\n\t\t\treturn cpy_err\n\t\n\tfor name in names:\n\t\tif name in ignored_names:\n\t\t\tcontinue\n\t\tsrcname = os.path.join(src, name)\n\t\tdstname = os.path.join(dst, name)\n\t\ttry:\n\t\t\tif symlinks and os.path.islink(srcname):\n\t\t\t\tlinkto = os.readlink(srcname)\n\t\t\t\tos.symlink(linkto, dstname)\n\t\t\telif os.path.isdir(srcname):\n\t\t\t\tcopytree(srcname, dstname, symlinks, ignore)\n\t\t\telse:\n\t\t\t\tcopy2(srcname, dstname)\n\t\t\t# XXX What about devices, sockets etc.?\n\t\texcept (IOError, os.error) as why:\n\t # errors.append((srcname, dstname, str(why)))\n\t\t\tcpy_err={'flag':False,'error':str(why)}\n\t\t\treturn cpy_err\n\t\t# catch the Error from the recursive copytree so that we can\n\t\t# continue with other files\n\t\texcept Error as err:\n\t\t\tcpy_err={'flag':False,'error':str(err.args[0])}\n\t\t\treturn cpy_err\n\t\t # errors.extend(err.args[0])\n\ttry:\n\t\tcopystat(src, dst)\n\texcept OSError as why:\n\t\t# errors.extend((src, dst, str(why)))\n\t\tcpy_err={'flag':False,'error':str(why)}\n\t\treturn cpy_err\n\t# if errors:\n\t# raise Error(errors)\n\t\n\treturn cpy_err", "def walk_copy_tree ( source, dest, subdir_root=False, **walk_kwargs ):\n source_path = os.path.abspath ( source )\n dest_path = os.path.abspath ( dest )\n\n get_entry = lambda path: (\n path, os.lstat ( path ) if os.path.lexists ( path ) else None\n )\n get_stat_list = lambda s, d, names: (\n [ ( get_entry ( s + name ), get_entry ( d + name ) ) for name in names ]\n )\n\n for root, root_rel, dirnames, filenames in walk_relpath (\n source_path, include_root=subdir_root, **walk_kwargs\n ):\n root_dest = ( dest + os.sep + root_rel if root_rel else dest )\n\n dirs = get_stat_list ( root + os.sep, root_dest + os.sep, dirnames )\n files = get_stat_list ( root + os.sep, root_dest + os.sep, filenames )\n\n 
yield root, root_dest, root_rel, dirs, files, dirnames", "def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_output_node_gene(0, cfg)\n gene1_act = deepcopy(gene1.activation)\n gene1_agg = deepcopy(gene1.aggregation)\n gene1_bias = deepcopy(gene1.bias)\n gene2_act = deepcopy(gene2.activation)\n gene2_agg = deepcopy(gene2.aggregation)\n gene2_bias = deepcopy(gene2.bias)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.activation = 'c'\n gene3.aggregation = 'c'\n gene3.bias = -10\n \n # Check for unchanged parents\n self.assertEqual(gene1.activation, gene1_act)\n self.assertEqual(gene1.aggregation, gene1_agg)\n self.assertEqual(gene1.bias, gene1_bias)\n self.assertEqual(gene2.activation, gene2_act)\n self.assertEqual(gene2.aggregation, gene2_agg)\n self.assertEqual(gene2.bias, gene2_bias)", "def climb_tree():\n global UP_TREE\n westdesc = \"\"\n eastdesc = \"\"\n northdesc = \"\"\n southdesc = \"\"\n UP_TREE = True\n westinvalid = False\n eastinvalid = False\n northinvalid = False\n southinvalid = False\n\n\n printmessage(\"You climb the large tree to get a look at your surroundings.\", 5, MAGENTA, 2)\n\n if ZERO_BASE_PLYR_POS in range(0, 10):\n northinvalid = True\n if ZERO_BASE_PLYR_POS in range(90, 100):\n southinvalid = True\n if ZERO_BASE_PLYR_POS in range(0, 91, 10):\n eastinvalid = True\n if ZERO_BASE_PLYR_POS in range(9, 100, 10):\n westinvalid = True\n \n if not westinvalid: \n westpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 1]\n if HAS_COMPASS: \n DISCOVERED[ZERO_BASE_PLYR_POS + 1] = \"Y\"\n if westpos == 10: # Water\n westdesc = TREE_VIEWS[2]\n else:\n westdesc = TREE_VIEWS[1]\n\n westpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 1]\n if westpos == 1:\n westdesc = TREE_VIEWS[3]\n elif westpos == 2:\n westdesc = TREE_VIEWS[4]\n else:\n westdesc = TREE_VIEWS[5]\n\n if not eastinvalid:\n eastpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 1]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 1] = \"Y\"\n if eastpos == 10: # Water\n eastdesc = TREE_VIEWS[2]\n else:\n eastdesc = TREE_VIEWS[1]\n\n eastpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 1]\n if eastpos == 1:\n eastdesc = TREE_VIEWS[3]\n elif eastpos == 2:\n eastdesc = TREE_VIEWS[4]\n else:\n eastdesc = TREE_VIEWS[6]\n\n\n if not northinvalid:\n northpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS - 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS - 10] = \"Y\"\n if northpos == 10: # Water\n northdesc = TREE_VIEWS[2]\n else:\n northdesc = TREE_VIEWS[1]\n\n northpos = ENEMY_LIST[ZERO_BASE_PLYR_POS - 10]\n if northpos == 1: # bear\n northdesc = TREE_VIEWS[3]\n elif northpos == 2: # grizzly\n northdesc = TREE_VIEWS[4]\n else:\n northdesc = TREE_VIEWS[7]\n\n\n if not southinvalid:\n southpos = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS + 10]\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS + 10] = \"Y\"\n if southpos == 10: # Water\n southdesc = TREE_VIEWS[2]\n else:\n southdesc = TREE_VIEWS[1]\n\n southpos = ENEMY_LIST[ZERO_BASE_PLYR_POS + 10]\n if southpos == 1: # bear\n southdesc = TREE_VIEWS[3]\n elif southpos == 2: # grizzly\n southdesc = TREE_VIEWS[4]\n else:\n southdesc = TREE_VIEWS[8]\n\n clear_messages(0)\n printmessage(\"West: \" + westdesc, 2, GREEN, 0)\n printmessage(\"East: \" + eastdesc, 3, YELLOW, 0)\n printmessage(\"North: \" + northdesc, 4, CYAN, 0)\n printmessage(\"South: \" 
+ southdesc, 5, MAGENTA, 0)\n #show_movement(True, 10)\n update_player_on_map()\n pause_for_keypress()\n clear_messages(0)", "def traverse_up(node, node_callback):\n node_callback(node)\n for e in node.edges_out:\n traverse_up(e.dst, node_callback)", "def _from_etree_to_tree(self, lang='en-US'):\n #clear existing tree\n# for i in self.tree.get_children():\n# self.tree.delete(i)\n self.tree.delete(*self.tree.get_children())\n #now insert old tree\n for category in self.trout:\n tagged = category.get('tags')\n if tagged is None:\n tagged = \"('{}',)\".format(category.tag)\n if tagged[-1] == ')':\n inserttext = tagged[2:3].upper() + tagged[3:tagged.find(')')-2]\n else:\n inserttext = tagged[1:2].upper() + tagged[2:-1]\n #messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(lang, inserttext))\n thiscategory = self.tree.insert('', 'end', iid=inserttext.lower(), values=['', ''], \\\n text=LOCALIZED_TEXT[lang][inserttext], tags=\"{}\".format(inserttext.lower()))\n for term in category:\n values = eval(term.get('values'))\n tags = term.get('tags')\n# messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(values, tags))\n thisterm = self.tree.insert(thiscategory, 'end')\n self.tree.item(thisterm, tags=term.get('tags'))\n self.tree.item(thisterm, text=term.text)\n self.tree.item(thisterm, values=[str(values[0]), str(values[1])])\n# tags=term.get('tags'))\n for rendering in term:\n thisrendering = self.tree.insert(thisterm, 'end', \\\n text=rendering.text, values=term.get('values'), \\\n tags=rendering.get('tags'))\n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')\n self.tree.update() \n pass", "def onSelectAll(self, event):\n\t\tself.ignore = 1\n\t\tself.selectAll(self.tree.GetRootItem())\n\t\tself.ignore = 0", "def _auxRefreshTree(self, tree_index):\n tree_item = self.treeItem(tree_index)\n logger.debug(\"_auxRefreshTree({}): {}{}\".format(\n tree_index, tree_item.obj_path,\n \"*\" if tree_item.children_fetched else \"\"))\n\n if tree_item.children_fetched:\n\n old_items = tree_item.child_items\n new_items = self._fetchObjectChildren(tree_item.obj,\n tree_item.obj_path)\n\n old_item_names = [(item.obj_name,\n item.is_attribute) for item in old_items]\n new_item_names = [(item.obj_name,\n item.is_attribute) for item in new_items]\n seqMatcher = SequenceMatcher(isjunk=None, a=old_item_names,\n b=new_item_names,\n autojunk=False)\n opcodes = seqMatcher.get_opcodes()\n\n logger.debug(\"(reversed) \"\n \"opcodes: {}\".format(list(reversed(opcodes))))\n\n for tag, i1, i2, j1, j2 in reversed(opcodes):\n\n if 1 or tag != 'equal':\n logger.debug(\" {:7s}, a[{}:{}] ({}), b[{}:{}] ({})\"\n .format(tag, i1, i2,\n old_item_names[i1:i2], j1, j2,\n new_item_names[j1:j2]))\n\n if tag == 'equal':\n # Only when node names are equal is _auxRefreshTree\n # called recursively.\n assert i2-i1 == j2-j1, (\"equal sanity \"\n \"check failed \"\n \"{} != {}\".format(i2-i1, j2-j1))\n for old_row, new_row in zip(range(i1, i2), range(j1, j2)):\n old_items[old_row].obj = new_items[new_row].obj\n child_index = self.index(old_row, 0, parent=tree_index)\n self._auxRefreshTree(child_index)\n\n elif tag == 'replace':\n # Explicitly remove the old item and insert the new.\n # The old item may have child nodes which indices must be\n # removed by Qt, otherwise it 
crashes.\n assert i2-i1 == j2-j1, (\"replace sanity \"\n \"check failed \"\n \"{} != {}\").format(i2-i1, j2-j1)\n\n # row number of first removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" calling \"\n \"beginInsertRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n\n elif tag == 'delete':\n assert j1 == j2, (\"delete\"\n \" sanity check \"\n \"failed. {} != {}\".format(j1, j2))\n # row number of first that will be removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n elif tag == 'insert':\n assert i1 == i2, (\"insert \"\n \"sanity check \"\n \"failed. {} != {}\".format(i1, i2))\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" \"\n \"calling beginInsertRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n else:\n raise ValueError(\"Invalid tag: {}\".format(tag))", "def export_raw_tree(tree, filename): # Warning : Ugly, need to be improved\n # We save the tree\n print(tree, file=open(\"{:s}.tree\".format(filename), \"w\"), end=\"\\n\")\n\n #delete artefacts\n f = open(\"{:s}.tree\".format(filename), \"r\")\n lines = f.readlines()\n f.close()\n out = open(\"{:s}.tree\".format(filename), \"w\")\n\n reg = r\"Move\\.from_uci\\(\\'(\\w+)\\'\\)\"\n for l in lines:\n l = re.sub(reg, r\"\\1\", l)\n l = l.replace(\"(\", \"{\")\n l = l.replace(\")\", \"}\")\n l = l.replace(\"[\", \"{\")\n l = l.replace(\"]\", \"}\")\n out.write(l)", "def check(src, dst):\n walker = Walker()\n walker.check(src, dst)\n return", "def __copy__(self) -> 'Tree':\n return non_recursive_tree_copy(self)", "def test_tree_mode4(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_2.reparent(None)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def copy_tree ( self,\n source_root, dest_root, overwrite=True, followlinks=False\n ):\n dodir = self.dodir\n copy_file = self.copy_file\n\n if overwrite:\n for source, dest, relpath, dirs, 
files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )\n else:\n for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n if dest_stat is None:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if dest_stat is None:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def process_tree(self, src, dst):\n srcset_fmt = self.kw['image_srcset_format']\n srcset_sizes_all = self.kw['image_srcset_sizes']\n base_len = len(src.split(os.sep))\n for root, dirs, files in os.walk(src, followlinks=True):\n root_parts = root.split(os.sep)\n dst_dir = os.path.join(dst, *root_parts[base_len:])\n utils.makedirs(dst_dir)\n for src_name in files:\n if (not src_name.lower().endswith(tuple(self.image_ext_list)) and not src_name.upper().endswith(tuple(self.image_ext_list))):\n continue\n dst_file = os.path.join(dst_dir, src_name)\n src_file = os.path.join(root, src_name)\n srcset_name, srcset_ext = os.path.splitext(src_name)\n\n # Find out the width of the image so we only resize up to that size\n try:\n src_width = Image.open(src_file).size[0]\n except UnidentifiedImageError:\n # e.g. 
for SVGs: we don't need srcsets\n src_width = 1\n # then trim our list of sizes to only those below the image width:\n srcset_sizes = [ size for size in srcset_sizes_all if (size < src_width) ]\n \n # Create the list of filenames, starting with the \"max_sized\" version that bears the same name as the original file:\n dsts = [dst_file]\n\n # Now add all the other filenames, based on their size:\n for srcset_size in srcset_sizes:\n srcset_size_file = os.path.join(dst_dir, srcset_fmt.format(\n name = srcset_name,\n size = srcset_size,\n ext = srcset_ext,\n ))\n dsts.append(srcset_size_file)\n\n # If we have extra output formats for images, we need to add them to the list as well:\n for extra_format in self.kw['extra_image_extensions']:\n # First the largest / default image:\n dsts.append(os.path.join(dst_dir, srcset_name + extra_format))\n \n # Then the smaller ones:\n for srcset_size in srcset_sizes:\n srcset_size_file = os.path.join(dst_dir, srcset_fmt.format(\n name = srcset_name,\n size = srcset_size,\n ext = extra_format,\n ))\n dsts.append(srcset_size_file)\n \n yield {\n 'name': dst_file,\n 'file_dep': [src_file],\n 'targets': dsts,\n 'actions': [(self.process_image, (src_file, dsts, srcset_sizes))],\n 'clean': True,\n }", "def walk_to_tree_insertion_point(self):\n\n while not (self._latest_node.is_start_node() or self._latest_node.is_cont_node()):\n self._latest_node = self._latest_node.parent\n if self._latest_node == self.root:\n log.error('Latest node == root while trying to find an insertion point.')\n break", "def process_swapped_tree(region=settings.PRIMARY_REGION):\n with transaction.atomic():\n # get latest tree - not yet active because we want to activate it only immediately after\n # finishing reindexing (and swapping index aliases)\n new_tree = NomenclatureTree.objects.filter(region=region).latest(\"start_date\")\n # get active (but not latest) tree\n prev_tree = NomenclatureTree.get_active_tree(region=region)\n if prev_tree:\n prev_tree.end_date = timezone.now()\n prev_tree.save()\n\n # activate the latest tree so that ES indexes objects from that tree (but it's not\n # yet visible in the app since the transaction didn't finish)\n new_tree.end_date = None\n new_tree.save()\n\n yield prev_tree, new_tree\n\n new_tree.end_date = timezone.now()\n new_tree.save()\n\n prev_tree.end_date = None\n prev_tree.save()", "def convert(tree,fileName=None):\n rootNode = tree.getroot()\n if rootNode.tag not in ['Simulation', 'OutStreamManager', 'Steps']:\n ## This is not a valid input file, or at least not one we care about for\n ## this conversion\n return tree\n osmNode = None\n stepsNode = None\n if rootNode.tag == 'Simulation':\n osmNode = rootNode.find('OutStreamManager')\n stepsNode = rootNode.find('Steps')\n elif rootNode.tag == 'outstreamManager':\n ## Case for when the OutStreamManager node is specified in an external file.\n ## (Steps should not be in this file?)\n osmNode = rootNode\n elif rootNode.tag == 'Steps':\n ## Case for when the Steps node is specified in an external file.\n ## (OutStreamManager should not be in this file?)\n stepsNode = rootNode\n\n if osmNode is not None:\n osmNode.tag = 'OutStreams'\n\n if stepsNode is not None:\n for outputNode in stepsNode.iter('Output'):\n if 'class' in outputNode.attrib and outputNode.attrib['class'] == 'OutStreamManager':\n outputNode.attrib['class'] = 'OutStreams'\n\n return tree", "def split(self, thresh=0):\n\n new_tree_bounds = []\n new_tree_ids = []\n\n self.contains_null = False\n\n for qi, quad in 
enumerate(self.tree):\n\n left, bottom, right, top = quad.bounds\n xcenter = left + (right - left) / 2.0\n ycenter = top - (top - bottom) / 2.0\n\n quad_id = self.tree_ids[qi]\n\n for id_, bbox in zip(\n [1, 3, 0, 2],\n [\n (left, ycenter, xcenter, top),\n (xcenter, ycenter, right, top),\n (left, bottom, xcenter, ycenter),\n (xcenter, bottom, right, ycenter),\n ],\n ):\n\n id_list = list(self.sindex.intersection(bbox))\n\n if id_list:\n\n if len(id_list) > thresh:\n\n new_tree_bounds.append(bbox)\n new_tree_ids.append(quad_id + str(id_))\n\n else:\n self.contains_null = True\n\n else:\n self.contains_null = True\n\n self.tree_bounds = new_tree_bounds\n self.tree_ids = new_tree_ids\n\n return self", "def treeshrink(tree_file, output_dir, output_ext, quantiles):\n subdir = util.file_name(tree_file)\n\n cmd = ' '.join([\n 'run_treeshrink.py',\n '--tree {}'.format(tree_file),\n '--centroid',\n '--mode per-gene',\n '--quantiles {}'.format(quantiles),\n '--outdir {}'.format(subdir),\n '--tempdir {}'.format(subdir)])\n\n with util.cd(output_dir):\n subprocess.check_call(cmd, shell=True)\n\n mask = util.file_name(subdir + '_*', ext=EXT_IN, dir_=subdir)\n tree_src = glob(mask)[0]\n tree_dst = util.file_name(tree_file, output_ext + EXT_OUT)\n\n with open(tree_src) as in_file, open(tree_dst, 'w') as out_file:\n content = in_file.read()\n out_file.write(content.replace(\"'\", ''))\n\n rmtree(subdir)\n\n return tree_dst", "def update_tree(tree, subtree_hierarchy):\n new_tree = subtree_hierarchy.copy()\n for bg_pop, row in subtree_hierarchy.iterrows():\n # Remove not showing pops from new_tree\n if row['To_show'] == 'no':\n new_tree = new_tree.drop(bg_pop)\n continue\n\n # Find Parent\n parent_to_show = row['Parent']\n # If bg_pop has no Parent, skip\n if parent_to_show == '':\n continue\n # If Parent not in subtree, skip\n if parent_to_show not in subtree_hierarchy.index:\n continue\n # If Parent has To_show = 'no', find Parent of Parent, etc.\n while subtree_hierarchy.at[parent_to_show, 'To_show'] == 'no':\n parent_to_show = subtree_hierarchy.at[parent_to_show, 'Parent']\n # Set Parent to show in new_tree\n new_tree.at[bg_pop, 'Parent'] = parent_to_show\n\n new_tree = new_tree.reset_index()[['index', 'BG_population', 'Parent', 'BG_label']]\n # For pairs ('BG_population', 'Parent') that has coords, add coords\n new_tree_pos = new_tree.merge(tree.reset_index(), how='left', on=['BG_population', 'Parent'])\n new_tree_pos = new_tree_pos[['index_x', 'BG_population', 'Parent', 'posX', 'posY', 'BG_label_x']] \\\n .rename(columns={'index_x': 'index', 'BG_label_x': 'BG_label'}) \\\n .fillna('')\n\n return new_tree_pos", "def convertGENSIM(infiles,outfilename,Nmax=-1):\n start1 = time.time()\n\n lqids = [9000008, 9000009]\n xids = [9000006, 9000007]\n dmids = [9000005]\n\n print \">>> loading files...\"\n events = Events(infiles)\n outfile = TFile(outfilename, 'RECREATE')\n\n print \">>> creating trees and branches...\"\n tree_event = TTree('event', 'event')\n tree_jet = TTree('jet', 'jet')\n tree_jet_raw= TTree('jet_raw', 'jet_raw')\n\n # EVENT\n tree_event.addBranch('nbgen', 'i')\n tree_event.addBranch('nbcut', 'i')\n tree_event.addBranch('nbcut50', 'i')\n tree_event.addBranch('njet', 'i')\n tree_event.addBranch('ntau', 'i')\n tree_event.addBranch('ntaucut', 'i')\n tree_event.addBranch('ntaucut50', 'i')\n tree_event.addBranch('ntaucut_vis', 'i')\n tree_event.addBranch('ntaucut50_vis', 'i')\n tree_event.addBranch('nlq', 'i')\n tree_event.addBranch('ndm', 'i')\n tree_event.addBranch('nx', 'i')\n 
tree_event.addBranch('met_pt', 'f')\n tree_event.addBranch('met_phi', 'f')\n tree_event.addBranch('jet1_mass', 'f')\n tree_event.addBranch('jet2_mass', 'f')\n tree_event.addBranch('jet3_mass', 'f')\n tree_event.addBranch('jet1_pt', 'f')\n tree_event.addBranch('jet2_pt', 'f')\n tree_event.addBranch('jet3_pt', 'f')\n tree_event.addBranch('jet1_eta', 'f')\n tree_event.addBranch('jet2_eta', 'f')\n tree_event.addBranch('jet3_eta', 'f')\n tree_event.addBranch('jet1_phi', 'f')\n tree_event.addBranch('jet2_phi', 'f')\n tree_event.addBranch('jet3_phi', 'f')\n tree_event.addBranch('sumjet', 'f')\n tree_event.addBranch('dphi_jj', 'f')\n tree_event.addBranch('deta_jj', 'f')\n tree_event.addBranch('dr_jj', 'f')\n tree_event.addBranch('mjj', 'f')\n tree_event.addBranch('lq1_mass', 'f')\n tree_event.addBranch('lq2_mass', 'f')\n tree_event.addBranch('lq1_pt', 'f')\n tree_event.addBranch('lq2_pt', 'f')\n tree_event.addBranch('lq1_eta', 'f')\n tree_event.addBranch('lq2_eta', 'f')\n tree_event.addBranch('lq1_phi', 'f')\n tree_event.addBranch('lq2_phi', 'f')\n tree_event.addBranch('dm1_mass', 'f')\n tree_event.addBranch('dm2_mass', 'f')\n tree_event.addBranch('dm1_pt', 'f')\n tree_event.addBranch('dm2_pt', 'f')\n tree_event.addBranch('dm1_eta', 'f')\n tree_event.addBranch('dm2_eta', 'f')\n tree_event.addBranch('dm1_phi', 'f')\n tree_event.addBranch('dm2_phi', 'f')\n tree_event.addBranch('b1_mass', 'f')\n tree_event.addBranch('b2_mass', 'f')\n tree_event.addBranch('b1_pt', 'f')\n tree_event.addBranch('b2_pt', 'f')\n tree_event.addBranch('b1_eta', 'f')\n tree_event.addBranch('b2_eta', 'f')\n tree_event.addBranch('b1_phi', 'f')\n tree_event.addBranch('b2_phi', 'f')\n tree_event.addBranch('x_mass', 'f')\n tree_event.addBranch('x_pt', 'f')\n tree_event.addBranch('x_eta', 'f')\n tree_event.addBranch('x_phi', 'f')\n tree_event.addBranch('m_lqlq', 'f')\n tree_event.addBranch('tau1_mass', 'f')\n tree_event.addBranch('tau2_mass', 'f')\n tree_event.addBranch('tau1_pt', 'f')\n tree_event.addBranch('tau2_pt', 'f')\n tree_event.addBranch('tau1_eta', 'f')\n tree_event.addBranch('tau2_eta', 'f')\n tree_event.addBranch('tau1_phi', 'f')\n tree_event.addBranch('tau2_phi', 'f')\n tree_event.addBranch('tau1_y', 'f')\n tree_event.addBranch('tau2_y', 'f')\n tree_event.addBranch('tau1_massvis', 'f')\n tree_event.addBranch('tau2_massvis', 'f')\n tree_event.addBranch('tau1_ptvis', 'f')\n tree_event.addBranch('tau2_ptvis', 'f')\n tree_event.addBranch('tau1_etavis', 'f')\n tree_event.addBranch('tau2_etavis', 'f')\n tree_event.addBranch('tau1_phivis', 'f')\n tree_event.addBranch('tau2_phivis', 'f')\n tree_event.addBranch('ditau_dy', 'f')\n tree_event.addBranch('ditau_chi', 'f')\n tree_event.addBranch('m_tauvis1_jet1', 'f')\n tree_event.addBranch('st', 'f') # scalar sum pT\n tree_event.addBranch('st_met', 'f') # scalar sum pT with MET\n # tree_event.addBranch('jets_pt' , 'f', standard=False)\n # tree_event.addBranch('jets_eta' , 'f', standard=False)\n # tree_event.addBranch('jets_phi' , 'f', standard=False)\n # tree_event.addBranch('jets_mass', 'f', standard=False)\n\n tree_event.addBranch('weight', 'f')\n tree_event.addBranch('event_id', 'i')\n\n # JETS raw\n tree_jet_raw.addBranch('mass', 'f')\n tree_jet_raw.addBranch('pt', 'f')\n tree_jet_raw.addBranch('eta', 'f')\n tree_jet_raw.addBranch('phi', 'f')\n tree_jet_raw.addBranch('weight', 'f')\n tree_jet_raw.addBranch('event_id', 'i')\n\n # JETS cleaned\n tree_jet.addBranch('mass', 'f')\n tree_jet.addBranch('pt', 'f')\n tree_jet.addBranch('eta', 'f')\n tree_jet.addBranch('phi', 'f')\n 
tree_jet.addBranch('weight', 'f')\n tree_jet.addBranch('event_id', 'i')\n\n handle_gps, label_gps = Handle('std::vector<reco::GenParticle>'), 'genParticles'\n handle_jets, label_jets = Handle('std::vector<reco::GenJet>'), 'ak4GenJets'\n handle_met, label_met = Handle('vector<reco::GenMET>'), 'genMetTrue'\n handle_weight, label_weight = Handle('GenEventInfoProduct'), 'generator'\n\n evtid = 0\n sec_per_evt = 0.023 # seconds per event\n Ntot = Nmax if Nmax>0 else events.size()\n print \">>> start processing %d events, ETA %s...\"%(Ntot,formatTimeShort(sec_per_evt*Ntot))\n step = stepsize(Ntot)\n start_proc = time.time()\n\n # LOOP OVER EVENTS\n for event in events:\n # print ' --- NEW EVENT'\n # print '='*30\n # print evtid\n if Nmax>0 and evtid>=Nmax: break\n if evtid>0 and evtid%step==0: print \">>> processed %4s/%d events, ETA %s\"%(evtid,Ntot,ETA(start_proc,evtid+1,Ntot))\n evtid += 1\n tree_event.event_id[0] = evtid\n\n event.getByLabel(label_gps,handle_gps)\n gps = handle_gps.product()\n\n event.getByLabel(label_jets,handle_jets)\n jets = handle_jets.product()\n\n event.getByLabel(label_met,handle_met)\n met = handle_met.product()\n\n event.getByLabel(label_weight,handle_weight)\n gweight = handle_weight.product()\n weight = gweight.weight()\n\n # GEN PARTICLES\n gps_final = [p for p in gps if isFinal(p) and abs(p.pdgId()) in [5,6,15,16]+lqids + dmids + xids]\n gps_mother = [p for p in gps_final if abs(p.pdgId()) in lqids and p.status()>60]\n gps_mother .sort(key=lambda p: p.pt(), reverse=True)\n gps_dm = [p for p in gps_final if abs(p.pdgId()) in dmids]\n gps_dm .sort(key=lambda p: p.pt(), reverse=True)\n gps_x = [p for p in gps_final if abs(p.pdgId()) in xids]\n gps_x .sort(key=lambda p: p.pt(), reverse=True)\n\n gps_bgen = [p for p in gps_final if abs(p.pdgId())==5 and p.status()==71]\n gps_bgen .sort(key=lambda p: p.pt(), reverse=True)\n gps_bfromNP = [p for p in gps if abs(p.pdgId())==5 and abs(p.mother(0).pdgId()) in lqids+dmids+xids]\n gps_bfromNP .sort(key=lambda p: p.pt(), reverse=True)\n gps_bcut = [p for p in gps_bgen if p.pt()>20 and abs(p.eta())<2.5]\n gps_bcut50 = [p for p in gps_bgen if p.pt()>50 and abs(p.eta())<2.5]\n\n gps_tau = [p for p in gps_final if abs(p.pdgId())==15 and p.status()==2]\n gps_tau .sort(key=lambda p: p.pt(), reverse=True)\n gps_taufromLQ= [p for p in gps if abs(p.pdgId())==15 and abs(p.mother(0).pdgId()) in lqids]\n gps_taufromLQ.sort(key=lambda p: p.pt(), reverse=True)\n gps_taucut = [p for p in gps_tau if p.pt()>20 and abs(p.eta())<2.5]\n gps_taucut50 = [p for p in gps_tau if p.pt()>50 and abs(p.eta())<2.5]\n\n gps_tau_vis = []\n gps_taucut_vis = []\n gps_taucut50_vis = []\n # find taus that survive ptvis > 20 and >50 cuts\n for p in gps_tau:\n while p.status()!=2 :\n p = p.daughter(0)\n findau = finalDaughters(p, [])\n tauvisp4 = p4sumvis(findau)\n thisptvis = tauvisp4.pt()\n thisetavis = tauvisp4.eta()\n gps_tau_vis.append(tauvisp4)\n if thisptvis > 20 and abs(thisetavis) < 2.5: gps_taucut_vis.append(tauvisp4)\n if thisptvis > 50 and abs(thisetavis) < 2.5: gps_taucut50_vis.append(tauvisp4)\n gps_tau_vis .sort(key=lambda p: p.pt(), reverse=True)\n gps_taucut_vis .sort(key=lambda p: p.pt(), reverse=True)\n gps_taucut50_vis.sort(key=lambda p: p.pt(), reverse=True)\n\n\n # if not len(gps_bgen) == 2:\n # print 'len, gps_mother = ', len(gps_mother)\n # print 'number of b\\'s: %i' % len(gps_bgen)\n # print 'number of b\\'s from NP decays: %i' % len(gps_bfromNP)\n # if len(gps_mother) > 0:\n # print '=' * 15 + ' NEW EVENT' + '='*15\n # 
printDecayChain(gps_mother)\n # continue\n\n # # REMOVE TOP QUARK if its final daughter is also in the list\n # for top in gps_tgen[:]:\n # dau = top\n # while abs(dau.daughter(0).pdgId())==6:\n # dau = dau.daughter(0)\n # if dau!=top and dau in gps_tgen:\n # gps_tgen.remove(top)\n\n # # write raw jets to tree\n # jets_pt = []\n # jets_eta = []\n # jets_phi = []\n # jets_mass = []\n # for jet in jets:\n # jets_pt.append(jet.pt())\n # jets_eta.append(jet.eta())\n # jets_phi.append(jet.phi())\n # jets_mass.append(jet.mass())\n # tree_event.jets_pt[0] = jets_pt\n # tree_event.jets_pt[0] = jets_pt\n # tree_event.jets_eta[0] = jets_eta\n # tree_event.jets_phi[0] = jets_phi\n # tree_event.jets_mass[0] = jets_mass\n\n # write raw jets to a tree\n for jet in jets:\n tree_jet_raw.mass[0] = jet.mass()\n tree_jet_raw.pt[0] = jet.pt()\n tree_jet_raw.eta[0] = jet.eta()\n tree_jet_raw.phi[0] = jet.phi()\n tree_jet_raw.weight[0] = weight\n tree_jet_raw.event_id[0] = evtid\n tree_jet_raw.Fill()\n\n # REMOVE JET-LEPTON OVERLAP\n jets, dummy = cleanObjectCollection(jets,gps_tau_vis,dRmin=0.4)\n # print 'now cleaning from DM, now: %f' % (len(jets))\n jets, dummy = cleanObjectCollection(jets,gps_dm,dRmin=0.4, mfrac=-1.)\n # print 'after cleaning from DM, now: %f' % (len(jets))\n jets .sort(key=lambda p: p.pt(), reverse=True)\n njets = 0\n sumjet = 0\n jets30 = [ ]\n\n for jet in jets:\n if jet.pt()>30 and abs(jet.eta())<5:\n sumjet += jet.pt()\n njets += 1\n tree_jet.mass[0] = jet.mass()\n tree_jet.pt[0] = jet.pt()\n tree_jet.eta[0] = jet.eta()\n tree_jet.phi[0] = jet.phi()\n tree_jet.weight[0] = weight\n tree_jet.event_id[0] = evtid\n tree_jet.Fill()\n jets30.append(jet)\n\n # MULTIPLICITIES\n tree_event.nlq[0] = len(gps_mother)\n tree_event.ndm[0] = len(gps_dm)\n tree_event.nx[0] = len(gps_x)\n tree_event.nbcut[0] = len(gps_bcut)\n tree_event.nbcut50[0] = len(gps_bcut50)\n tree_event.nbgen[0] = len(gps_bgen)\n tree_event.njet[0] = njets\n tree_event.ntau[0] = len(gps_tau)\n tree_event.ntaucut[0] = len(gps_taucut)\n tree_event.ntaucut50[0] = len(gps_taucut50)\n tree_event.ntaucut_vis[0] = len(gps_taucut_vis)\n tree_event.ntaucut50_vis[0] = len(gps_taucut50_vis)\n\n # JETS\n tree_event.met_pt[0] = met[0].pt()\n tree_event.met_phi[0] = met[0].phi()\n tree_event.sumjet[0] = sumjet\n if len(jets30) >= 2:\n tree_event.jet1_mass[0] = jets30[0].mass()\n tree_event.jet2_mass[0] = jets30[1].mass()\n tree_event.jet1_pt[0] = jets30[0].pt()\n tree_event.jet2_pt[0] = jets30[1].pt()\n tree_event.jet1_eta[0] = jets30[0].eta()\n tree_event.jet2_eta[0] = jets30[1].eta()\n tree_event.jet1_phi[0] = jets30[0].phi()\n tree_event.jet2_phi[0] = jets30[1].phi()\n if len(jets30) >= 3:\n tree_event.jet3_mass[0] = jets30[2].mass()\n tree_event.jet3_pt[0] = jets30[2].pt()\n tree_event.jet3_eta[0] = jets30[2].eta()\n tree_event.jet3_phi[0] = jets30[2].phi()\n else:\n tree_event.jet3_mass[0] = -1.\n tree_event.jet3_pt[0] = -1.\n tree_event.jet3_eta[0] = -9.\n tree_event.jet3_phi[0] = -9.\n tree_event.dphi_jj[0] = deltaPhi(jets30[0].phi(), jets30[1].phi())\n tree_event.deta_jj[0] = jets30[0].eta() - jets30[1].eta()\n tree_event.dr_jj[0] = deltaR(jets30[0].eta(),jets30[0].phi(),jets30[1].eta(),jets30[1].phi())\n dijetp4 = jets30[0].p4() + jets30[1].p4()\n tree_event.mjj[0] = dijetp4.M()\n elif len(jets30) == 1:\n tree_event.jet1_mass[0] = jets30[0].mass()\n tree_event.jet2_mass[0] = -1.\n tree_event.jet3_mass[0] = -1.\n tree_event.jet1_pt[0] = jets30[0].pt()\n tree_event.jet2_pt[0] = -1.\n tree_event.jet3_pt[0] = -1.\n 
tree_event.jet1_eta[0] = jets30[0].eta()\n tree_event.jet2_eta[0] = -9.\n tree_event.jet3_eta[0] = -9.\n tree_event.jet1_phi[0] = jets30[0].phi()\n tree_event.jet2_phi[0] = -9.\n tree_event.jet3_phi[0] = -9.\n tree_event.dphi_jj[0] = -9.\n tree_event.deta_jj[0] = -9.\n tree_event.dr_jj[0] = -1.\n tree_event.mjj[0] = -1.\n else:\n tree_event.jet1_mass[0] = -1.\n tree_event.jet2_mass[0] = -1.\n tree_event.jet3_mass[0] = -1.\n tree_event.jet1_pt[0] = -1.\n tree_event.jet2_pt[0] = -1.\n tree_event.jet3_pt[0] = -1.\n tree_event.jet1_eta[0] = -9.\n tree_event.jet2_eta[0] = -9.\n tree_event.jet3_eta[0] = -9.\n tree_event.jet1_phi[0] = -9.\n tree_event.jet2_phi[0] = -9.\n tree_event.jet3_phi[0] = -9.\n tree_event.dphi_jj[0] = -9.\n tree_event.deta_jj[0] = -9.\n tree_event.dr_jj[0] = -1.\n tree_event.mjj[0] = -1.\n\n if len(gps_bgen) >= 2:\n tree_event.b1_mass[0]= gps_bgen[0].mass()\n tree_event.b1_pt[0] = gps_bgen[0].pt()\n tree_event.b1_eta[0] = gps_bgen[0].eta()\n tree_event.b1_phi[0] = gps_bgen[0].phi()\n tree_event.b2_mass[0]= gps_bgen[1].mass()\n tree_event.b2_pt[0] = gps_bgen[1].pt()\n tree_event.b2_eta[0] = gps_bgen[1].eta()\n tree_event.b2_phi[0] = gps_bgen[1].phi()\n elif len(gps_bgen) == 1:\n tree_event.b1_mass[0]= gps_bgen[0].mass()\n tree_event.b1_pt[0] = gps_bgen[0].pt()\n tree_event.b1_eta[0] = gps_bgen[0].eta()\n tree_event.b1_phi[0] = gps_bgen[0].phi()\n tree_event.b2_mass[0]= -1.\n tree_event.b2_pt[0] = -1.\n tree_event.b2_eta[0] = -9.\n tree_event.b2_phi[0] = -9.\n else:\n tree_event.b1_mass[0]= -1.\n tree_event.b1_pt[0] = -1.\n tree_event.b1_eta[0] = -9.\n tree_event.b1_phi[0] = -9.\n tree_event.b2_mass[0]= -1.\n tree_event.b2_pt[0] = -1.\n tree_event.b2_eta[0] = -9.\n tree_event.b2_phi[0] = -9.\n\n # SCALAR SUM PT\n if len(gps_tau_vis)>=1 and len(gps_bcut)>=1:\n st = 0\n for tauvis in gps_tau_vis[:2]:\n st += tauvis.pt()\n for b in gps_bcut:\n st += b.pt()\n stmet = st + met[0].pt()\n else:\n st = -1\n stmet = -1\n\n if len(gps_tau) >=2 :\n tree_event.tau1_mass[0] = gps_tau[0].mass()\n tree_event.tau2_mass[0] = gps_tau[1].mass()\n tree_event.tau1_pt[0] = gps_tau[0].pt()\n tree_event.tau2_pt[0] = gps_tau[1].pt()\n tree_event.tau1_eta[0] = gps_tau[0].eta()\n tree_event.tau2_eta[0] = gps_tau[1].eta()\n tree_event.tau1_phi[0] = gps_tau[0].phi()\n tree_event.tau2_phi[0] = gps_tau[1].phi()\n tree_event.tau1_y[0] = gps_tau[0].p4().Rapidity()\n tree_event.tau2_y[0] = gps_tau[1].p4().Rapidity()\n tree_event.tau1_massvis[0] = gps_tau_vis[0].mass()\n tree_event.tau2_massvis[0] = gps_tau_vis[1].mass()\n tree_event.tau1_ptvis[0] = gps_tau_vis[0].pt()\n tree_event.tau2_ptvis[0] = gps_tau_vis[1].pt()\n tree_event.tau1_etavis[0] = gps_tau_vis[0].eta()\n tree_event.tau2_etavis[0] = gps_tau_vis[1].eta()\n tree_event.tau1_phivis[0] = gps_tau_vis[0].phi()\n tree_event.tau2_phivis[0] = gps_tau_vis[1].phi()\n dy = abs(gps_tau[0].p4().Rapidity() - gps_tau[1].p4().Rapidity())\n tree_event.ditau_dy[0] = dy\n tree_event.ditau_chi[0] = exp(dy)\n elif len(gps_tau) == 1:\n tree_event.tau1_mass[0] = gps_tau[0].mass()\n tree_event.tau2_mass[0] = -1.\n tree_event.tau1_pt[0] = gps_tau[0].pt()\n tree_event.tau2_pt[0] = -1.\n tree_event.tau1_eta[0] = gps_tau[0].eta()\n tree_event.tau2_eta[0] = -9.\n tree_event.tau1_phi[0] = gps_tau[0].phi()\n tree_event.tau2_phi[0] = -9.\n tree_event.tau1_y[0] = gps_tau[0].p4().Rapidity()\n tree_event.tau2_y[0] = -9.\n tree_event.tau1_massvis[0] = gps_tau_vis[0].mass()\n tree_event.tau2_massvis[0] = -1.\n tree_event.tau1_ptvis[0] = gps_tau_vis[0].pt()\n tree_event.tau2_ptvis[0] 
= -1.\n tree_event.tau1_etavis[0] = gps_tau_vis[0].eta()\n tree_event.tau2_etavis[0] = -9.\n tree_event.tau1_phivis[0] = gps_tau_vis[0].phi()\n tree_event.tau2_phivis[0] = -9.\n tree_event.ditau_dy[0] = -9.\n tree_event.ditau_chi[0] = -1.\n else:\n tree_event.tau1_mass[0] = -1.\n tree_event.tau2_mass[0] = -1.\n tree_event.tau1_pt[0] = -1.\n tree_event.tau2_pt[0] = -1.\n tree_event.tau1_eta[0] = -9.\n tree_event.tau2_eta[0] = -9.\n tree_event.tau1_phi[0] = -9.\n tree_event.tau2_phi[0] = -9.\n tree_event.tau1_y[0] = -9.\n tree_event.tau2_y[0] = -9.\n tree_event.tau1_massvis[0] = -1.\n tree_event.tau2_massvis[0] = -1.\n tree_event.tau1_ptvis[0] = -1.\n tree_event.tau2_ptvis[0] = -1.\n tree_event.tau1_etavis[0] = -9.\n tree_event.tau2_etavis[0] = -9.\n tree_event.tau1_phivis[0] = -9.\n tree_event.tau2_phivis[0] = -9.\n tree_event.ditau_dy[0] = -9.\n tree_event.ditau_chi[0] = -1.\n\n tree_event.st[0] = st\n tree_event.st_met[0] = stmet\n\n # M (tauvis1, jet1)\n if len(gps_tau) >= 1 and len(jets30) >= 1:\n tree_event.m_tauvis1_jet1[0] = (gps_tau[0].p4() + jets30[0].p4()).M()\n else:\n tree_event.m_tauvis1_jet1[0] = -1.\n\n\n tree_event.weight[0] = weight\n\n if len(gps_mother)==1:\n tree_event.lq1_mass[0] = gps_mother[0].mass()\n tree_event.lq2_mass[0] = -1.\n tree_event.lq1_pt[0] = gps_mother[0].pt()\n tree_event.lq2_pt[0] = -1.\n tree_event.lq1_eta[0] = gps_mother[0].eta()\n tree_event.lq2_eta[0] = -9.\n tree_event.lq1_phi[0] = gps_mother[0].phi()\n tree_event.lq2_phi[0] = -9.\n tree_event.m_lqlq[0] = -1.\n elif len(gps_mother)>=2:\n tree_event.lq1_mass[0] = gps_mother[0].mass()\n tree_event.lq2_mass[0] = gps_mother[1].mass()\n tree_event.lq1_pt[0] = gps_mother[0].pt()\n tree_event.lq2_pt[0] = gps_mother[1].pt()\n tree_event.lq1_eta[0] = gps_mother[0].eta()\n tree_event.lq2_eta[0] = gps_mother[1].eta()\n tree_event.lq1_phi[0] = gps_mother[0].phi()\n tree_event.lq2_phi[0] = gps_mother[1].phi()\n dilqp4 = gps_mother[0].p4() + gps_mother[1].p4()\n tree_event.m_lqlq[0] = dilqp4.M()\n else:\n tree_event.lq1_mass[0] = -1.\n tree_event.lq2_mass[0] = -1.\n tree_event.lq1_pt[0] = -1.\n tree_event.lq2_pt[0] = -1.\n tree_event.lq1_eta[0] = -9.\n tree_event.lq2_eta[0] = -9.\n tree_event.lq1_phi[0] = -9.\n tree_event.lq2_phi[0] = -9.\n tree_event.m_lqlq[0] = -1.\n\n if len(gps_dm) == 1:\n tree_event.dm1_mass[0] = gps_dm[0].mass()\n tree_event.dm2_mass[0] = -1.\n tree_event.dm1_pt[0] = gps_dm[0].pt()\n tree_event.dm2_pt[0] = -1.\n tree_event.dm1_eta[0] = gps_dm[0].eta()\n tree_event.dm2_eta[0] = -9.\n tree_event.dm1_phi[0] = gps_dm[0].phi()\n tree_event.dm2_phi[0] = -9.\n elif len(gps_dm)>=2:\n tree_event.dm1_mass[0] = gps_dm[0].mass()\n tree_event.dm2_mass[0] = gps_dm[1].mass()\n tree_event.dm1_pt[0] = gps_dm[0].pt()\n tree_event.dm2_pt[0] = gps_dm[1].pt()\n tree_event.dm1_eta[0] = gps_dm[0].eta()\n tree_event.dm2_eta[0] = gps_dm[1].eta()\n tree_event.dm1_phi[0] = gps_dm[0].phi()\n tree_event.dm2_phi[0] = gps_dm[1].phi()\n else:\n tree_event.dm1_mass[0] = -1.\n tree_event.dm2_mass[0] = -1.\n tree_event.dm1_pt[0] = -1.\n tree_event.dm2_pt[0] = -1.\n tree_event.dm1_eta[0] = -9.\n tree_event.dm2_eta[0] = -9.\n tree_event.dm1_phi[0] = -9.\n tree_event.dm2_phi[0] = -9.\n\n\n\n if len(gps_x) == 1:\n tree_event.x_mass[0] = gps_x[0].mass()\n tree_event.x_pt[0] = gps_x[0].pt()\n tree_event.x_eta[0] = gps_x[0].eta()\n tree_event.x_phi[0] = gps_x[0].phi()\n else:\n tree_event.x_mass[0] = -1.\n tree_event.x_pt[0] = -1.\n tree_event.x_eta[0] = -9.\n tree_event.x_phi[0] = -9.\n tree_event.Fill()\n\n print \">>> 
processed %4s events in %s\"%(evtid,formatTime(time.time()-start_proc))\n print \">>> writing to output file %s...\"%(outfilename)\n outfile.Write()\n outfile.Close()\n print \">>> done in in %s\"%(formatTime(time.time()-start1))", "def test_bin_tree():\n n1 = BinTreeNode(1)\n n2 = BinTreeNode(2)\n n3 = BinTreeNode(3)\n n4 = BinTreeNode(4)\n n5 = BinTreeNode(5)\n n1.left = n2\n n1.right = n3\n n2.left = n4\n n3.right = n5\n t = BinTree(n1)\n print('pre order')\n preorder_trav(t.root)\n print('in order')\n inorder_trav(t.root)\n print('post order')\n postorder_trav(t.root)", "def filter_selected_nodes(tree) -> list:\n return [n for n in tree.nodes if n.select and n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}]", "def __copytree(source, destination, symlinks=False):\n logger.info(\"copytree: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copytree(source, destination, symlinks)\n return True\n except Exception as e:\n logger.exception(\n \"copytree: %s -> %s failed! Error: %s\", source, destination, e\n )\n return False", "def copy_tree_to_path(src_dir, dest_dir):\n names = os.listdir(src_dir)\n\n for name in names:\n srcname = os.path.join(src_dir, name)\n destname = os.path.join(dest_dir, name)\n\n if os.path.isdir(srcname):\n shutil.copytree(srcname, destname)\n else:\n shutil.copy(srcname, destname)", "def _tree_update(self, new_tree: Tree, tree_update_time: datetime):\n self.__tree_update_time = tree_update_time\n self.__new_tree = new_tree\n self.__new_event_types_listeners = self._register_event_listeners(self.__new_tree)\n self.__is_simultaneous_state = True", "def __is_tree_node(self, node):\n if not node.input:\n if len(node.output) > 1:\n return False\n\n if len(node.output) > 1:\n return False\n\n for input_node in node.input:\n cls = self.__is_tree_node(input_node)\n if not cls:\n return False\n return True", "def move_many(self,\r\n sourcerange=None,\r\n destinationrange=None,\r\n subordinate=False,\r\n makecompact=False,\r\n all_children=False,\r\n withchildren=True,\r\n copy=False):\r\n\r\n if sourcerange is None:\r\n sourcerange = []\r\n if destinationrange is None:\r\n destinationrange = []\r\n flatten = False\r\n\r\n\r\n\r\n if not withchildren:\r\n sourcerange = [a_temp for a_temp in sourcerange if a_temp.is_top()]\r\n\r\n if len(destinationrange)==1 and \\\r\n (subordinate and str(destinationrange[0]) in self.indexes()\r\n and str(destinationrange[0].child()) not in self.indexes()):\r\n\r\n for i_temp in sourcerange:\r\n\r\n self.move(i_temp,\r\n destinationrange[0].subordinate(i_temp),\r\n withchildren=False,\r\n copy=copy)\r\n\r\n elif len(destinationrange)==1 and (makecompact\r\n and str(destinationrange[0])\r\n in self.indexes()\r\n and str(destinationrange[0].child()) not in self.indexes()):\r\n\r\n j_temp = destinationrange[0].child()\r\n for i_temp in sourcerange:\r\n self.move(i_temp,\r\n j_temp,\r\n withchildren=False,\r\n copy=copy)\r\n j_temp = j_temp.next()\r\n\r\n elif len(destinationrange)==1 and all_children \\\r\n and str(destinationrange[0]) in self.indexes():\r\n\r\n childcount = 1\r\n for i_temp in sourcerange:\r\n j_temp = destinationrange[0]\r\n for a_temp in range(childcount):\r\n j_temp = j_temp.child()\r\n childcount += 1\r\n self.move(i_temp,\r\n j_temp,\r\n withchildren=withchildren,\r\n copy=copy)\r\n\r\n else:\r\n\r\n sourcecycle = cycle(sourcerange)\r\n destinationcycle = cycle(destinationrange)\r\n if withchildren: #not deleted\r\n flatten = True\r\n\r\n for a_temp in 
range(len(sourcerange)):\r\n\r\n i_temp = next(sourcecycle)\r\n j_temp = next(destinationcycle)\r\n\r\n self.move(i_temp,\r\n j_temp,\r\n withchildren=withchildren,\r\n flatten=flatten,\r\n copy=copy)", "def copy_tree(src, dst, preserve_mode=1, preserve_times=1,\n preserve_symlinks=0, update=0, verbose=1, dry_run=0):\n preserve_symlinks = 1\n import os\n from distutils.dir_util import mkpath\n from distutils import log\n\n from distutils.file_util import copy_file\n\n if not dry_run and not os.path.isdir(src):\n raise DistutilsFileError(\n \"cannot copy tree '%s': not a directory\" % src)\n try:\n names = os.listdir(src)\n except OSError as e:\n if dry_run:\n names = []\n else:\n raise DistutilsFileError(\n \"error listing files in '%s': %s\" % (src, e.strerror))\n\n if not dry_run:\n mkpath(dst, verbose=verbose)\n\n outputs = []\n\n for n in names:\n src_name = os.path.join(src, n)\n dst_name = os.path.join(dst, n)\n\n if n.startswith('.nfs'):\n # skip NFS rename files\n continue\n\n if preserve_symlinks and os.path.islink(src_name):\n link_dest = os.readlink(src_name)\n if verbose >= 1:\n log.info(\"linking %s -> %s\", dst_name, link_dest)\n if not dry_run:\n if os.path.exists(dst_name):\n os.unlink(dst_name)\n os.symlink(link_dest, dst_name)\n outputs.append(dst_name)\n\n elif os.path.isdir(src_name):\n outputs.extend(\n copy_tree(src_name, dst_name, preserve_mode,\n preserve_times, preserve_symlinks, update,\n verbose=verbose, dry_run=dry_run))\n else:\n copy_file(src_name, dst_name, preserve_mode,\n preserve_times, update, verbose=verbose,\n dry_run=dry_run)\n outputs.append(dst_name)\n\n return outputs", "def apply(self, tree):\n raise NotImplementedError()", "def doSelect(inCmpFile, outCmpFile, idxs):\n def trimDataset(groupName, alnIdxID, inCmp, outCmp, fmt, idName = 'ID'):\n ids = outCmp[fmt.ALN_INDEX][:,alnIdxID]\n nds = '/'.join([groupName, idName])\n msk = NP.array([x in ids for x in inCmp[nds].value]) # got to be an NP.array\n for dsName in inCmp[groupName].keys():\n copyDataset('/'.join([groupName, dsName]), inCmp, outCmp,\n msk, fmt)\n\n def copyGroup(groupName, inCmp, outCmp):\n if groupName in inCmp:\n outCmp.copy(inCmp[groupName], groupName)\n\n try:\n inCmp = H5.File(inCmpFile, 'r')\n outCmp = H5.File(outCmpFile, 'w') # fail if it exists.\n idxs = NP.array(idxs)\n fmt = CmpH5Format(inCmp)\n\n if not (NP.max(idxs) < inCmp[fmt.ALN_INDEX].shape[0] and\n NP.min(idxs) >= 0):\n raise PBH5ToolsException(\"Invalid idxs specified, must be within [0, %d)\" %\n inCmp[fmt.ALN_INDEX].shape[0])\n\n # copy over the AlnIndex and other AlnInfo elements\n # correpsonding to idxs to new file.\n for dsName in inCmp[fmt.ALN_INFO].keys():\n copyDataset('/'.join([fmt.ALN_INFO, dsName]), inCmp, outCmp, idxs, fmt)\n\n # reset the ALN_ID\n outCmp[fmt.ALN_INDEX][:,fmt.ID] = \\\n NP.array(range(1, outCmp[fmt.ALN_INDEX].shape[0] + 1))\n\n # trim the other datasets\n trimDataset(fmt.ALN_GROUP, fmt.ALN_ID, inCmp, outCmp, fmt)\n # trimDataset(fmt.REF_GROUP, fmt.REF_ID, inCmp, outCmp, fmt)\n # trimDataset(fmt.MOVIE_INFO, fmt.MOVIE_ID, inCmp, outCmp, fmt)\n # copy Ref,Movie dataset whole\n for groupName in [fmt.REF_GROUP,fmt.MOVIE_INFO]:\n for dsName in inCmp[groupName].keys():\n copyDataset('/'.join([groupName,dsName]), inCmp, outCmp, None, fmt)\n\n # other groups will go over whole hog\n copyGroup(fmt.FILE_LOG, inCmp, outCmp)\n copyGroup(fmt.REF_INFO, inCmp, outCmp)\n copyGroup(fmt.BARCODE_INFO, inCmp, outCmp)\n\n # now we copy over the actual data\n for i in xrange(0, 
outCmp[fmt.ALN_GROUP_ID].shape[0]):\n # figure out what reads are in this group.\n agID = outCmp[fmt.ALN_GROUP_ID][i]\n agPT = outCmp[fmt.ALN_GROUP_PATH][i]\n alnIdx = outCmp[fmt.ALN_INDEX].value\n whReads = NP.where(agID == alnIdx[:,fmt.ALN_ID])[0]\n offBegin = alnIdx[whReads, fmt.OFFSET_BEGIN]\n offEnd = alnIdx[whReads, fmt.OFFSET_END]\n totalSize = NP.sum((offEnd - offBegin) + 1) # 0 in between\n\n for dsName in inCmp[agPT].keys():\n fullPath = '/'.join([agPT, dsName])\n newDs = outCmp.create_dataset(fullPath, shape = (totalSize,),\n dtype = inCmp[fullPath].dtype)\n origDs = inCmp[fullPath]\n cs = 0\n for j in xrange(0, len(whReads)):\n newEnd = cs + offEnd[j] - offBegin[j]\n newDs[cs:newEnd] = origDs[offBegin[j]:offEnd[j]]\n outCmp[fmt.ALN_INDEX][whReads[j],fmt.OFFSET_BEGIN] = cs\n outCmp[fmt.ALN_INDEX][whReads[j],fmt.OFFSET_END] = newEnd\n cs = newEnd\n\n # copy over the top-level attributes\n copyAttributes(inCmp, outCmp)\n\n # remove the offset table\n deleteIfExists(outCmp, fmt.REF_OFFSET_TABLE)\n deleteAttrIfExists(outCmp, fmt.INDEX_ATTR)\n\n # close the sucker\n logging.debug(\"Closing output cmp.h5 file.\")\n outCmp.close()\n\n except Exception, e:\n logging.exception(e)\n try:\n os.remove(outCmpFile)\n except:\n pass\n raise e", "def create_tree(outFile, tree, path='/'):\n for key, foo in tree.list():\n if outFile.has_node(path, key):\n logging.debug('Path already found:', path, key)\n continue\n logging.debug('Creating group:', path, key)\n outFile.create_group(path, key, key)\n dest = path + key + '/'\n if outFile.has_node(dest):\n continue\n create_tree(outFile, tree.child(key), dest)", "def copy_tree_ignore_except(src_dir, dest_dir,\n file_exts=['.py'],\n ignore_dirs=['checkpoints', 'external', 'datasets', 'stable_checkpoints', 'outputs']):\n print('Copying tree from \"{}\" to \"{}\"'.format(src_dir, dest_dir))\n print('Keeping only files with the following extensions: {}'.format(', '.join(file_exts)))\n print('Ignoring the following directories completely: {}'.format(', '.join(ignore_dirs)))\n\n def ignore_filter(cur_dir, contents):\n # contents are from os.listdir() and could be files or directories\n\n # ignore this directory completely\n if os.path.basename(cur_dir) in ignore_dirs:\n return contents\n\n ignored = []\n for c in contents:\n if c in ignore_dirs:\n continue\n if not os.path.isdir(os.path.join(cur_dir, c)): # isn't a directory\n # ignore files that don't have desired extension\n ignore = True\n for ext in file_exts:\n if c.endswith(ext):\n ignore = False\n if ignore:\n ignored.append(c)\n return ignored\n\n # ignore is a callable that receives directory being visited, and list of its contents\n shutil.copytree(src_dir, dest_dir, ignore=ignore_filter)", "def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()", "def test_tree_mode3(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_and.remove_child(xp_2)\n # check references after remove\n self.assertTrue(xp_and._parent 
is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def view_tree_set(v, treeset):\r\n treeset.add(v)\r\n for cl, v_input_pos_to_cl in v.clients:\r\n if cl == 'output':\r\n continue\r\n vmap = getattr(cl.op, 'view_map', {})\r\n dmap = getattr(cl.op, 'destroy_map', {})\r\n for opos, iposlist in vmap.items() + dmap.items():\r\n if v_input_pos_to_cl in iposlist:\r\n if cl.outputs[opos] not in treeset:\r\n view_tree_set(cl.outputs[opos], treeset)", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def prune_tree(self):\n tree = copy.deepcopy(self.tree)\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node(tree, tree.root)\n return tree", "def convert_treebank(input_dir, output_dir, strategy, subtask):\n\n for f in input_dir.iterdir():\n with open(f, \"r\") as json_file:\n docs = json.load(json_file)\n trees = \"\"\n for doc in docs[\"docs\"]:\n for sent in doc[\"sents\"]:\n graph = sent[\"graph\"]\n if strategy == \"start\":\n tree = traverse_graph_start(graph)\n elif strategy == \"start-without-pos\":\n tree = traverse_graph_start_without_pos(graph)\n elif strategy == \"end\":\n tree = traverse_graph_end(graph)\n elif strategy == \"end-extra-node\":\n tree = traverse_graph_end_extra_node(graph)\n elif strategy == \"start-end-extra-node\":\n tree = traverse_graph_start_end_extra_node(graph)\n elif strategy == \"start-end-extra-node-heuristic\":\n tree = traverse_graph_start_end_extra_node_heuristic(graph) \n if subtask:\n tree = subtask_prune(tree)\n tree_string = get_string(tree)\n trees += tree_string + \"\\n\"\n with open(output_dir.joinpath(f.name).with_suffix(\".txt\"), \"w+\") as tree_files:\n tree_files.write(trees)", "def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_simple_node_gene(0, cfg)\n gene1_act 
= deepcopy(gene1.activation)\n gene1_agg = deepcopy(gene1.aggregation)\n gene1_bias = deepcopy(gene1.bias)\n gene2_act = deepcopy(gene2.activation)\n gene2_agg = deepcopy(gene2.aggregation)\n gene2_bias = deepcopy(gene2.bias)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.activation = 'c'\n gene3.aggregation = 'c'\n gene3.bias = -10\n \n # Check for unchanged parents\n self.assertEqual(gene1.activation, gene1_act)\n self.assertEqual(gene1.aggregation, gene1_agg)\n self.assertEqual(gene1.bias, gene1_bias)\n self.assertEqual(gene2.activation, gene2_act)\n self.assertEqual(gene2.aggregation, gene2_agg)\n self.assertEqual(gene2.bias, gene2_bias)", "def recursively_compare_tree_against_html(self, func):\n def inner(obj, node):\n # invoke comparator function\n func(obj=obj, node=node)\n\n # filter\n child_nodes = self.get_children_of_node(node)\n\n # same number of object children and html child nodes\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # loop over children and call recursive compare on them\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n inner(obj=child_obj, node=child_node)\n\n # call inner() with root elements\n inner(obj=self.document.root, node=self.soup.body)", "def _copy_output(src: Graph, dst: Graph):\n for n_src, n_dst in zip(src.nodes, dst.nodes):\n if n_src.op == 'output':\n n_dst.meta = n_src.meta", "def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_connection_genes((-1, 0), cfg)\n gene1_en = deepcopy(gene1.enabled)\n gene1_w = deepcopy(gene1.weight)\n gene2_en = deepcopy(gene2.enabled)\n gene2_w = deepcopy(gene2.weight)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.enabled = False\n gene3.weight = -10\n \n # Check for unchanged parents\n self.assertEqual(gene1.enabled, gene1_en)\n self.assertEqual(gene1.weight, gene1_w)\n self.assertEqual(gene2.enabled, gene2_en)\n self.assertEqual(gene2.weight, gene2_w)", "def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))", "def copy(self, event):\n return", "def copy_skel(src, dest):\n md_common.copytree(src, dest)", "def run():\n \n # GET SELECTED NODES\n sel = hou.selectedNodes()\n \n # DISPLAY WARNINGS IF TWO NODES ARE NOT SELECTED\n if len(sel) != 2:\n hou.ui.displayMessage(\"Please select exactly two nodes.\")\n\n\n # INITIALIZE 
VARIABLES\n node1 = sel[0]\n node2 = sel[1]\n\n # COPY PARAMETERS\n copy(node1, node2)" ]
[ "0.62684405", "0.5746463", "0.5593152", "0.5558629", "0.5449127", "0.5425309", "0.53680193", "0.5225137", "0.51527005", "0.5107789", "0.5051202", "0.504769", "0.5014795", "0.50122386", "0.49738657", "0.49728838", "0.49718148", "0.496103", "0.49524227", "0.4934227", "0.49341118", "0.4926265", "0.49151012", "0.48898342", "0.48875463", "0.48850015", "0.4859013", "0.4840263", "0.48214337", "0.47888294", "0.47600335", "0.4749244", "0.46801072", "0.46659586", "0.4663929", "0.4656415", "0.46423173", "0.4624362", "0.46205473", "0.4583041", "0.45786503", "0.4560235", "0.45472118", "0.45337403", "0.45207727", "0.45112887", "0.44993055", "0.4489996", "0.4488905", "0.44863674", "0.44850913", "0.4479716", "0.445536", "0.445374", "0.44504267", "0.44501138", "0.44448534", "0.4439312", "0.4431234", "0.44299302", "0.44248676", "0.4421627", "0.441884", "0.44124612", "0.43999428", "0.4396387", "0.43931618", "0.4392247", "0.43871716", "0.4385632", "0.43855047", "0.43806234", "0.43778628", "0.43775904", "0.4373529", "0.4370976", "0.4356231", "0.43430954", "0.43307644", "0.4326611", "0.43246064", "0.43230322", "0.43199414", "0.43178347", "0.43168542", "0.43122676", "0.43108058", "0.43086076", "0.43059015", "0.43055815", "0.43046093", "0.42992818", "0.42922378", "0.4290908", "0.42908645", "0.42885724", "0.4288156", "0.42843276", "0.427575", "0.427336" ]
0.78454643
0
Copy `in` to `out` for events where event.`key` does not exist in `keys`. `keys` is the set of keys seen so far.
def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):
    for entry in in_tree:
        key_value = getattr(entry, key)
        if not key_value in keys:
            out_tree.Fill()
            keys.add(key_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_missing_values(events):\n ret = deepcopy(events)\n srchd, key_events = [], []\n for evt in events:\n _tmp = [(j, e) for j, e in enumerate(events) if e['key']\n == evt['key'] and not e['key'] in srchd]\n if _tmp != []:\n key_events.append(_tmp)\n srchd.append(evt['key'])\n dels = []\n for di_evts in key_events:\n if di_evts[0][1]['event'] == 'keystrokeUp':\n dels.append(di_evts[0][0])\n if di_evts[len(di_evts) - 1][1]['event'] == 'keystrokeDown':\n dels.append(di_evts[len(di_evts) - 1][0])\n if dels != []:\n for i in sorted(dels, reverse=True):\n del ret[i]\n return ret", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def without_keys(keys):\n keys = frozenset(keys) # frozenset has efficient membership lookup\n return filter_keys_c(fnot(partial(operator.contains, keys)))", "def key_not_in(self, key_not_in):\n\n self._key_not_in = key_not_in", "def omit(self, *keys):\n return _({k: self[k] for k in self._ if k not in keys})", "def except_keys(dic, *keys):\n ret = dic.copy()\n for key in keys:\n try:\n del ret[key]\n except KeyError:\n pass\n return ret", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def exclude(m, keys):\n return {k: v for k, v in m.items() if k not in keys}", "def del_dict_keys(dict_in, keys):\n for key in keys:\n if key in dict_in:\n del dict_in[key]\n return dict_in", "def keep_in_dictionary(self,dictionary,*keys):\r\n remove_keys = [k for k in dictionary if k not in keys]\r\n self.remove_from_dictionary(dictionary,*remove_keys)", "def remove_keys_from_dict(dictionary, keys):\n\n # Copy dictionary\n dictionary_updated = dictionary.copy()\n try:\n [dictionary_updated.pop(key) for key in keys]\n except:\n print(\"Error: No ratio and sampling strategy parameters\")\n return dictionary_updated", "def Exclude(*keys):\n\n def exclude(row):\n res = dict(row)\n for k in keys:\n if k in res:\n del res[k]\n return res\n\n return \"Exclude\" >> beam.Map(exclude)", "def _filter_keys(item, keys):\n return dict((k, v) for k, v in item.iteritems() if k in keys)", "def _filter_keys(d: dict, keys: set) -> dict:\n return {key: d[key] for key in keys if key in d}", "def remove_outlier(keys):\n for key in keys:\n data_dict.pop(key, 0)", "def update_ifnotin(d1, d2):\n for k, v in d2.items():\n if k not in d1:\n d1[k] = v\n return d1", "def exclude_keys(dictionary: Mapping, keys: Sequence[Hashable]) -> dict:\n return {k: v for k, v in dictionary.items() if k not in keys}", "def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new", "def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def dict_filter(d, keys, into=dict):\n \n if hasattr(keys, \"__call__\"):\n f = keys\n keys = filter(f, d.keys())\n return into(map(lambda k:(k,d[k]), keys))", "def select_features(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def discard(self, key):\r\n if key in self.map: \r\n key, prev, next = self.map.pop(key)\r\n prev[NEXT] = 
next\r\n next[PREV] = prev\r\n if self.emitter:\r\n self.emitter.emit()", "def _pick(d, keys):\n return {k: v for k, v in d.items() if k in keys}", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def remove_outlier(dict_object, keys):\r\n for key in keys:\r\n dict_object.pop(key, 0)", "def dfilter(d: dict, *keys: Iterable, reverse=False) -> dict:\n return {k: v for k, v in d.items() if k in keys and not reverse or k not in keys and reverse}", "def remove_outlier(dict_object, keys):\n for key in keys:\n dict_object.pop(key, 0)", "def filterKeys(document, keys):\n return {key: document[key] for key in keys}", "def discard(self, key):\r\n if key in self.map: \r\n key, prev, next = self.map.pop(key)\r\n prev[NEXT] = next\r\n next[PREV] = prev", "def evictOldkeys(self, cutOff):\n for key in self.values.keys():\n time = self.values[key][3]\n if time < cutOff:\n del self.values[key]", "def remove_by_keys(self, keys):\n return list(filter(lambda item: item.keyword not in set(keys), self._metadata))", "def invert(data_dict, keys = 'all'):\n if keys.lower() == 'all':\n to_fix_keys = set(data_dict.keys()) - set({'time'})\n \n else:\n to_fix_keys = np.array([keys]).flatten()\n \n out = data_dict.copy()\n tmp = dict()\n \n for key in to_fix_keys:\n tmp.update({key:data_array_builder()})\n \n for key in to_fix_keys:\n ida = iterable_data_array(out, key)\n for x in ida:\n tmp[key].append(-1*x)\n \n for key in to_fix_keys:\n out.update({key:tmp[key].build()})\n \n return out", "def key_not_contains(self, key_not_contains):\n\n self._key_not_contains = key_not_contains", "def _prepare_keys(self, keys):\n # sorting is guaranteed to enable comparisons throughout the class\n for key in keys:\n key.sort()\n\n # ensure no overlap with the primary key\n if self._primary_key in keys:\n keys.remove(self._primary_key)\n\n return keys", "def remove_unused_keys(cop):\n delete_these = [\n 'officer_atty',\n 'officer_atty_firm',\n 'case_id',\n 'cop_first_name',\n 'cop_middle_initial',\n 'cop_last_name',\n 'entered_by',\n 'entered_when',\n 'fact_checked_by',\n 'fact_checked_when',\n 'matched_by',\n 'matched_when'\n ]\n\n for key in delete_these:\n del cop[key]\n\n return cop", "def remove_keys(_dict, _keys):\n if isinstance(_keys, str):\n if _keys in _dict:\n del _dict[_keys]\n else:\n for _key in _keys:\n _dict = remove_keys(_dict, _key)\n return _dict", "def pad_keys(items, keys):\n for key in keys:\n if key not in items:\n items[key] = EmptySignature()\n return items", "def only_some_keys(dic, *keys):\n ret = {}\n for key in keys:\n ret[key] = dic[key] # Raises KeyError.\n return ret", "def keep_entry(dict_input, parent_key, child_keys):\n\n dict_output = dict()\n\n child_keys = [''.join((parent_key, '_', child_key)) for child_key in child_keys]\n\n for key, value in dict_input.items():\n if key.startswith(parent_key) and key not in child_keys:\n pass\n else:\n dict_output.update({key: value})\n\n return dict_output", "def exclusively(self, keys, lst=None):\n minimal = self.minimal() if lst is None else lst\n\n def make_exclusive(d, keys):\n dct = {}\n for k in keys:\n if k in d:\n dct[k] = d[k]\n else:\n dct[k] = -999\n return dct\n\n lst = []\n for d in minimal:\n dct = make_exclusive(d, keys)\n if len(dct) > 0:\n lst.append(dct)\n return lst", "def filter_dic_by_keys(dic,allowed_keys):\n new_dic = {}\n for key in dic:\n if key in allowed_keys:\n new_dic[key] = dic[key]\n return new_dic", "def pick(keys, _dict):\n\n key_set = set(keys) & set(_dict.keys())\n\n 
return dict((key, _dict[key]) for key in key_set)", "def remap_keys(ds, new_keys):\n logger.info(\"Remapping keys of every element using config:\\n %s\", _dict_to_logstring(new_keys))\n\n def remap_keys(x):\n return {new_keys.get(k, k): v for k, v in x.items() if new_keys.get(k, k) is not None}\n return ds.map(remap_keys, num_parallel_calls=TF_AUTOTUNE)", "def removeAllKeys(self) -> None:\n ...", "def entries_not_in(self, other):\n other_keys = set(other._entries.keys())\n filtered_order = [k for k in self._order if k not in other_keys]\n return [self._entries[k] for k in filtered_order]", "def pullSerializedAll(*keys):", "def keep_types(self, base_key, out_key, *types):\n self.params['%s.%s' % (base_key, out_key)] = keep_types(\n self.params[base_key], *types)", "def key_not_starts_with(self, key_not_starts_with):\n\n self._key_not_starts_with = key_not_starts_with", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def key_not_ends_with(self, key_not_ends_with):\n\n self._key_not_ends_with = key_not_ends_with", "def unset(self, keys=None):\n if not keys:\n keys = self._trans_dict.keys()\n for key in keys:\n key = key.upper()\n self._trans_dict[key] = key", "def keep_types(self, base_key, out_key, *types):\n self.params[\"%s.%s\" % (base_key, out_key)] = self.keep_types_s(self.params[base_key], types)", "def select_keys(dictionary, keys):\n return dict((k, dictionary[k]) for k in keys\n if k in dictionary)", "def clearKeys(self):\n for attr in self._filter():\n pm.cutKey(attr)", "def pick(m, *keys):\n return {k: v for k, v in m.items() if k in keys}", "def pullAll(*keys):", "def _exclusive_intersect(self, keys):\n #inc_s = reduce(lambda x, y: x.intersection(y), \n # (self[x] for x in keys))\n inc_s = self[keys[0]].copy()\n for other_key in self:\n if other_key in keys:\n inc_s.intersection_update(self[other_key])\n else:\n inc_s.difference_update(self[other_key])\n\n return inc_s", "def get_added_dicts(a, b):\n tmp = copy.deepcopy(a)\n for key, val in b.iteritems():\n if key not in tmp:\n tmp[key] = val\n return tmp", "def winnow_by_keys(dct, keys=None, filter_func=None):\n has = {}\n has_not = {}\n\n for key in dct:\n key_passes_check = False\n if keys is not None:\n key_passes_check = key in keys\n elif filter_func is not None:\n key_passes_check = filter_func(key)\n\n if key_passes_check:\n has[key] = dct[key]\n else:\n has_not[key] = dct[key]\n\n return WinnowedResult(has, has_not)", "def pick(self, *keys):\n return _({k: self[k] for k in keys})", "def invalidate_keys(self, keys):\r\n if not keys:\r\n return\r\n flush, flush_keys = self.find_flush_lists(keys)\r\n\r\n if flush:\r\n cache.delete_many(flush)\r\n if flush_keys:\r\n self.clear_flush_lists(flush_keys)", "def exclude_keys(value, *exclude):\n\n if not isinstance(value, QueryDict):\n raise RuntimeError(\"getquerydict should be used with QueryDict instances only (e.g. 
request.GET)\")\n\n value = value.copy()\n for key in exclude:\n if key in value: del value[key]\n return value", "def remove_from_dictionary(self,dictionary,*keys):\r\n for key in keys:\r\n if key in dictionary:\r\n value = dictionary.pop(key)\r\n logger.info(\"removed item with key '%s' and value '%s'\" %(key,value))\r\n else:\r\n logger.info(\"Key '%s' not found\" %(key))", "def select_keys(my_dict: Dict, keys: Sequence) -> Dict:\n keyset = set(keys)\n return {k: v for k, v in my_dict.items() if k in keyset}", "def filter_valid(self, keys):\n return np.fromiter(filter(lambda id: id in self.embed.ind, keys), dtype=np.int32)\n # return np.fromiter((key for key in keys if key in self.embed.ind), dtype=np.int32)", "def delete_keys_from_dict(self, orig_dict, keys_whitelist):\n for k in list(orig_dict.keys()):\n if k not in keys_whitelist:\n del orig_dict[k]\n\n for v in orig_dict.values():\n if isinstance(v, dict):\n self.delete_keys_from_dict(v, keys_whitelist)\n\n return orig_dict", "def copy_keys(source: str, destination: str) -> None:\n try:\n keys = [filename for filename in os.listdir(source) if filename.lower().endswith(\".bikey\")]\n except FileNotFoundError:\n logging.debug(f\"Error when searching for *.bikey files to copy at {source}\", exc_info=True)\n keys = []\n\n if len(keys) == 0:\n logging.warning(f\"No *.bikey files found in {source}\")\n return\n\n os.makedirs(destination, exist_ok=True)\n\n for key in keys:\n shutil.copy2(os.path.join(source, key), destination)", "def extract_keys(dic, *keys):\n for k in keys:\n if k not in dic:\n raise KeyError(\"key %r is not in original mapping\" % k)\n r1 = {}\n r2 = {}\n for k, v in dic.items():\n if k in keys:\n r1[k] = v\n else:\n r2[k] = v\n return r1, r2", "def delete_keys_from_dict(dict_del, the_keys):\n # make sure the_keys is a set to get O(1) lookups\n if type(the_keys) is not set:\n the_keys = set(the_keys)\n for k, v in dict_del.items():\n if k in the_keys:\n del dict_del[k]\n if isinstance(v, dict):\n delete_keys_from_dict(v, the_keys)\n if isinstance(v, list):\n for item in v:\n if isinstance(item, dict):\n delete_keys_from_dict(item, the_keys)\n return dict_del", "def _filter_node_map(\n node_map: 'collections.OrderedDict[str, p_pb2.PipelineNode]',\n from_node_ids: Collection[str], to_node_ids: Collection[str],\n skip_node_ids: Collection[str]\n) -> 'collections.OrderedDict[str, p_pb2.PipelineNode]':\n ancestors_of_to_nodes = _traverse(node_map, _Direction.UPSTREAM, to_node_ids)\n descendents_of_from_nodes = _traverse(node_map, _Direction.DOWNSTREAM,\n from_node_ids)\n nodes_to_keep = ancestors_of_to_nodes.intersection(\n descendents_of_from_nodes) - set(skip_node_ids)\n filtered_node_map = collections.OrderedDict()\n for node_id, node in node_map.items():\n if node_id in nodes_to_keep:\n filtered_node_map[node_id] = node\n return filtered_node_map", "def delete_keys_from(self, entry_from_key, do_manual_check=True):\r\n entry = self.get_entry()\r\n for key in self.keys:\r\n del entry_from_key[key]\r\n if do_manual_check:\r\n to_del = []\r\n for key, key_entry in entry_from_key.iteritems():\r\n if key_entry == entry:\r\n to_del.append(key)\r\n for key in to_del:\r\n del entry_from_key[key]", "def filter(self, new_set):\n for old_set in self.itervalues():\n for feat in old_set.iterkeys():\n if feat not in new_set:\n del old_set[feat]\n return self", "def filter_args(func, keys):\n filtered = {}\n sign = list(signature(func).parameters.keys())\n for k, v in {**keys}.items():\n if k in sign:\n filtered[k] = v\n return 
filtered", "def delete_keys_tags(self,\r\n index,\r\n deletedkeys):\r\n\r\n\r\n for k_temp in deletedkeys:\r\n k_temp = k_temp.strip()\r\n if k_temp in set(self.get_keys()):\r\n self.discard_index_from_key(k_temp, index)\r\n if self.get_indexes_for_key(k_temp) == set():\r\n self.eliminate_key(k_temp)\r\n for t_temp in self.get_tags():\r\n if k_temp in self.get_keys_for_tag(t_temp):\r\n self.discard_key_from_tag(t_temp,k_temp)\r\n if not self.get_keys_for_tag(t_temp):\r\n self.delete_tag(t_temp)", "def delete_keys_from_dict(d, keys):\n if isinstance(d, dict):\n for field in d.keys():\n if field in keys:\n del d[field]\n elif isinstance(d[field], dict) or isinstance(d[field], list) or isinstance(d[field], set):\n delete_keys_from_dict(d[field], keys)\n elif isinstance(d, dict) or isinstance(d, list) or isinstance(d, set):\n for i in d:\n delete_keys_from_dict(i, keys)", "def _invertMapping(mapping):\n invertedMapping = ddict(set)\n for key, values in viewitems(mapping):\n for value in values:\n invertedMapping[value].add(key)\n return invertedMapping", "def generate_new_keys(old_keys):\n new_keys = {}\n for new, old in enumerate(sorted(old_keys), 1):\n new_keys[old] = str(new) # key in JSON object is always string\n return new_keys", "def spend_pkh_fund(tx_ins, in_keys, tx_outs):\n _txs_in = []\n _un_spent = []\n for tx_id, idx, balance, address in tx_ins:\n # must h2b_rev NOT h2b\n tx_id_b = h2b_rev(tx_id)\n _txs_in.append(TxIn(tx_id_b, idx))\n\n script = network.contract.for_address(address)\n _un_spent.append(Spendable(balance, script, tx_id_b, idx))\n\n _txs_out = []\n for balance, receiver_address in tx_outs:\n _txs_out.append(TxOut(balance, network.contract.for_address(receiver_address)))\n\n version, lock_time = 1, 0\n tx = Tx(version, _txs_in, _txs_out, lock_time)\n tx.set_unspents(_un_spent)\n\n solver = build_hash160_lookup([int(pri_hex, 16) for pri_hex in in_keys], [secp256k1_generator])\n tx.sign(solver, hash_type=SIGHASH_ALL)\n\n return tx.as_hex(), tx.id()", "def safe_update(dict_to, dict_from):\n for key, val in dict(dict_from).iteritems():\n if key in dict_to:\n raise KeyError(key)\n dict_to[key] = val\n return dict_to", "def get_flush_lists(self, keys):\r\n return set(e for flush_list in\r\n filter(None, cache.get_many(keys).values())\r\n for e in flush_list)", "def copymod(dct, without=None, **kwargs):\r\n if without is None:\r\n without = []\r\n rval = copy(dct)\r\n for a in without:\r\n if a in rval:\r\n del rval[a]\r\n for kw, val in kwargs.items():\r\n rval[kw] = val\r\n return rval", "def setkeys(self, keys):\n # FIXME: Efficiency? 
(use set for Python 2.4 :-)\n # NOTE: list(keys) rather than keys[:] because keys[:] returns\n # a tuple, if keys is a tuple.\n kcopy = list(keys)\n kcopy.sort()\n self._sequence.sort()\n if kcopy != self._sequence:\n raise KeyError('Keylist is not the same as current keylist.')\n # NOTE: This makes the _sequence attribute a new object, instead\n # of changing it in place.\n # FIXME: efficiency?\n self._sequence = list(keys)", "def subtract(d1, d2):\n res = {}\n \n for key in d1:\n if key not in d2:\n res[key]=None\n\n return res", "def key_press(keys):\n return lambda e: e.key if e.type == pygame.KEYDOWN \\\n and e.key in keys else EventConsumerInfo.DONT_CARE", "def prune(self): # HashMap.prune\n for hashval, list in self.contentHash.iteritems():\n newlist=[]\n for entry in list:\n if not entry.deleted:\n newlist.append(entry)\n self.contentHash[hashval]=newlist", "def cut(d, k):\n\tif isinstance(d, dict):\n\t\tn = d.copy()\n\t\tif k in n:\n\t\t\tdel n[k]\n\t\treturn n\n\treturn [v for v in d if v != k]", "def strip(a, b):\n out = {}\n for key, value in a.items():\n if key in b:\n out[key] = value\n return out", "def test_strip_unnecessary_keys():\n assert len(strip_unnecessary_keys(EXAMPLE_GOOD_OFFER)) == len(OUTPUT_KEYS)\n assert strip_unnecessary_keys(EXAMPLE_BAD_OFFER) is None", "def mget(self, keys: List[K]) -> List[Optional[V]]:\n # Note an explicit check for None, because falsy values can be valid keys\n valid_index_and_key_list = [(idx, key) for idx, key in enumerate(keys) if key is not None]\n # Initialize the results\n results = [None] * len(keys)\n\n if valid_index_and_key_list:\n # Split indices and keys into separate lists\n valid_keys = [ik[1] for ik in valid_index_and_key_list]\n # Keep track of which indices were not hit in the cache\n missed_index_and_key_list = []\n layer_hits = self._layer.mget(valid_keys)\n for layer_hit, index_and_key in zip(layer_hits, valid_index_and_key_list):\n if layer_hit is None:\n missed_index_and_key_list.append(index_and_key)\n else:\n results[index_and_key[0]] = layer_hit\n\n if missed_index_and_key_list:\n missed_keys = [ik[1] for ik in missed_index_and_key_list]\n base_hits = self._base.mget(missed_keys)\n\n mapping_to_write = {}\n for base_hit, index_and_key in zip(base_hits, missed_index_and_key_list):\n if base_hit:\n results[index_and_key[0]] = base_hit\n mapping_to_write[index_and_key[1]] = base_hit\n\n self._layer.mset(mapping_to_write)\n\n return results", "def pre_post_chests(self, keys, chests):\n keys = Counter(keys)\n pre = []\n post = list(chests)\n\n for i in chests:\n chest = self.chests[i]\n if chest['lock'] in chest['keys'] and keys[chest['lock']] != 0:\n pre.append(i)\n post.remove(i)\n chest = self.chests[i]\n keys.update(chest['keys'])\n keys.subtract([chest['lock']])\n\n return pre, post, keys", "def merge_lines(old_lines, new_dict):\n old_dict = collections.OrderedDict()\n for key, value in old_lines:\n old_dict.setdefault(key, []).append(value)\n\n old_keys = set(old_dict)\n\n del_keys = {k for k, v in new_dict.iteritems() if not v}\n new_keys = ({k for k, v in new_dict.iteritems() if v} | old_keys) - del_keys\n\n # delete keys\n new_lines = [(k, v) for k, v in old_lines if k in new_keys]\n\n for change_key in (new_keys & old_keys):\n insert_idx = None\n to_nuke = set()\n for i, (k, v) in enumerate(new_lines):\n if k == change_key:\n if insert_idx is None:\n insert_idx = i\n to_nuke.add(i)\n assert to_nuke # because it's in old_keys\n new_lines = [(k, v) for i, (k, v) in enumerate(new_lines)\n if i not in to_nuke]\n 
new_lines[insert_idx:insert_idx] = [\n (change_key, v)\n for v in new_dict.get(change_key, old_dict[change_key])\n ]\n\n for add_key in new_dict: # Preserve sort order of new lines\n if add_key in old_keys or add_key in del_keys:\n continue\n new_lines.extend((add_key, v) for v in new_dict[add_key])\n\n return new_lines", "def prune_option_list(opts, keys):\n opt_d = opt_to_dict(opts)\n for k in keys:\n if k in opt_d:\n del opt_d[k]\n return [k for item in opt_d.iteritems() for k in item]", "def _partition_keys_by_slot(self, keys: Iterable[KeyT]) -> Dict[int, List[KeyT]]:\n\n slots_to_keys = {}\n for key in keys:\n slot = key_slot(self.encoder.encode(key))\n slots_to_keys.setdefault(slot, []).append(key)\n\n return slots_to_keys", "def subtract(d1, d2):\n res = {}\n for key in d1:\n if key not in d2:\n res[key] = None\n return res", "def key_value_copy(source_data, dest_data, dest_key_cv, dest_value_cv):\n new_dest_key_cv = []\n new_dest_value_cv = []\n num_keys = len(dest_key_cv)\n to_copy = source_data if source_data is not None else dest_data\n for i, data_val in enumerate(to_copy):\n if i < num_keys:\n new_dest_key_cv.append(\n (dest_key_cv[i][0], dest_key_cv[i][1], data_val)\n )\n else:\n if (source_data is None or\n dest_data is None or\n data_val != dest_data[i]):\n new_dest_value_cv.append(\n (dest_value_cv[i - num_keys][0],\n dest_value_cv[i - num_keys][1], data_val)\n )\n return (new_dest_key_cv, new_dest_value_cv)", "def _remove_keys(results: dict, remove: list) -> dict:\n removed = {}\n for key, val in results.items():\n if key not in remove:\n removed[key] = val\n return removed", "def _filter_new(self, metadata, keys):\n single_key_list = []\n key_path_list = []\n new_meta = {}\n for key in keys:\n key = key.replace(\"extendedMetadata\", \"metadata\").replace(\"extendedmetadata\", \"metadata\")\n #\n # fixing issues with oci-metadata not working with hyphenated\n # keys; this was done initially to be consistent with the OCI SDK.\n # if key.find('-') >= 0:\n # key = key.replace('-', '_')\n\n if key.find('/') >= 0:\n # key is a path\n new_keys = []\n key_l = key.split(\"/\")\n meta = metadata\n _get_path_keys(meta, key_l, new_keys)\n key_path_list += new_keys\n for nkey in new_keys:\n value = _get_by_path(metadata, nkey)\n new_meta[str(nkey)] = value\n else:\n single_key_list.append(key)\n if len(single_key_list) > 0:\n ret_meta = self._filter(metadata, single_key_list)\n else:\n ret_meta = {}\n\n for key_path in key_path_list:\n _set_by_path(ret_meta, key_path, new_meta[str(key_path)])\n\n return ret_meta", "def _set_unique_keys(self, keys):\n # make this final once set\n if self._unique_keys:\n raise AlreadySetError()\n\n self._unique_keys = self._prepare_keys(keys)" ]
[ "0.623754", "0.6082424", "0.5918844", "0.59167224", "0.56955945", "0.5677415", "0.5673033", "0.5609741", "0.55079687", "0.55027145", "0.5486954", "0.5394884", "0.53932744", "0.5390138", "0.53668404", "0.53537357", "0.533538", "0.52792567", "0.5232413", "0.5232282", "0.5213723", "0.5176005", "0.50757587", "0.504963", "0.50445294", "0.50268996", "0.49940884", "0.49480045", "0.49420717", "0.49132234", "0.49010262", "0.48277548", "0.4820372", "0.47837672", "0.47768933", "0.47610307", "0.4757291", "0.47431347", "0.47331098", "0.4731148", "0.47298455", "0.47240356", "0.4720815", "0.4714404", "0.4659387", "0.46529418", "0.464689", "0.46286586", "0.46157268", "0.45983833", "0.45966652", "0.45931122", "0.45821133", "0.457288", "0.4571924", "0.45645133", "0.45579326", "0.45573768", "0.4555154", "0.45485693", "0.45479697", "0.4547265", "0.45384753", "0.45351592", "0.45217094", "0.44971967", "0.4497074", "0.44825277", "0.44733107", "0.44716913", "0.44716042", "0.44606566", "0.445916", "0.4451341", "0.4450891", "0.44494987", "0.4445257", "0.44403827", "0.44334388", "0.44268265", "0.4419229", "0.4417815", "0.4416823", "0.44164717", "0.44103858", "0.44014367", "0.44013217", "0.4398568", "0.43956375", "0.43942127", "0.438293", "0.4381028", "0.43780774", "0.43733242", "0.4372134", "0.43663076", "0.4362357", "0.43590218", "0.4354962", "0.4352248" ]
0.663589
0
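A minimal usage sketch for `tree_copy_duplicate_removal` above, assuming PyROOT `TTree` objects; the file, tree, and branch names here are hypothetical, not part of the dataset row:

import ROOT

# Hypothetical input: a file containing a TTree "events" with a branch "event_id".
in_file = ROOT.TFile.Open("input.root")
in_tree = in_file.Get("events")

# Create the output file first so the empty clone is attached to it.
out_file = ROOT.TFile("deduplicated.root", "RECREATE")
out_tree = in_tree.CloneTree(0)  # same branch layout, zero entries

seen_keys = set()
tree_copy_duplicate_removal(in_tree, out_tree, "event_id", seen_keys)

out_tree.Write()
out_file.Close()

Iterating `in_tree` loads each entry into the shared branch buffers, so `out_tree.Fill()` copies the current entry; only the first entry per `event_id` value is kept.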
Returns a list of all the keys in a directory, only taking the most recent TKey.GetCycle() per TKey.GetName().
def most_recent_cycle_keys(directory):
    most_recent = {}
    # Generate a list of (name, (cyclenum, keyobject)) values
    all_keys = list(directory.GetListOfKeys())
    keys = ((k.GetName(), (k.GetCycle(), k)) for k in all_keys)
    for name, cyclekey in keys:
        most_recent[name] = (max(cyclekey, most_recent[name])
                             if name in most_recent else cyclekey)
    # Just the key objects, sorted in name order.
    recent_keys = [most_recent[name][-1] for name in sorted(most_recent.keys())]
    # Return all_keys so that we don't lose anything
    return all_keys, recent_keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _keys(self):\n for name in listdir(abspath(self._path)):\n key, ext = splitext(name)\n if ext == \".pkl\":\n yield key", "def list(self):\n entries = []\n regex = re.compile('^%s' % self.directory)\n\n for obj in self.driver.list(self.directory):\n path, name = os.path.split(obj['Key'])\n entries.append(Entry(self.path(regex.sub(\"\", path)), name))\n\n return entries", "def list_all_keys(self):\n \n return self.keys", "def ListNames(self):\n\n # TODO: This check is flawed, because the current definition of\n # \"IsDirectory\" is the negation of \"is a file\". One registry path can\n # actually refer to a key (\"directory\"), a value of the same name (\"file\")\n # and the default value of the key at the same time.\n if not self.IsDirectory():\n return\n\n # Handle the special case where no hive is specified and just list the hives\n if self.hive is None:\n for name in dir(winreg):\n if name.startswith(\"HKEY_\"):\n yield name\n\n return\n\n try:\n with OpenKey(self.hive, self.local_path) as key:\n (self.number_of_keys, self.number_of_values,\n self.last_modified) = QueryInfoKey(key)\n\n found_keys = set()\n\n # First keys\n for i in range(self.number_of_keys):\n try:\n key_name = EnumKey(key, i)\n found_keys.add(key_name)\n yield key_name\n except OSError:\n pass\n\n # Now Values\n for i in range(self.number_of_values):\n try:\n name, unused_value, unused_value_type = EnumValue(key, i)\n\n # A key might contain a sub-key and value of the same name. Do not\n # yield the same name twice in this case. With only the name,\n # the caller cannot differentiate between a key and a value anyway.\n if name not in found_keys:\n yield name\n except OSError:\n pass\n\n except OSError as e:\n raise IOError(\"Unable to list key %s: %s\" % (self.key_name, e))", "def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False):\n storageScheme, keys = self.getkeys(\n path, filename=filename, directories=directories, recursive=recursive)\n keys = [storageScheme + \":///\" + key.bucket.name + \"/\" + key.name for key in keys]\n keys.sort()\n keys = select(keys, start, stop)\n return keys", "def list_(bank):\n try:\n _, keys = api.kv.get(bank + \"/\", keys=True, separator=\"/\")\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f'There was an error getting the key \"{bank}\": {exc}')\n if keys is None:\n keys = []\n else:\n # Any key could be a branch and a leaf at the same time in Consul\n # so we have to return a list of unique names only.\n out = set()\n for key in keys:\n out.add(key[len(bank) + 1 :].rstrip(\"/\"))\n keys = [o for o in out if not o.endswith(_tstamp_suffix)]\n return keys", "async def keys(self) -> Iterable[str]:", "def keys(self):\n keys = set()\n with pd.HDFStore(self.rootpath, mode=\"r\") as hdf:\n hdf5_keys = hdf.keys()\n\n for key in hdf5_keys:\n kp = key.split(\"/\")\n if len(kp) == 5:\n print(kp, len(kp))\n keys.add(kp[4])\n return list(keys)", "def keys(self):\n self._remove_expired()\n\n return self._d.keys()", "def keys(self, depth=None):\n if depth is not None:\n levels = self.levels[0:depth]\n else:\n levels = self.levels\n\n return [level.key for level in levels]", "def get_keys(self, ckey=None):\n if ckey:\n keys = self._get_keys(ckey)\n else:\n keys = self.keys()\n for key in self.keys():\n keys += [k for k in self._get_keys(key)]\n return list(set(keys))", "def fingerprint(dirnames, prefix=None, previous=[]):\n #pylint:disable=dangerous-default-value\n results = []\n for dirname in dirnames:\n for filename 
in os.listdir(dirname):\n fullpath = os.path.join(dirname, filename)\n if os.path.isdir(fullpath):\n results += fingerprint(\n [fullpath], prefix=filename, previous=previous)\n else:\n fullname = fullpath\n if prefix and fullname.startswith(prefix):\n fullname = fullname[len(prefix):]\n found = False\n for prevpath in previous:\n if fullname == prevpath['Key']:\n found = True\n break\n if not found:\n mtime = datetime.datetime.fromtimestamp(\n os.path.getmtime(fullpath), tz=utc)\n results += [{\"Key\": fullname,\n \"LastModified\": mtime.strftime(\n '%a, %d %b %Y %H:%M:%S %Z')}]\n return results", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def keys(self):\n # Collect all keys in each bucket\n all_keys = []\n for bucket in self.buckets:\n for key, value in bucket.items():\n all_keys.append(key)\n return all_keys", "def iterkeys(self):\n return self._d.iterkeys()", "def walk(self):\n for _root, _dirs, files in os.walk(self.root):\n for filename in files:\n if self.is_key(filename):\n yield filename", "def getUncachedGameKeys(self):\n theKeys = HashSet()\n for game in theCacheDirectory.listFiles():\n theKeys.add(game.__name__.replace(\".zip\", \"\"))\n return theKeys", "def keys(self) -> List:\n pass", "def keys(self):\n ks = dict.keys(self)\n ks.sort()\n return ks", "def keys(self):\n # TODO: Collect all keys in each of the buckets\n all_keys = [] # Will store all the key\n for bucket in self.buckets:\n for key in bucket:\n if key is not None:\n all_keys.append(key[0])\n return all_keys", "def keys(self):\n klst = list(self._maps.keys())\n klst.sort()\n return klst", "def get_keys(self):\r\n return self._keys", "def AllKeys(self) -> _n_0_t_1[str]:", "def keys(self):\n for ts in self:\n yield ts", "def keys(self) -> List[str]:\n raise NotImplementedError", "def allkeys(self, as_str=False):\n for key in self.__allkeys((\"__ROOT__\",), {\"__ROOT__\": self}):\n yield \".\".join(key) if as_str else key", "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "def list_keys(bucket, path, suffix=None):\n\t# Apparently there is no easy way of doing this except to loop over the result\n\t# chek the parameters delimiter='', marker=''\n\t# then the list returns boto.s3.prefix.Prefix objects on matches\n\tfiles = []\n\tpath = path.strip('/')\n\tfor key in bucket.list(path):\n\t\trelative_path = key.name.replace(path, '').lstrip('/')\n\t\tif not relative_path:\n\t\t\t# Empty\n\t\t\tcontinue\n\t\tif '/' in relative_path.strip('/'):\n\t\t\t# Skip sub-folders\n\t\t\tcontinue\n\n\t\tif not suffix or relative_path.endswith(suffix):\n\t\t\tfiles.append(relative_path)\n\treturn files", "def keys(self):\n return list(self.__iter__())", "def RecurseKeys(self):\n root_key = self.GetRootKey()\n if root_key:\n for registry_key in root_key.RecurseKeys():\n yield registry_key", "def iterkeys(self):\n\n for bucket in self.buckets.itervalues():\n for key in bucket.iterkeys():\n yield key", "def get_adjacent_keys(self, key: str) -> List[str]:\n return [k for k in self.get_adjacent(key)]", "def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def getkeys(self):\n return list(self.keys)", "def get_filenames(self, bucket, directory, delimiter=''):\n b = self.conn.get_bucket(bucket)\n rs = b.list(directory, delimiter)\n return [key.name for 
key in rs if '$folder$' not in key.name]", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def keys(self):\n return list(self.iterkeys())", "def keys(self):\r\n return [k for k in self]", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "def keys(self):\n return self._d.keys()", "def keys(self):\n return [key for key, value in self.items()]", "def RecurseKeys(self):\n yield self\n for subkey in self.GetSubkeys():\n for key in subkey.RecurseKeys():\n yield key", "def keys(self):\n pattern = r'^\\d+-aws-billing-csv-[\\d+]{4}-[\\d+]{2}.csv$'\n for key in self.bucket.get_all_keys():\n if re.search(pattern, key.name):\n yield key", "def Keys(self) -> NameObjectCollectionBase.KeysCollection:", "def scandir(path='.'):\r\n for name in os.listdir(path):\r\n yield GenericDirEntry(path, name)", "def keys(self, pattern=\"*\"):\n lenOfPrefix = len(self.appendKeys(\"\"))\n return [key[lenOfPrefix:] for key in\n self.redis.keys(self.appendKeys(pattern))]", "async def get_keys(self):\n return self.dict.keys()", "def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys", "def keysAll():", "def keys(self):\n self._load()\n return list(self._file_openers.keys())", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def keys(self):\n with self.__plock:\n return self._keys[:]", "def keys(self):\n return [ x for x in self ]", "def list_sorted_filenames(directory):\n with os.scandir(directory) as entries:\n filenames = [entry.name for entry in entries if entry.is_file()]\n filenames.sort()\n return filenames.copy()", "def iterkeys(self, multi=False):\n root = self.root\n curr = root[NEXT]\n if multi:\n while curr is not root:\n yield curr[KEY]\n curr = curr[NEXT]\n else:\n yielded = set()\n yielded_add = yielded.add\n while curr is not root:\n k = curr[KEY]\n if k not in yielded:\n yielded_add(k)\n yield k\n curr = curr[NEXT]", "def get_all_keys(self):\n return self.psettings.allKeys()", "def keys(self) -> List[str]:\n return self.__stash.keys()", "def get_all_childname(self, key):\n return [x.split(\"/\")[1] for x in self.get_all_keys() if x.split(\"/\")[0] == key]", "def get_keys(weat_db):\n import updater\n keys = updater.list_keys(weat_db, verbose=False)\n return keys", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n d = HBINCell(self._buf, key_offset, self)\n\n try:\n for k in d.child().keys():\n yield k\n except RegistryStructureDoesNotExist:\n raise ParseException(\"Unsupported subkey list encountered.\")\n\n key_index += 4", "def keys(self) -> t.List[str]: # type: ignore[override]\n return list(self.__keys)", "def keys(self):\n return [kvp.key for kvp in self.keyvaluepair_set.all()]", "def get_contents_of_directory(directory, bucket=None):\n if bucket is None:\n bucket, directory = get_bucket_and_path_from_uri(directory)\n bucket = get_bucket(bucket)\n\n return [x.key for x in bucket.list(prefix=directory)]", "def keys(self):\n\n return list(self.iterkeys())", "def get_all_keys(self):\r\n all_keys = []\r\n for i in range(len(self.hash_table)):\r\n if self.hash_table[i] is not None:\r\n all_keys.append(self.hash_table[i].key)\r\n return all_keys", "def keys(self, installer_context):\n keys = set()\n for source in self.sources:\n keys.update(set(source.keys(installer_context)))\n return list(keys)", "def get_dirnames(path):\n storage = DefaultStorage()\n dirnames = 
storage.listdir(path)[0]\n dirnames.sort()\n return dirnames", "def getImmediateSubdirectories(dir):", "def iterkeys(self):", "def iterkeys(self):", "async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)", "def readKeys(keydir):\r\n for filename in os.listdir(keydir):\r\n if filename.startswith('.'):\r\n continue\r\n basename, ext = os.path.splitext(filename)\r\n if ext != '.pub':\r\n continue\r\n\r\n if not isSafeUsername(basename):\r\n log.warn('Unsafe SSH username in keyfile: %r', filename)\r\n continue\r\n\r\n path = os.path.join(keydir, filename)\r\n f = file(path)\r\n for line in f:\r\n line = line.rstrip('\\n')\r\n yield (basename, line)\r\n f.close()", "def keys(self, _prec=\"\"):\n if self.isLeaf:\n yield _prec + self.ch\n\n for chld in self.children.values():\n yield from chld.keys(_prec + self.ch)", "def iterkeys(self):\n return DictKeysIterator(self)", "def get_checkpoint_list(dir):\n ckpt_fnames = glob.glob(os.path.join(dir, '*.index'))\n ckpt_fnames = [x.replace('.index', '') for x in ckpt_fnames]\n ckpt_fnames.sort(key=lambda key: int(os.path.basename(key).split('-')[-1]))\n return ckpt_fnames", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def _private_keys(self) -> List[Tuple[str, str]]:\n\n directory = tedious.config.CONFIG[\"KEYS\"]['private-keys']\n if self._cached_private_keys is None:\n self._cached_private_keys = [(file, os.path.join(directory, file)) for file in os.listdir(directory) if\n os.path.isfile(os.path.join(directory, file))]\n if len(self._cached_private_keys) == 0:\n raise ValueError(\"'{}' does not contains any private keys.\".format(directory))\n return self._cached_private_keys", "def dict_keys(d):\n return list(d.keys())", "def keys(self):\n return sorted(self._local_unique_map.keys())", "def all_keys(self):\n return self.derivable_keys() + self.loadable_keys()", "def read_all_file_names(path, extension, key=\"time\"):\n file_paths = glob.glob(path + '**/*' + extension, recursive=True)\n\n if key == 'time':\n return sorted(file_paths, key=time_key)\n \n elif key == 'natural':\n return sorted(file_paths, key=natural_key)", "def _get_keys(self, ckey):\n if self.has_key(ckey):\n doc = self[ckey]\n else:\n doc = [o for o in self.get_values(ckey)]\n if isinstance(doc, dict):\n for key in doc.keys():\n if ckey.rfind('%s.' % key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n else:\n yield ckey\n elif isinstance(doc, list):\n for item in doc:\n if isinstance(item, dict):\n for key in item.keys():\n if ckey.rfind('%s.' 
% key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n elif isinstance(item, list):\n for elem in item:\n if isinstance(elem, dict):\n for kkk in elem.keys():\n yield '%s.%s' % (ckey, kkk)\n else:\n yield ckey\n else: # basic type, so we reach the end\n yield ckey\n else: # basic type, so we reach the end\n yield ckey", "def keys(self):\n if self.dtype != 'array':\n raise TypeError('Property `keys` only exists for DataSet arrays')\n return [os.path.basename(p).split('.')[0] for p in\n s3.ls(self.s3_path, suffix=self.format.lower())]", "def rds_scan_keys(rds, glob):\n n = 0\n keys = []\n while(True):\n n, k = rds.scan(n, match=glob)\n keys.extend(k)\n if n == 0:\n break\n return keys", "def retrieve_keys(bucket, key, prefix='', postfix='', delim='/',\n directories=False, recursive=False):\n if key and prefix:\n assert key.endswith(delim)\n\n key += prefix\n # check whether key is a directory\n if not key.endswith(delim) and key:\n # check for matching prefix\n if BotoClient.check_prefix(bucket, key + delim, delim=delim):\n # found a directory\n key += delim\n\n listdelim = delim if not recursive else None\n results = bucket.list(prefix=key, delimiter=listdelim)\n if postfix:\n func = lambda k_: BotoClient.filter_predicate(k_, postfix, inclusive=True)\n return filter(func, results)\n elif not directories:\n func = lambda k_: BotoClient.filter_predicate(k_, delim, inclusive=False)\n return filter(func, results)\n else:\n return results", "def list_keys(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list", "def _candidate_dirs(self, key: CacheKey):\n return [os.path.join(d, str(key))\n for d in self.data_dirs]", "def iterkeys(d):\r\n return iter(getattr(d, _iterkeys)())", "def iterkeys(d):\n return iter(getattr(d, _iterkeys)())", "def keys(self, **kwargs) -> Iterable:\n return self.store.keys(**kwargs)", "def keys_breadth_first(self, include_dicts=False):\n namespaces = []\n for key in self._key_order:\n if isinstance(getattr(self, key), DotDict):\n namespaces.append(key)\n if include_dicts:\n yield key\n else:\n yield key\n for a_namespace in namespaces:\n for key in self[a_namespace].keys_breadth_first(include_dicts):\n yield '%s.%s' % (a_namespace, key)", "def keys(self):\n return self.keys", "def keys(self):\n\n objs = []\n with pd.get_store(self.store_path) as store:\n objs = store.keys()\n return objs", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def keys(self):\n sql = u\"\"\"\n SELECT `key` FROM `{table}` WHERE 1\n \"\"\".format(table=self.name)\n\n for row in self.conn.execute(sql):\n yield row['key']" ]
[ "0.701094", "0.67127836", "0.6352646", "0.6343178", "0.63064986", "0.6274958", "0.6232864", "0.6204033", "0.6202406", "0.6179034", "0.61675316", "0.6141725", "0.61349463", "0.6060003", "0.60563046", "0.603784", "0.6035911", "0.6027133", "0.59946287", "0.5978249", "0.59718513", "0.59582293", "0.5950911", "0.59452", "0.5938899", "0.5923173", "0.5921178", "0.5916422", "0.59062403", "0.5901566", "0.5875781", "0.58750683", "0.5871135", "0.58673733", "0.5864585", "0.5863625", "0.58628786", "0.58560467", "0.58490306", "0.58312017", "0.58285517", "0.58188987", "0.58142364", "0.58048517", "0.5796472", "0.5795541", "0.579446", "0.5794024", "0.5777232", "0.5771911", "0.5757776", "0.5755175", "0.57467127", "0.5742226", "0.57390535", "0.57325834", "0.57275957", "0.57272655", "0.57245857", "0.5708047", "0.57078326", "0.5705874", "0.5696453", "0.5695598", "0.569029", "0.5688929", "0.5683603", "0.56810254", "0.5676494", "0.5672311", "0.5672311", "0.5669106", "0.5668561", "0.5667529", "0.56665504", "0.56610686", "0.565859", "0.565859", "0.56365675", "0.563244", "0.5632372", "0.5625537", "0.562276", "0.5622695", "0.56215215", "0.5620619", "0.5616254", "0.5605353", "0.5603324", "0.5603324", "0.5600043", "0.55976236", "0.55970794", "0.55942804", "0.5589884", "0.5588306", "0.55882853", "0.5588004", "0.5580663", "0.5577122" ]
0.6699988
2
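A minimal usage sketch for `most_recent_cycle_keys` above, assuming PyROOT (a `TFile` is itself a `TDirectory`; the file name is hypothetical):

import ROOT

# Hypothetical file written several times, so some keys have multiple cycles.
root_file = ROOT.TFile.Open("histograms.root")
all_keys, recent_keys = most_recent_cycle_keys(root_file)

for key in recent_keys:
    print(key.GetName(), "cycle", key.GetCycle())

`recent_keys` holds one `TKey` per name (the highest cycle), sorted by name, while `all_keys` still lists every key in the directory.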
Convert the numpy array representing the GOL grid to a QImage.
def numpy_to_qimage(np_array: np.ndarray, show_age: bool):
    # Only support 2D array of bytes
    assert len(np_array.shape) == 2 and np_array.dtype == np.uint8
    width = np_array.shape[1]
    height = np_array.shape[0]
    bytes_per_line = width
    image = QImage(np_array, width, height, bytes_per_line, QImage.Format_Indexed8)
    # Maps array values to color
    if show_age:
        image.setColorTable(colors.AGE_COLOR_TABLE)
    else:
        image.setColorTable(colors.BINARY_COLOR_TABLE)
    return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convertNumpy2Image(self, array):\n cv2image = cv2.cvtColor(array, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=img)\n return imgtk", "def make_image(self, save=False):\n\n # image_grid = np.full((self.size_x, self.size_y), '#888888', dtype=str)\n image_grid = np.full((self.size_x, self.size_y, 3), 0, dtype=np.uint8)\n\n # self.grid = np.flip(self.grid, 1)\n\n # self.grid = np.swapaxes(self.grid, 0, 0)\n \"\"\"\n image_grid[self.grid == 0] = 'FFFFFF'\n image_grid[self.grid == 1] = '000000'\n image_grid[self.grid == 2] = '00FF00'\n image_grid[self.grid == 3] = '0000FF'\n image_grid[self.grid == 4] = 'FFFF00'\n image_grid[self.grid == 5] = '00FFFF'\n image_grid[self.grid == 6] = 'FF00FF'\n \"\"\"\n image_grid[self.grid == 0] = (1, 1, 1)\n image_grid[self.grid == 1] = (0, 0, 0)\n image_grid[self.grid == 2] = (1, 0, 1)\n image_grid[self.grid == 3] = (0, 1, 0)\n image_grid[self.grid == 4] = (0, 0, 1)\n image_grid[self.grid == 5] = (0, 1, 1)\n image_grid[self.grid == 6] = (1, 1, 0)\n\n #for ant in self.ants:\n # image_grid[ant.x, ant.y] = (1, 0, 0)\n\n # image_grid = image_grid.swapaxes(0, 1)\n # self.grid = self.grid.swapaxes(0, 1)\n\n\n\n DPI = 100\n width, height = 1000, 1000\n fig = plt.figure(figsize=(width / DPI, height / DPI), dpi=DPI, facecolor='k')\n ax = fig.add_subplot()\n\n plt.axis('equal')\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n\n for y in range(self.size_x):\n for x in range(self.size_y):\n if self.grid[x, y] != 0:\n # Only plot a hexagon if its state is not zero.\n plot_hex(ax, x, y, image_grid[x, y])\n\n ax.set_xlim(0, self.size_x)\n ax.set_ylim(0, self.size_y)\n\n plt.show()\n\n logging.info(\"Finished Image Processing\")", "def rgb2qimage(rgb):\n if len(rgb.shape) != 3:\n raise ValueError(\"rgb2QImage can only convert 3D arrays\")\n if rgb.shape[2] not in (3, 4):\n raise ValueError(\n \"rgb2QImage can expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels\")\n\n h, w, channels = rgb.shape\n\n # Qt expects 32bit BGRA data for color images:\n bgra = numpy.empty((h, w, 4), numpy.uint8, 'C')\n bgra[..., 0] = rgb[..., 2]\n bgra[..., 1] = rgb[..., 1]\n bgra[..., 2] = rgb[..., 0]\n if rgb.shape[2] == 3:\n bgra[..., 3].fill(255)\n fmt = QImage.Format_RGB32\n else:\n bgra[..., 3] = rgb[..., 3]\n fmt = QImage.Format_ARGB32\n\n result = QImage(bgra.data, w, h, fmt)\n result.ndarray = bgra\n return result", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def gray2qimage(gray):\n if len(gray.shape) != 2:\n raise ValueError(\"gray2QImage can only convert 2D arrays\")\n\n gray = numpy.require(gray, numpy.uint8, 'C')\n\n h, w = gray.shape\n\n result = QImage(gray.data, w, h, QImage.Format_Indexed8)\n result.ndarray = gray\n for i in range(256):\n result.setColor(i, QColor(i, i, i).rgb())\n return result", "def _prepare_image(self, grid):\n grid = np.array(grid, dtype=np.uint8)\n\n width = int(grid.shape[1] * self.scale_percent)\n height = int(grid.shape[0] * self.scale_percent)\n grid = cv2.resize(grid, (width, height), interpolation=cv2.INTER_AREA)\n return grid", "def render_image(grid,window):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n#top row:\r\n for j in range(Y):\r\n for sub_j in range(3): #3 rows \r\n ROW = []\r\n for i in range(X):\r\n ROW += grid[j][i].arr[sub_j]\r\n \r\n for k in range(len(ROW)):\r\n COLOR = (ROW[k],ROW[k],ROW[k])\r\n Y_pos = (3*j + sub_j)*pixel_size*scale\r\n X_pos = k*(pixel_size)*scale\r\n width = height = 
pixel_size*scale\r\n pygame.draw.rect(window,COLOR,(X_pos,Y_pos,width,height))\r\n \r\n# print(ROW)\r\n return", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def _arr_to_img(arr, verbose=False):\n return Image.fromarray(arr)", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def test_grdimage(grid):\n fig = Figure()\n fig.grdimage(grid, cmap=\"earth\", projection=\"W0/6i\")\n return fig", "def arr2img(ar):\n return Image.fromstring('L', (ar.shape[1], ar.shape[0]), ar.astype('b').tostring())", "def visualize_AQ(self):\n M = np.matrix(self.data[0])\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_aspect('equal')\n plt.imshow(M, interpolation='nearest', cmap=plt.cm.YlOrRd)\n plt.colorbar()\n plt.show()", "def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n \tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()", "def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top')\n ax.set_title('Gauss-Legendre Quadrature Grid')\n ax.set_xlabel('longitude index')\n ax.set_ylabel('latitude index')\n fig.tight_layout(pad=0.5)\n return fig,ax", "def showAssembled(self):\n im = np.zeros(self.puzzleImage.shape);\n r,c,d = self.puzzleImage.shape;\n r = r/len(self.puzzlePieces); # assume square matrix\n c = c/len(self.puzzlePieces);\n \n for i in range (len(self.puzzlePieces)):\n for j in range (len(self.puzzlePieces)):\n im[i*r:(i+1)*r, j*c:(j+1)*c] = self.puzzlePieces[i,j];\n \n plt.imshow(im);\n plt.show();", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram 
= \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def display_image ( X ):\r\n\t# on teste que le tableau contient bien 256 valeurs\r\n\tif X.size != 256:\r\n\t\traise ValueError ( \"Les images doivent etre de 16x16 pixels\" )\r\n\r\n\t# on cree une image pour imshow: chaque pixel est un tableau a 3 valeurs\r\n\t# (1 pour chaque canal R,G,B). Ces valeurs sont entre 0 et 1\r\n\tY = X / X.max ()\r\n\timg = np.zeros ( ( Y.size, 3 ) )\r\n\tfor i in range ( 3 ):\r\n\t\timg[:,i] = X\r\n\r\n\t# on indique que toutes les images sont de 16x16 pixels\r\n\timg.shape = (16,16,3)\r\n\r\n\t# affichage de l'image\r\n\tplt.imshow( img )\r\n\tplt.show ()", "def get_plain_image_as_widget(self):\n arr = self.getwin_array(order=self.rgb_order)\n\n # convert numpy array to native image widget\n image_w = self._get_wimage(arr)\n return image_w", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def display(array):\n if isinstance(array, np.ndarray):\n plt.imshow(array)\n plt.show()\n else:\n raise TypeError(\"display() needs a numpy ndarray as parameter, \"\n f\"got {type(array)}\")", "def to_qt_pixmap(self, scale=None):\n bytes_per_line = 3 * self.width\n img = self.to_color().img\n rgb = opencv.cvtColor(img, opencv.COLOR_BGR2RGB)\n q_img = QImage(rgb.data, self.width, self.height, bytes_per_line, QImage.Format_RGB888)\n pixmap = QPixmap.fromImage(q_img)\n\n if scale is not None:\n pixmap = pixmap.scaled(scale, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)\n\n return pixmap", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def 
_repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def display(self):\n rows = [(self.views[0].display, len(self.views))]\n fig, axes = plt.subplots(1, len(self.views),\n figsize=self._figsize(rows),\n squeeze=True)\n for ax, view in zip(axes.ravel(), self.views):\n ax.imshow(view.display)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=view.position.id)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def __translate(self, img):\n if not isinstance(img, Image):\n raise InvalidImageTypeException(\"display_images only accepts objects of type Image\")\n\n w = img.width()\n h = img.height()\n tkimg = Tkinter.PhotoImage(width=w, height=h)\n for x in range(w):\n for y in range(h):\n tkimg.put('#%02x%02x%02x' % img.get_rgb(x, y), (x, y))\n return tkimg", "def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image", "def display_image(np_rgb, text=None, scale_up=False):\n if scale_up:\n np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)\n np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)\n\n img_r, img_c, img_ch = np_rgb.shape\n if text is not None:\n np_t = np_text(text)\n t_r, t_c, _ = np_t.shape\n t_i_c = max(t_c, img_c)\n t_i_r = t_r + img_r\n t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)\n t_i.fill(255)\n t_i[0:t_r, 0:t_c] = np_t\n t_i[t_r:t_r + img_r, 0:img_c] = np_rgb\n np_rgb = t_i\n\n pil_img = util.np_to_pil(np_rgb)\n pil_img.show()", "def convert_image_to_QTformat(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n convertToQtFormat = QImage(image.data, image.shape[1], image.shape[0], QImage.Format_RGB888)\n qt_image = convertToQtFormat.scaled(500, 375, Qt.KeepAspectRatio)\n return qt_image", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def pix2pix_plot(self):\n if not self.HAS_PYQT5:\n return\n data = self.p2p_data\n if data is None or len(data) == 0:\n return\n\n # start up plot viewer if needed\n if self.plotviewer is None or not self.plotviewer.isVisible():\n self.plotviewer = MatplotlibPlot()\n\n self.plotviewer.setWindowTitle('Pixel-to-Pixel Comparison')\n self.plotviewer.plot_layout = 'rows'\n self.plotviewer.share_axes = self.plot_parameters['share_axes']\n self.plotviewer.plot(data)\n self.plotviewer.set_scroll('bottom')\n self.plotviewer.show()\n self.plotviewer.raise_()", "def _show_rgb(self):\n R, G, B = self._rgb_frames()\n image = numpy.dstack((R, G, B))\n imageItem = self.parent.image.getImageItem()\n imageItem.updateImage(image)", "def render(self):\n np_img = np.array(self.prev_img, dtype=np.uint8)\n np_img = np.swapaxes(np_img, 0, 2)\n return np_img", "def fig2img(figure):\n # put the figure pixmap into a numpy array\n buf = fig2data(figure)\n w, h, d = buf.shape\n import Image\n return Image.fromstring(\"RGBA\", (w, h), buf.tostring())", "def update_canvas_display_from_numpy_array(self, image_data):\n\n if len(self.drop_bands) 
> 0:\n zeros_image = numpy.zeros_like(image_data[:, :, 0])\n for drop_band in self.drop_bands:\n image_data[:, :, drop_band] = zeros_image\n self.canvas_decimated_image = image_data\n if self.scale_to_fit_canvas:\n scale_factor = self.compute_display_scale_factor(image_data)\n self.display_rescaling_factor = scale_factor\n self.display_image = self.get_scaled_display_data(image_data)\n else:\n self.display_image = image_data", "def get_qpimage_raw(self, idx=0):\n # Load experimental data\n with h5py.File(self.path) as h5:\n holo = h5[\"0\"][:]\n qpi = qpimage.QPImage(data=holo,\n which_data=\"raw-oah\",\n meta_data=self.get_metadata(idx),\n qpretrieve_kw=self.qpretrieve_kw,\n h5dtype=self.as_type)\n return qpi", "def newimagefromarray(self, *args, **kwargs):\n return _image.image_newimagefromarray(self, *args, **kwargs)", "def numpy_to_pixmap(img, width, height):\n img_resized = cv2.resize(img, (width, height), cv2.INTER_AREA)\n img_rgb = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)\n qt_img = QImage(img_rgb.data, img_rgb.shape[1], img_rgb.shape[0], QImage.Format_RGB888)\n pixmap = QPixmap.fromImage(qt_img)\n return pixmap", "def show_image_grid(imgs):\n grd = make_grid(imgs)\n npimg = grd.numpy()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n plt.ion()\n plt.show()", "def _show_numpy(tensor: ndarray, zoom: float = 1.) -> None:\n from PIL import Image\n shape = tuple(map(lambda s: round(s * zoom), tensor.shape))\n Image.fromarray(tensor).resize((shape[1], shape[0])).show()", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, ) #Qt.KeepAspectRatio\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, ) #Qt.KeepAspectRatio\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, ) #Qt.KeepAspectRatio\n return QPixmap.fromImage(p)", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def __generate_image(self):\n\t\tself.img = np.ones((self.size*self.width+self.border,self.size*self.width+self.border,1), np.uint8)*255\n\t\tfor i in range(len(self.matrix)):\n\t\t\tfor j in range(len(self.matrix)):\n\t\t\t\tif self.matrix[j][i] == 1:\n\t\t\t\t\tself.img = cv2.rectangle(self.img,(i*self.width+int(self.border/2),j*self.width+int(self.border/2))\n\t\t\t\t\t\t,(i*self.width+self.width+int(self.border/2),j*self.width+self.width+int(self.border/2)),(0,0,0),-1)\n\t\tif '.' 
in self.name:\n\t\t\tcv2.imwrite(self.name,self.img)\n\t\telse:\n\t\t\tcv2.imwrite(self.name+'.jpg',self.img)\n\t\tcv2.imshow(\"Image\",self.img)\n\t\tcv2.waitKey(0)\n\t\tcv2.destroyAllWindows()", "def fromarray(self, *args, **kwargs):\n return _image.image_fromarray(self, *args, **kwargs)", "def get_placeholder_image(self):\n\n grid = Gtk.Grid(name=\"output-placeholder_grid\", halign=3, valign=3)\n label = Gtk.Label(name=\"output-placeholder_label\",\n label=\"<b> Query to show outputs </b>\",\n use_markup=True)\n image = Gtk.Image.new_from_icon_name(\"help-about\", 6)\n image.set_name(\"output-placeholder_image\")\n grid.attach(image, 0, 0, 1, 1)\n grid.attach(label, 0, 1, 1, 1)\n return grid", "def fig2img ( fig ):\n # put the figure pixmap into a numpy array\n buf = fig2data ( fig )\n w, h, d = buf.shape\n return Image.frombytes( \"RGBA\", ( w ,h ), buf.tostring( ) )", "def fig2img ( fig ):\n # put the figure pixmap into a numpy array\n buf = fig2data ( fig )\n w, h, d = buf.shape\n return Image.frombytes( \"RGBA\", ( w ,h ), buf.tostring( ) )", "def visualize(grid, board_size=16):\n visual_grid = []\n for i in range(board_size):\n row = []\n for j in range(board_size):\n row.append(grid[(j, i)])\n visual_grid.append(row)\n print(visual_grid)", "def draw_matrix(self, destination, version, band):\n if version == \"band\":\n matrix = self.bands[band]\n elif version == \"gauss\":\n matrix = self.gauss_bands[band]\n elif version == \"sharp\":\n matrix = self.sharp_bands[band]\n else:\n matrix = self.ndvi\n plt.imshow(matrix)\n plt.colorbar()\n plt.savefig(destination)", "def to_image(fig):\n fig.tight_layout(pad=1)\n fig.canvas.draw()\n image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n image_from_plot = image_from_plot.reshape(\n fig.canvas.get_width_height()[::-1] + (3,))\n return image_from_plot", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(\n rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n display_width = 960\n display_height = 540\n p = convert_to_Qt_format.scaled(\n display_width, display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def get_raw(self) -> bytearray:\n img_bytes = bytearray()\n for i in range(self.grid_size[0]):\n if self.grid[i] is not None:\n for j in range(self.grid_size[1]):\n if self.grid[i][j] is not None:\n color = self.grid[i][j]\n color = color.get_byte_representation()\n for k in range(len(color)):\n img_bytes.append(color[k])\n return img_bytes", "def img_from_array(array):\n return Image.fromarray(array)", "def array2img(array):\n if len(array.shape) == 2:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='L')\n elif len(array.shape) == 3:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='RGB')\n else:\n print('Income array is not at appropriate shape!')", "def view(self):\n plt.imshow(self.texture_array, vmin = 0, vmax = 255)\n if self.texture_array.ndim == 2:\n plt.set_cmap('gray')\n \n plt.title(self.texture_name)\n plt.show()", "def toImage(cmObject):\n size = self.gridsize, self.gridsize\n cm = cmObject()\n master = []\n for item in cm:\n master.extend(item)\n\n #Apply default Mandelbrot Set Function\n master = map(f, master)\n\n col1 = (0,0,102,0)\n col2 = (255,204,51,0)\n\n def select_color(x):\n if x == 1: return col1\n else: return col2\n\n master = map(select_color, master)\n \n image = Image.new(\"RGBA\", size, 
(0,0,0,0))\n image.putdata(master)\n return image", "def make_image(self, mode=\"L\") -> Image:\r\n return Image.fromarray(self.fb, mode=\"L\")", "def show(self, exec_rasterize = False):\n\n if (exec_rasterize):\n self.rasterize()\n\n Image.fromarray(self._image).show()", "def convert_cv_qt(self, cv_img):\r\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\r\n h, w, ch = rgb_image.shape\r\n bytes_per_line = ch * w\r\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\r\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, Qt.KeepAspectRatio)\r\n return QPixmap.fromImage(p)", "def imshow(self):\n axes([0, 0, 1, 1], xticks=[], yticks=[])\n imshow(self.rgb_image())", "def convertQtDepthFrame(self):\n try:\n img = QImage(self.DepthFrameRGB,\n self.DepthFrameRGB.shape[1],\n self.DepthFrameRGB.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def visualize_q(self, grid_size, show_max_qs=False):\n q_values = None\n if show_max_qs:\n # Maximum over actions\n q_values = self.q.max(axis=1)\n else:\n # Mean over actions\n q_values = self.q.mean(axis=1)\n # Reshape to match with the grid we have\n q_values = q_values.reshape(grid_size)\n pyplot.imshow(q_values)\n pyplot.show()", "def fig2img(fig):\n # put the figure pixmap into a numpy array\n buf = fig2data(fig)\n w, h, d = buf.shape\n return Image.fromstring(\"RGBA\", (w, h), buf.tostring())", "def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.disply_width, self.display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def render_q(self, model):\r\n def clip(n):\r\n return max(min(1, n), 0)\r\n\r\n if self.__render == Render.TRAINING:\r\n nrows, ncols = self.arrayMap.shape\r\n\r\n self.__ax2.clear()\r\n self.__ax2.set_xticks(np.arange(0.5, nrows, step=1))\r\n self.__ax2.set_xticklabels([])\r\n self.__ax2.set_yticks(np.arange(0.5, ncols, step=1))\r\n self.__ax2.set_yticklabels([])\r\n self.__ax2.grid(True)\r\n self.__ax2.plot(*self.storageCell, \"gs\", markersize=30) # exit is a big green square\r\n self.__ax2.text(*self.storageCell, \"Stor\", ha=\"center\", va=\"center\", color=\"white\")\r\n\r\n for cell in self.allowableCells:\r\n q = model.q(cell) if model is not None else [0, 0, 0, 0]\r\n a = np.nonzero(q == np.max(q))[0]\r\n\r\n for 
action in a:\r\n dx = 0\r\n dy = 0\r\n if action == Action.MOVE_LEFT:\r\n dx = -0.2\r\n if action == Action.MOVE_RIGHT:\r\n dx = +0.2\r\n if action == Action.MOVE_UP:\r\n dy = -0.2\r\n if action == Action.MOVE_DOWN:\r\n dy = 0.2\r\n\r\n # color (red to green) represents the certainty\r\n color = clip((q[action] - -1)/(1 - -1))\r\n\r\n self.__ax2.arrow(*cell, dx, dy, color=(1 - color, color, 0), head_width=0.2, head_length=0.1)\r\n\r\n self.__ax2.imshow(self.arrayMap, cmap=\"Reds\")\r\n self.__ax2.get_figure().canvas.draw()", "def display(self):\n nrow = 2\n ncol = len(self.views) + 1\n rows = [(self.views[0].original, len(self.views)),\n (self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n originals = [(v.position.id, v.original) for v in self.views] + [\n ('combined', np.median(np.stack([v.original for v in self.views]), axis=0))]\n warped = [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]\n for ax, (title, img) in zip(axes.ravel(), originals + warped):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def display(self):\n fig, axes = plt.subplots(1, len(self.views),\n figsize=self._figsize(\n [(self.views[0].image, len(self.views))]),\n squeeze=True)\n for ax, view in zip(axes.ravel(), self.views):\n ax.imshow(view.grey)\n points = self._common_keypoints(view).reshape(-1, 2)[::-1]\n ax.plot(points[..., 0], points[..., 1], 'r+')\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=view.position.id)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def cvimg2qpixmap(cvimg: np.ndarray):\n height, width, channel = cvimg.shape\n bytesPerLine = 3 * width\n qImg = QImage(cvimg.data, width, height, bytesPerLine, QImage.Format_RGB888)\n return QPixmap(qImg)", "def display(self):\n nrow = 1\n ncol = len(self.views) + 1\n rows = [(self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n for ax, (title, img) in zip(axes.ravel(),\n [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def get_image(self,v):\n\n # kludge to get the image to have no border\n fig=pylab.figure(figsize=(self.hsize,self.hsize*float(v.shape[0])/v.shape[1]))\n\n ax=fig.add_axes([0,0,1,1])\n v = np.flipud(v)\n #self.colormap.set_under(\"r\", alpha = 0.0)\n zeros = v<1.0\n v[zeros]=np.NaN\n self.colormap.set_bad(alpha=0.0)\n\n pylab.imshow(v)\n pylab.axis('off')\n self.process_image()\n\n # create a string buffer to save the file\n im=io.StringIO()\n\n pylab.savefig(im,format='png',transparent=True,dpi = 1200)\n\n # return the buffer\n return im.getvalue()", "def takePicture(self):\n if not PICAM_ENABLED:\n return\n\n try:\n #self.camera.capture(self.imgFile, 'jpeg')\n #self.cnvImg.createImg(self.imgFile)\n\t\t\n self.rgbArray = 
picamera.array.PiRGBArray(self.camera)\n self.camera.capture(self.rgbArray, 'rgb')\n \n self.gray = rgb2gray(self.rgbArray.array)\n plt.imshow(self.gray, cmap = plt.get_cmap('gray'))\n plt.show()\n print(self.gray.shape, self.gray.ndim)\n print(self.gray[0][2]) \n self.img = Image.fromarray(self.rgbArray.array)\n self.img.save(\"./array.png\")\n self.cnvImg.displayImg(self.img)\n except:\n print(\"Take picture error\")", "def plot(self, windowSize='800x600'):\n if not hasattr(self, 'compiled'):\n raise RuntimeError('The object has not compiled yet')\n # create a scrollable window\n _, fm, run = simple_scrollable_window(windowSize)\n count = 0\n img_ref = []\n for key, val in {**self.qubitDict, **self.readoutDict}.items():\n Label(\n fm, text=key + f':{val}', font='Consolas',\n relief='solid', borderwidth=1\n ).grid(row=count, column=0, ipadx=5, ipady=5, sticky='news')\n img_data = self.compiled[val].plot(\n allInOne=False, toByteStream=True, showSizeInfo=False,\n size=[20, 4]\n )\n render = ImageTk.PhotoImage(Image.open(img_data))\n img_ref += [render]\n img = Label(fm, image=render, borderwidth=1, relief='solid')\n img.grid(row=count, column=1, ipadx=5, ipady=5, sticky='news')\n img.image = render\n count += 1\n run()", "def cv_image_to_qt_image(cv_img):\n height, width, channels = cv_img.shape\n\n ##return QtGui.QImage(\n ##cv_img.data, width, height, QtGui.QImage.Format_ARGB32\n ##)\n \n qt_img = QtGui.QImage(width, height, QtGui.QImage.Format_RGB888)\n\n for i, line in enumerate(cv_img):\n for j, pix in enumerate(line):\n qt_img.setPixel(j, i, QtGui.qRgb(pix[2], pix[1], pix[0]))\n\n return qt_img", "def image(self, data):\n # Assemble colormap from given parameters\n cmap = self.config.get('cmap', None)\n\n # If a single color provided, prepend 'white' color, so that a resulting tuple defines binary colormap\n if is_color_like(cmap):\n cmap = (cmap, )\n # If a tuple of colors provided in `cmap` argument convert it into a colormap\n if isinstance(cmap, tuple):\n cmap = make_cmap(colors=cmap)\n else:\n cmap = copy(plt.get_cmap(cmap))\n # Set a color for nan/masked values display to colormap if provided\n mask_color = self.config.get('mask_color', None)\n cmap.set_bad(color=mask_color)\n\n image_keys = ['alpha', 'vmin', 'vmax', 'extent']\n image_config = self.config.filter(keys=image_keys, prefix='image_')\n\n vmin, vmax = self._parse_vrange(data)\n image_config['vmin'] = vmin\n image_config['vmax'] = vmax\n\n image = self.ax.imshow(data, cmap=cmap, **image_config)\n\n return [image]", "def convert_cv_qt(self, cv_img):\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(225, 220) # , Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)", "def _visualize_input(self, input):\n self.writer.add_image('input', make_grid(input[0, 0, :, :], nrow=8, normalize=True))", "def gridPlot48(img_stack):\r\n F = plt.figure(figsize = (50,50))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (6,8), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:48]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot48.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return", 
"def getimage(self):", "def __str__(self):\n grid_str = \"[\"\n for row in range(self._height):\n grid_str += \" \" + str(self._grid[row]) + \"\\n\"\n grid_str = grid_str[0:1] + grid_str[2:]\n grid_str = grid_str[:-1]\n grid_str += \"]\"\n return grid_str", "def turn_into_image(self):\n if self.title_colours:\n self.table_to_image = TableToImage(self.table.get_string(), self.colour, self.rows, self.titles,\n self.title_colours)\n else:\n self.table_to_image = TableToImage(self.table.get_string(), self.colour, self.rows)", "def plot_pixel_array(arr, figsize=(10, 10)):\n arr = arr.squeeze()\n plt.figure(figsize=figsize)\n plt.imshow(arr, cmap=plt.cm.bone)\n plt.show()", "def create_image(self):\n\n self._image = 255 * np.ones((self._height, self._width, 3), np.uint8)", "def convert_cv_qt(self, cv_img):\n\t\trgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n\t\th, w, ch = rgb_image.shape\n\t\tbytes_per_line = ch * w\n\t\tconvert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n\n\t\tif w > 1000:\n\t\t\trat = w / 1000\n\t\t\tw = 1000\n\t\t\th = h / rat\n\n\t\tp = convert_to_Qt_format.scaled(int(w), int(h), Qt.KeepAspectRatio)\n\t\treturn QPixmap.fromImage(p)", "def convertToQPixelmap(self, imgToConvert):\n \n # Conversion en image QImage\n if ( len(imgToConvert.shape) == 3 ):\n img_qimg = QtGui.QImage(imgToConvert.data, \n imgToConvert.shape[1], \n imgToConvert.shape[0],\n imgToConvert.strides[0],\n QtGui.QImage.Format_RGB888)\n else:\n\t\t\timg_qimg = QtGui.QImage(imgToConvert.data, \n imgToConvert.shape[1], \n imgToConvert.shape[0],\n imgToConvert.strides[0],\n QtGui.QImage.Format_Indexed8)\n\t\t\t\n \n # Conversion en image QPixmap pour l'afficher\n return QtGui.QPixmap.fromImage(img_qimg)", "def __init__(self, array: ReadCache, y_scale: float, y_spacing: float, chan_map: ChannelMap,\n nav_trace: np.ndarray, x_scale: Union[float, np.ndarray]=1.0, load_channels: Union[list, str]='all',\n max_zoom: float=120.0, units: str='V'):\n\n nchan = array.shape[0]\n if isinstance(load_channels, str) and load_channels.lower() == 'all':\n load_channels = range(nchan)\n self.chan_map = chan_map\n elif len(load_channels) == len(chan_map):\n self.chan_map = chan_map\n elif len(load_channels) < len(chan_map):\n # \"load_channels\" indexes an array of recording channels\n # that may include grounded channels or other junk.\n raise NotImplementedError('cannot yet subset data channels')\n # new_cm = ChannelMap( [chan_map[i] for i in load_channels],\n # chan_map.geometry,\n # col_major=chan_map.col_major )\n # self.chan_map = new_cm\n else:\n raise ValueError('cannot map the listed channels')\n\n self.array = array\n self.y_scale = y_scale\n if isinstance(load_channels, str) and load_channels == 'all':\n load_channels = list(range(len(array)))\n self.load_channels = load_channels\n\n # The main window + layout\n self.win = pg.GraphicsLayoutWidget(border=(10, 10, 10))\n layout = self.win.ci\n # Adding columns to layout: just titles on the top row\n layout.addLabel('Array heatmap')\n layout.addLabel('Zoomed plot')\n layout.addLabel('|')\n # Next row has 1) the heatmap image with the colorbar widget\n layout.nextRow()\n sub_layout = layout.addLayout(colspan=1)\n self.img = pg.ImageItem(image=np.random.randn(*self.chan_map.geometry) * y_spacing) # * 1e6 / 2)\n cmap = pg.colormap.get('coolwarm', source='matplotlib')\n p_img = sub_layout.addPlot()\n self.p_img = p_img\n p_img.getViewBox().setAspectLocked()\n p_img.addItem(self.img)\n p_img.hideAxis('bottom')\n 
p_img.hideAxis('left')\n mid_x, top_y = self.chan_map.geometry[1] / 2.0, self.chan_map.geometry[0] + 2.0\n\n # add a text label on top of the box (\"anchor\" has text box is centered on its x, y position)\n self.frame_text = pg.TextItem('empty', anchor=(0.5, 0.5), color=(255, 255, 255))\n self.frame_text.setPos(mid_x, top_y)\n # self.vb_img.addItem(self.frame_text)\n p_img.getViewBox().addItem(self.frame_text)\n p_img.getViewBox().autoRange()\n\n # colorbar\n self.cb = pg.ColorBarItem(limits=None, colorMap=cmap, hoverBrush='#EEEEFF80',\n rounding=10e-6, values=(-y_spacing, y_spacing))\n self.cb.getAxis('left').setLabel('')\n self.cb.getAxis('right').setLabel('Voltage', units='V')\n self.cb.setImageItem(self.img)\n sub_layout.addItem(self.cb)\n\n # 2) the stacked traces plot (colspan 2)\n axis = PlainSecAxis(orientation='bottom')\n self.p1 = layout.addPlot(colspan=2, row=1, col=1,\n axisItems={'bottom':axis})\n\n self.p1.enableAutoRange(axis='y', enable=True)\n self.p1.setAutoVisible(y=False)\n self.p1.setLabel('left', 'Amplitude', units=units)\n self.p1.setLabel('bottom', 'Time', units='s')\n\n # Next layout row has the navigator plot (colspan 3)\n layout.nextRow()\n\n # The navigator plot\n axis = HMSAxis(orientation='bottom')\n self.p2 = layout.addPlot(row=2, col=0, colspan=3,\n axisItems={'bottom':axis})\n self.p2.setLabel('left', 'Amplitude', units=units)\n self.p2.setLabel('bottom', 'Time')\n self.region = pg.LinearRegionItem() \n self.region.setZValue(10)\n\n # Add the LinearRegionItem to the ViewBox,\n # but tell the ViewBox to exclude this \n # item when doing auto-range calculations.\n self.p2.addItem(self.region, ignoreBounds=True)\n\n # Multiple curve set that calls up data on-demand\n self.curve_manager = CurveManager(plot=self.p1)\n curves = PlotCurveCollection(array, load_channels, x_scale, y_scale, y_spacing, False)\n x_scale = curves.dx\n # curves.setPen('w', width=1)\n self.curve_manager.add_new_curves(curves, 'all', set_source=True)\n # Set the heatmap to track these curves\n self.curve_manager.heatmap_name = 'all'\n\n initial_pts = 5000\n self.region.setRegion([0, initial_pts * x_scale])\n\n self.p1.setAutoVisible(y=True)\n self.p1.setXRange(0, initial_pts * x_scale)\n self.p1.vb.setLimits(maxXRange=max_zoom)\n\n\n # Selected curve & label set that calls up data on-demand\n labels = ['({}, {})'.format(i, j) for i, j in zip(*chan_map.to_mat())]\n selected_curves = LabeledCurveCollection(curves, labels, clickable=True)\n self.curve_manager.add_new_curves(selected_curves, 'selected')\n for text in selected_curves.texts:\n self.p1.addItem(text)\n\n # Add mean trace to bottom plot\n self.nav_trace = pg.PlotCurveItem(x=np.arange(len(nav_trace)) * x_scale, y=nav_trace)\n self.p2.addItem(self.nav_trace)\n self.p2.setXRange(0, min(5e4, len(nav_trace))*x_scale)\n self.p2.setYRange(*np.percentile(nav_trace, [1, 99]))\n \n # Set bidirectional plot interaction\n # need to hang onto references?\n self._db_cnx1 = DebounceCallback.connect(self.region.sigRegionChanged, self.update_zoom_callback)\n self._db_cnx2 = DebounceCallback.connect(self.p1.sigXRangeChanged, self.update_region_callback)\n\n # Do navigation jumps (if shift key is down)\n self.p2.scene().sigMouseClicked.connect(self.jump_nav)\n\n # Do fine interaction in zoomed plot with vertical line\n self.vline = pg.InfiniteLine(angle=90, movable=False)\n self.p1.addItem(self.vline)\n self.p1.scene().sigMouseMoved.connect(self.fine_nav)\n \n # final adjustments to rows: args are (row, stretch)\n # TODO: deprecation warning here -- 
do not understand why\n self.win.centralWidget.layout.setRowStretchFactor(0, 0.5)\n self.win.centralWidget.layout.setRowStretchFactor(1, 5)\n self.win.centralWidget.layout.setRowStretchFactor(2, 2.5)\n\n # a callable frame filter may be set on this object to affect frame display\n self.frame_filter = None\n\n # set up initial frame\n self.set_mean_image()", "def Convert_CV_QT(self, cv_img):\n\t\trgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n\t\th, w, ch = rgb_image.shape\n\t\tbytes_per_line = ch * w\n\t\tconvert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n\t\tp = convert_to_Qt_format.scaled(self.camera.size().width(), self.camera.size().height(), Qt.IgnoreAspectRatio)\n\t\t\n\t\treturn QPixmap.fromImage(p)", "def show_tiff_image_data(bgrn_image):\n\ttif_rgb = get_rgb(bgrn_image, [2, 1, 0]) # RGB\n\t# rescaling to 0-255 range - uint8 for display\n\trescaleIMG = np.reshape(tif_rgb, (-1, 1))\n\tscaler = MinMaxScaler(feature_range=(0, 255))\n\trescaleIMG = scaler.fit_transform(rescaleIMG)\n\timg_scaled = (np.reshape(rescaleIMG, tif_rgb.shape)).astype(np.uint8)\n\tnew_style = {'grid': False}\n\tplt.imshow(img_scaled)\n\tplt.title('RGB')\n\tplt.colorbar()", "def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/WGAN.png')\n plt.show()", "def generate_array_image(R, G, B, height, width):\n R = R.reshape((height, width))\n G = G.reshape((height, width))\n B = B.reshape((height, width))\n \n return np.moveaxis(np.array([R, G, B]), 0, -1)", "def convImg2gMatrix(self):\n self.file = self.file.convert(\"L\")\n matrix = numpy.asarray(self.file)\n # convert image to gray scale image\n # in the array, 0 represents black\n # 255 represents white\n # array[a,b] => a represents the line, b represents the columns\n # array[0,0] is the pixel in the top-left hand corner\n return matrix" ]
[ "0.6397902", "0.6196134", "0.60192525", "0.59758794", "0.5931983", "0.5765702", "0.5721111", "0.5687924", "0.56588805", "0.5628664", "0.55947673", "0.55882084", "0.5583067", "0.55810326", "0.55731523", "0.5548744", "0.5506583", "0.54962564", "0.54832053", "0.54686344", "0.5464198", "0.544716", "0.54423577", "0.54351217", "0.5428951", "0.5421566", "0.5421566", "0.5419445", "0.54120886", "0.540037", "0.5399891", "0.53849775", "0.5373316", "0.53695905", "0.53686154", "0.5365701", "0.5354202", "0.53477514", "0.5343686", "0.5342156", "0.53407985", "0.5336528", "0.5329964", "0.5328103", "0.5328103", "0.5328103", "0.53236705", "0.5319147", "0.53047484", "0.5303161", "0.52996635", "0.52996635", "0.52991045", "0.5291345", "0.52843463", "0.52804583", "0.5279245", "0.5274142", "0.5273933", "0.5267852", "0.52602994", "0.524737", "0.52302814", "0.5225247", "0.5223355", "0.52182126", "0.52174145", "0.5216411", "0.5205273", "0.520515", "0.5199398", "0.51987064", "0.51987064", "0.51919353", "0.5183192", "0.51715404", "0.51671195", "0.5166199", "0.5164979", "0.516172", "0.5153962", "0.5143866", "0.5140298", "0.5136309", "0.51283383", "0.5110597", "0.5104652", "0.5099281", "0.5097982", "0.50960064", "0.50923705", "0.50825757", "0.5081052", "0.5076887", "0.5072027", "0.507119", "0.5053706", "0.5052318", "0.50463563", "0.5033774" ]
0.68053734
0
my_map - list of lists specifying obstacle positions
path   - [(x1, y1), (x2, y2), ...] list of the steps taken by the agent
goals  - [(x1, y1), (x2, y2), ...] list of goal locations
def __init__(self, my_map, paths, starts, goals, agent_goals, predictions):
    self.my_map = my_map
    self.paths = paths
    self.starts = starts
    self.goals = goals
    self.agent_goals = agent_goals
    self.num_of_agents = len(starts)
    self.predictions = predictions
    self.CPU_time = 0

    # compute heuristics for the low-level search
    self.heuristics = []
    for goal in self.goals:
        self.heuristics.append(compute_heuristics(my_map, goal))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_goal(self):\r\n\t\t# Creates a flat list of correct values\r\n\t\ttempList = [x for x in range(self.n**2)]\r\n\r\n\t\t# Nests those lists into a NxN\r\n\t\tBoardClass.goal = [tempList[self.n*i:self.n*(i+1)] for i in range(self.n)]\r\n\r\n\t\t# Creates a dictionary for the intended location of any specific tile. Used in\r\n\t\t# Manhatten Distance calculation.\r\n\t\tfor i in range(self.n**2):\r\n\t\t\trow = i // self.n\r\n\t\t\tcol = i % self.n\r\n\t\t\tBoardClass.goalTileLocations[i] = [row, col]", "def drawpath(self,obstacles):\n for i in obstacles:\n self.distance_map[i[0],i[1]]=44\n print(\"Distance map\")\n print(self.distance_map)\n for i in self.footprint:\n self.distance_map[i[0],i[1]]=88\n print(\"Evaluated path\")\n print(self.distance_map)", "def a_star(my_map, start_locs, goal_locs, h_values, agent, constraints):\n\n ##############################\n # Task 1.1: Extend the A* search to search in the space-time domain\n # rather than space domain, only.\n # Build constraint table if there are constraints\n\n constraint_table = build_constraint_table(constraints, agent)\n\n open_list = []\n closed_list = dict()\n nodes_opened = 0\n max_opened = 500\n start_loc = start_locs[0]\n goal_loc = goal_locs[0]\n if len(start_locs) > 1: # If there is more than 1 start location then this is a multi-cell agent\n multi = True\n else:\n multi = False\n\n # determine when the last constraint is on the goal node (or any of the goal node cells in the case of multi-cell)\n earliest_goal_timestep = 0\n if len(constraint_table) != 0:\n for time in [item for item in sorted(list(constraint_table.keys()), reverse=True)]:\n flat_list = [item for sublist in constraint_table[time] for item in sublist]\n if(goal_locs[0] in flat_list):\n earliest_goal_timestep = time\n break\n elif(multi): # if multi cell check if any of the agents goal cells are constrained \n if(goal_locs[1] in flat_list): \n earliest_goal_timestep = time\n break\n\n h_value = h_values[start_loc]\n goal_orientation = orientation(goal_locs)\n\n root = {'loc': start_loc,'orientation': orientation(start_locs), 'g_val': 0, 'h_val': h_value, 'time': 0, 'parent': None}\n push_node(open_list, root)\n closed_list[(root['loc'], root['time'], root['orientation'])] = root\n\n while len(open_list ) > 0 and nodes_opened < max_opened:\n curr = pop_node(open_list)\n nodes_opened = nodes_opened + 1\n \n if curr['loc'] == goal_loc and curr['orientation'] == goal_orientation and curr['time'] >= earliest_goal_timestep:\n return get_path(curr)\n ############################\n child_orient = curr['orientation']\n for dir in range(7):\n if dir < 5:\n child_loc = move(curr['loc'], dir)\n elif not multi: \n continue\n\n if dir == 5:\n # clockwise rotation \n child_orient = curr['orientation'] - 1\n if child_orient < 1:\n child_orient = 4\n if dir == 6:\n # counter-clockwise rotation \n child_orient = curr['orientation'] + 1\n if child_orient > 4:\n child_orient = 1\n \n if test_map(my_map, child_loc[0], child_loc[1], child_orient, dir):\n continue\n \n # check if the head location is constrained \n if is_constrained(curr['loc'], child_loc, child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if this is a multi cell agent check if the tail is constrained \n if multi:\n # check the next tail location \n row_t, col_t, _, _ = find_tail_positions(curr['loc'][0], curr['loc'][1], curr['orientation'], dir)\n next_row_t, next_col_t, next_row_t_inter, next_col_t_inter = find_tail_positions(child_loc[0], child_loc[1], child_orient, 
dir)\n\n if is_constrained((row_t,col_t), (next_row_t, next_col_t), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if the agent is rotating check if the intermediate location is constrained\n if dir == 5 or dir == 6: \n if is_constrained((row_t,col_t), (next_row_t_inter, next_col_t_inter), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n child = {'loc': child_loc,\n 'orientation': child_orient,\n 'g_val': curr['g_val'] + 1,\n 'h_val': h_values[child_loc] + orient_cost(child_orient, goal_orientation),\n 'time': curr['time'] + 1,\n 'parent': curr}\n\n if (child['loc'], child['time'], child['orientation']) in closed_list:\n existing_node = closed_list[(child['loc'], child['time'], child['orientation'])]\n \n if compare_nodes(child, existing_node):\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n else:\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n \n return None # Failed to find solutions", "def get_map(self) -> list:\n return self.map_obstacle", "def get_shortest_paths(distance_map: DistanceMap, agent_pos, agent_dir, max_depth: Optional[int] = None, agent_handle: Optional[int] = None) \\\n -> Dict[int, Optional[List[Waypoint]]]:\n shortest_paths = dict()\n\n def _shortest_path_for_agent(agent,agent_pos,agent_dir):\n if agent_pos is None :\n if agent.status == RailAgentStatus.READY_TO_DEPART:\n position = agent.initial_position\n elif agent.status == RailAgentStatus.ACTIVE:\n position = agent.position\n elif agent.status == RailAgentStatus.DONE:\n position = agent.target\n else:\n shortest_paths[agent.handle] = None\n return\n direction = agent.direction\n else :\n position = agent_pos\n direction = agent_dir \n shortest_paths[agent.handle] = []\n distance = math.inf\n depth = 0\n while (position != agent.target and (max_depth is None or depth < max_depth)):\n next_actions = get_valid_move_actions_(direction, position, distance_map.rail)\n best_next_action = None\n for next_action in next_actions:\n next_action_distance = distance_map.get()[\n agent.handle, next_action.next_position[0], next_action.next_position[\n 1], next_action.next_direction]\n if next_action_distance < distance:\n best_next_action = next_action\n distance = next_action_distance\n\n shortest_paths[agent.handle].append(Waypoint(position, direction))\n depth += 1\n\n # if there is no way to continue, the rail must be disconnected!\n # (or distance map is incorrect)\n if best_next_action is None:\n shortest_paths[agent.handle] = None\n return\n\n position = best_next_action.next_position\n direction = best_next_action.next_direction\n if max_depth is None or depth < max_depth:\n shortest_paths[agent.handle].append(Waypoint(position, direction))\n\n if agent_handle is not None:\n _shortest_path_for_agent(distance_map.agents[agent_handle],agent_pos,agent_dir)\n else:\n for agent in distance_map.agents:\n _shortest_path_for_agent(agent,agent_pos,agent_dir)\n\n return shortest_paths", "def compute_map(current_agent_id,agent_order,number_of_timestep,state_schedules, conv :StateConverter):\r\n #Find the agent has the highest number of time steps\r\n highest_timestep = 0\r\n # Find the highest time step\r\n if len(number_of_timestep) >0:\r\n highest_timestep = np.max(number_of_timestep)\r\n occupancy_map = []\r\n # Since we don't know yet how many time step of the current id so\r\n # the number of time steps of the occupancy map == highest number of time step\r\n # of 
the current schedule\r\n for time_step in range(int(highest_timestep)):\r\n # Initialize the occupancy for current time step\r\n current_occupancy_map = np.zeros(conv.num_tiles)\r\n # We loop through schedule of each agent at current time step\r\n for i in range(len(state_schedules)):\r\n # Get the agent id of current schedule\r\n agent_of_schedule = agent_order[i]\r\n if time_step < len(state_schedules[i]):\r\n # The first case when the agent of current schedule is executed after the current agent\r\n if agent_of_schedule > current_agent_id:\r\n # Get the current state\r\n current_state = state_schedules[i][time_step]\r\n # Convert the current state to tile index\r\n current_tile = conv.state_to_tile(current_state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[current_tile] = 1\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n # The second case when the agent of current schedule is executed before the current agent\r\n else:\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n if time_step + 2 < len(state_schedules[i]):\r\n # Get the next 2 state\r\n next_2state = state_schedules[i][time_step+2]\r\n # Convert the current state to tile index\r\n next_2tile = conv.state_to_tile(next_2state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[next_2tile] = 1\r\n occupancy_map.append(current_occupancy_map)\r\n return occupancy_map", "def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, 
initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res", "def generate_possible_paths(self, obstacle):\n if self.does_uav_intersect_obstacle_vertically(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):\n if self.does_path_intersect_obstacle_2d(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):\n new_attempt_pos_points = [\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0], obstacle.get_point()[1] + obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0], obstacle.get_point()[1] - obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)]\n ]\n\n new_paths = []\n for new_pos_point in new_attempt_pos_points:\n if not self.does_path_intersect_obstacle_3d(obstacle, self.drone.get_point(), new_pos_point) and self.flight_boundary.is_point_in_bounds(new_pos_point):\n for recursive_new_pos_point in new_attempt_pos_points:\n if self.flight_boundary.is_point_in_bounds(recursive_new_pos_point) and abs(recursive_new_pos_point[2] - new_pos_point[2]) < 5:\n if recursive_new_pos_point[0] != new_pos_point[0] or recursive_new_pos_point[1] != new_pos_point[1]:\n if not self.does_path_intersect_obstacle_3d(obstacle, new_pos_point, 
recursive_new_pos_point) and not self.does_path_intersect_obstacle_3d(obstacle, recursive_new_pos_point, self.drone.get_waypoint_holder().get_current_waypoint()):\n new_paths.append([new_pos_point, recursive_new_pos_point])\n\n # Uncomment for DEBUGGING ONLY\n for path in new_paths:\n print(\"Point:\", str(path))\n\n return new_paths\n\n return []", "def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)", "def find_path(self, start_point: Pos, end_point: Pos, obstacles: list) -> list:\n pass", "def a_star_obs(obs_map):\n world_ndarray = np.copy(obs_map[0])\n\n start = tuple(np.argwhere(world_ndarray == -2)[0])\n goal = tuple(np.argwhere(world_ndarray == -3)[0])\n\n world_ndarray[world_ndarray == -2] = 0\n world_ndarray[world_ndarray == -3] = 0\n\n world_tuple = tuple(map(tuple, world_ndarray))\n\n def h_custom_i(cur, end, obstacle):\n ytop, ybot, minx = obstacle\n cur_y, cur_x = cur\n end_y, end_x = end\n obs_bot = np.where(world_ndarray[ybot] == -1)[0][0]\n mid_y = ybot + (ytop - ybot) // 2\n if cur_y in range(ybot, ytop) and cur_x in range(max(obs_bot, start[1]), end_x):\n return 5000 - abs(minx - cur_x) ** 2 - abs(cur_y - mid_y) ** 2\n return abs(cur_x - end_x) + abs(cur_y - end_y)\n\n pr_queue = [] # Use heapqueue as priority queue\n heappush(pr_queue, (0 + h_custom_i(start, goal, obs_map[1]), 0, \"\", start))\n visited = set() # Each element has to be unique in a set\n graph = get_neighbors(world_tuple)\n route_str = \"\"\n\n while pr_queue:\n _, cost, path, current = heappop(pr_queue)\n if current == goal:\n route_str = path\n break\n if current in visited:\n continue\n visited.add(current)\n for direction, neighbour in graph[current].iteritems():\n heappush(pr_queue, (cost + h_custom_i(neighbour, goal, obs_map[1]), cost + 1, path + direction, neighbour))\n world_ndarray[neighbour] = cost + 1\n\n # print \"Expanded nodes(A*+Custom H): \", len(visited), \" Path length: \", len(route_str)\n # Convert string directions to 2D(x,y) coordinates\n route_coord = [start]\n for p in route_str:\n route_coord.append(graph[route_coord[-1]][p])\n\n world_ndarray[start] = -2 # Mark the start and end coordinates again\n world_ndarray[goal] = -3\n\n return route_coord, world_ndarray, len(visited), len(route_str)", "def search_paths_agent_to_goal(self, robot_x, robot_y, goal_x, goal_y, G, road_node_Nos, road_node_info,\n road_lines, road_directions, road_lines_num, node_edges):\n # add target node\n target_node_coordinate = np.zeros((1, 2))\n target_node_coordinate[0][0] = goal_x\n target_node_coordinate[0][1] = goal_y\n target_node = None\n\n for (key, value) in road_node_info.items():\n if math.sqrt((value[0]-target_node_coordinate[0][0])**2 + (value[1]-target_node_coordinate[0][1])**2) <= 0.01:\n target_node = key\n\n if target_node == 0:\n print(target_node)\n raise Exception(\"wrong target node\", target_node)\n\n # Check whether the robot is on the road node or not\n at_node = False\n for (key, value) in road_node_info.items():\n if key == 0:\n continue\n if value[0] == robot_x and value[1] == robot_y:\n at_node = True\n agent_node_No = key\n\n if at_node == 
False:\n # add agent node\n agent_node_No = 0\n agent_node_coordinate = np.zeros((1, 2))\n agent_node_coordinate[0][0] = robot_x\n agent_node_coordinate[0][1] = robot_y\n agent_node = dict(zip([agent_node_No], agent_node_coordinate))\n road_node_info.update(agent_node)\n\n # add node\n env_node_Nos = [agent_node_No] + road_node_Nos\n G.add_nodes_from(env_node_Nos)\n\n # add edges from agent to the nearest road line\n # calculate the distance from the agent to the lines\n agent_line_dist = []\n for i in range(road_lines_num):\n cross = (road_lines[i][2] - road_lines[i][0]) * (agent_node_coordinate[0][0] - road_lines[i][0]) \\\n + (road_lines[i][3] - road_lines[i][1]) * (agent_node_coordinate[0][1] - road_lines[i][1])\n if cross <= 0:\n agent_line_dist.append(np.sqrt((agent_node_coordinate[0][0] - road_lines[i][0]) ** 2\n + (agent_node_coordinate[0][1] - road_lines[i][1]) ** 2))\n continue\n\n d2 = (road_lines[i][2] - road_lines[i][0]) ** 2 + (road_lines[i][3] - road_lines[i][1]) ** 2\n if cross >= d2:\n agent_line_dist.append(np.sqrt((agent_node_coordinate[0][0] - road_lines[i][2]) ** 2\n + (agent_node_coordinate[0][1] - road_lines[i][3]) ** 2))\n continue\n r = cross / d2\n p0 = road_lines[i][0] + (road_lines[i][2] - road_lines[i][0]) * r\n p1 = road_lines[i][1] + (road_lines[i][3] - road_lines[i][1]) * r\n agent_line_dist.append(\n np.sqrt((agent_node_coordinate[0][0] - p0) ** 2 + (agent_node_coordinate[0][1] - p1) ** 2))\n\n # find the nearest line index\n agent_line_dist_shortest = float(\"inf\")\n agent_line_shortest_index = 0\n\n for index, item in enumerate(agent_line_dist):\n if item < agent_line_dist_shortest:\n agent_line_shortest_index = index\n agent_line_dist_shortest = item\n\n # find the shortest line's node\n agent_line_shortest_node0 = None\n agent_line_shortest_node1 = None\n\n for (key, value) in road_node_info.items():\n if value[0] == road_lines[agent_line_shortest_index][0] and value[1] == \\\n road_lines[agent_line_shortest_index][1]:\n agent_line_shortest_node0 = key\n if value[0] == road_lines[agent_line_shortest_index][2] and value[1] == \\\n road_lines[agent_line_shortest_index][3]:\n agent_line_shortest_node1 = key\n\n # add new edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.append([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.append([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.append([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.append([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n G.add_edges_from(node_edges)\n simple_paths_list = list()\n if agent_node_No not in G 
or target_node not in G:\n has_path = False\n G.clear()\n else:\n if nx.has_path(G, source=agent_node_No, target=target_node):\n simple_paths = nx.shortest_simple_paths(G, source=agent_node_No, target=target_node, weight='len')\n\n for path in simple_paths:\n simple_paths_list.append(path)\n\n for path in simple_paths_list:\n if path[1] == agent_line_shortest_node1:\n path[0] = agent_line_shortest_node0\n elif path[1] == agent_line_shortest_node0:\n path[0] = agent_line_shortest_node1\n else:\n raise ValueError('First node Error!')\n\n remove_paths_list = list()\n for path in simple_paths_list:\n for path_rest in simple_paths_list[simple_paths_list.index(path) + 1:]:\n if path == path_rest[- len(path):]:\n remove_paths_list.append(path_rest)\n\n for remove_path in remove_paths_list:\n if remove_path in simple_paths_list:\n simple_paths_list.remove(remove_path)\n\n # Choose 1 simple paths\n if len(simple_paths_list) > 1:\n simple_paths_list = simple_paths_list[0:1]\n\n # remove edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n has_path = True\n G.clear()\n else:\n # remove edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise 
ValueError('wrong direction')\n\n has_path = False\n G.clear()\n else:\n G.add_edges_from(node_edges)\n simple_paths_list = list()\n # 判断站点是否在路网上\n if agent_node_No not in G or target_node not in G:\n has_path = False\n G.clear()\n else:\n # 判断站点和目标间是否存在路径\n if nx.has_path(G, source=agent_node_No, target=target_node):\n # 提取所有简单路径\n simple_paths = nx.shortest_simple_paths(G, source=agent_node_No, target=target_node, weight='len')\n\n for path in simple_paths:\n simple_paths_list.append(path)\n\n # 移除带有回环的路网\n remove_paths_list = list()\n for path in simple_paths_list:\n for path_rest in simple_paths_list[simple_paths_list.index(path) + 1:]:\n if path == path_rest[- len(path):]:\n remove_paths_list.append(path_rest)\n\n for remove_path in remove_paths_list:\n if remove_path in simple_paths_list:\n simple_paths_list.remove(remove_path)\n\n # 提取最多2条路径\n if len(simple_paths_list) > 2:\n simple_paths_list = simple_paths_list[0:2]\n\n # 确认存在路径\n has_path = True\n G.clear()\n else:\n # 不存在路径\n has_path = False\n G.clear()\n\n return simple_paths_list, has_path", "def random_map(self, world):\n obstacles = []\n if self.cfg[\"obstacle\"][\"octagon\"][\"enabled\"]:\n obstacles += self.__generate_octagon_obstacles(world)\n if self.cfg[\"obstacle\"][\"rectangle\"][\"enabled\"]:\n obstacles += self.__generate_rectangle_obstacles(world)\n\n # update the current obstacles and goal\n self.current_obstacles = obstacles\n self.add_new_goal()\n\n # apply the new obstacles and goal to the world\n self.apply_to_world(world)", "def get_map_instructions(start_lng, start_lat, end_lng, end_lat):\r\n directions_resp = requests.get(\r\n f\"https://api.mapbox.com/directions/v5/mapbox/walking/{start_lng},{start_lat};{end_lng},{end_lat}\",\r\n params={\r\n \"access_token\": MAPBOX_TOKEN,\r\n \"geometries\": \"geojson\",\r\n \"steps\": \"true\",\r\n \"alternatives\": \"true\",\r\n },\r\n )\r\n instructions=[]\r\n for step in directions_resp.json()['routes'][0]['legs'][0]['steps']:\r\n instructions.append(f\"{step['maneuver']['instruction']}\")\r\n #listToStr = '<br>'.join(map(str, instruction))\r\n return instructions", "def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:LENGTH:1, 0:WIDTH:1]\n pos = np.empty(x.shape + (2,))\n # x.shape = (LENGTH,WIDTH)\n # x.shape + (2,) = (LENGTH,WIDTH,2)\n pos[:, :, 0] = x\n pos[:, :, 1] = y\n # pos.shape = (1890, 2)\n pos = np.reshape(pos, (x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n # If axis is an integer, it specifies the axis of x along which to compute the vector norms\n # axis = 1: h.shape = 1890\n # axis = 0: h.shape = 2\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = 
get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n # print(\"Path\", closedSet)\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. 
Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return [], closedSet", "def calculate_waypoints(global_start, global_goal, global_home, data, drone_altitude, safety_distance):\n # Calculate graph and offsets\n graph, north_offset, east_offset = create_graph(data, drone_altitude, safety_distance)\n\n map_offset = np.array([north_offset, east_offset, .0])\n\n # Convert start position from global to local.\n local_position = global_to_local(global_start, global_home) - map_offset\n\n # Find closest point to the graph for start\n graph_start = closest_point(graph, local_position)\n\n # Convert goal postion from global to local\n local_goal = global_to_local(global_goal, global_home) - map_offset\n\n # Find closest point to the graph for goal\n graph_goal = closest_point(graph, local_goal)\n\n # Find path\n path, _ = a_star(graph, graph_start, graph_goal)\n path.append(local_goal)\n\n # Prune path\n path = collinearity_prune(path, epsilon=1e-3)\n\n # Calculate waypoints\n return [[int(p[0] + north_offset), int(p[1] + east_offset), drone_altitude, 0] for p in path]", "def __init__(self, map_config):\n self.current_obstacles = []\n self.current_goal = None\n self.cfg = map_config", "def a_star(self, mapdata, start, goal):\n\n print \"Inside A star\"\n rospy.loginfo(\"Generate path from (%d,%d) to (%d,%d)\" % (start[0], start[1], goal[0], goal[1]))\n if not PathPlanner.is_cell_walkable(mapdata, goal[0], goal[1]):\n rospy.logerr(\"not walkable goal\")\n return[]\n #calculated from goal\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n frontier_msg = GridCells()\n frontier_cells = []\n for e in frontier.elements:\n frontier_cells.append(PathPlanner.grid_to_world(mapdata, e[1][0], e[1][1]))\n frontier_msg.header = mapdata.header\n frontier_msg.header.stamp = rospy.get_rostime()\n frontier_msg.cell_width = mapdata.info.resolution\n frontier_msg.cell_height = mapdata.info.resolution\n frontier_msg.cells = frontier_cells\n expanded_msg = GridCells()\n expanded_cells = []\n for e in cost_so_far: \n expanded_cells.append(PathPlanner.grid_to_world(mapdata, e[0], e[1]))\n \n expanded_msg.header = mapdata.header\n expanded_msg.header.stamp = rospy.get_rostime()\n expanded_msg.cell_width = mapdata.info.resolution\n expanded_msg.cell_height = mapdata.info.resolution\n expanded_msg.cells = expanded_cells\n self.expanded_pub.publish(expanded_msg)\n rospy.sleep(0.01)\n\n current = frontier.get()\n\n #creates path\n if current == goal:\n entry = goal\n listOfCoord = []\n while entry != None:\n listOfCoord.append(entry)\n entry = came_from[entry]\n listOfCoord.reverse()\n self.expanded_pub.publish(PathPlanner.createGridcells(mapdata, listOfCoord))\n return listOfCoord\n \n for next in PathPlanner.neighbors_of_8(mapdata, current[0], current[1]):\n new_cost = cost_so_far[current] + 1 #assume cost to move each unit is 1\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost + PathPlanner.euclidean_distance(next[0], next[1], goal[0], goal[1])\n frontier.put(next, priority)\n came_from[next] = current\n\n \n return[]", "def prm_planning(start_x, start_y, goal_x, goal_y,\n obstacle_x_list, obstacle_y_list, robot_radius, *, rng=None):\n obstacle_kd_tree = 
KDTree(np.vstack((obstacle_x_list, obstacle_y_list)).T)\n\n sample_x, sample_y = sample_points(start_x, start_y, goal_x, goal_y,\n robot_radius,\n obstacle_x_list, obstacle_y_list,\n obstacle_kd_tree, rng)\n if show_animation:\n plt.plot(sample_x, sample_y, \".b\")\n\n road_map = generate_road_map(sample_x, sample_y,\n robot_radius, obstacle_kd_tree)\n\n rx, ry = dijkstra_planning(\n start_x, start_y, goal_x, goal_y, road_map, sample_x, sample_y)\n\n return rx, ry", "def move_obstacles(obstacles_poses, obstacles_goal_poses):\n # for pose in obstacles_poses:\n # dx = random.uniform(0, 0.03); dy = random.uniform(0,0.03);\n # pose[0] -= np.sign(pose[0])*dx; pose[1] -= np.sign(pose[1])*dy;\n\n \"\"\" Each obstacles tends to go to its selected goal point with random speed \"\"\"\n for p in range(len(obstacles_poses)):\n pose = obstacles_poses[p]; goal = obstacles_goal_poses[p]\n dx, dy = (goal - pose) / norm(goal-pose) * 0.05#random.uniform(0,0.05)\n pose[0] += dx; pose[1] += dy;\n\n return obstacles_poses", "def get_directions():\n return [(1, 0), (0, 1), (-1, 0), (0, -1)]", "def __create_d_map(self):\n goal_map = {}\n # collect all goal nodes\n for i, row in enumerate(self.map.get_node_grid()):\n for j, node in enumerate(row):\n if node.borders_tile_of_type(Quarantine):\n goal_map[node.get_name()] = (i, j)\n # calculate distance to closest goal node for each node\n for i, row in enumerate(self.map.get_node_grid()):\n for j, node in enumerate(row):\n distances = [\n abs(i - y) + abs(j - x)\n for node_name, (y, x) in goal_map.items()\n ]\n self.d_map[node.get_name()] = min(distances)", "def astar_map(map, start, end):\n\n # Create start and end node\n start_node = Node3(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node3(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n x = 0\n while len(open_list) > 0:\n x+=1\n # Get the current node\n current_node = open_list[0]\n \n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node.position == end_node.position:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for key in map:\n if key == tuple(current_node.position):\n for elem in map[key]:\n new_node = Node3(current_node, elem[0],elem[1])\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values \n child.g = current_node.g + child.cost\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n # Add the child to the open list\n open_list.append(child)", "def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:45:1, 0:42:1]\n pos = np.empty(x.shape + (2,))\n pos[:, :, 0] = x;\n pos[:, :, 1] = y\n pos = np.reshape(pos, 
(x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = _get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. 
Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return []", "def factible_positions_in_routes(customer, solution, customers):\n posbyroute = []\n for i,route in enumerate(solution.routes):\n fpos = factible_route_positions(customer, route, customers)\n if fpos != []: posbyroute.append((i,fpos))\n return posbyroute", "def advice(agents: set, n: int) -> list:\n # if n is 0, return an empty list\n if n == 0:\n return list()\n\n # If agent coordinates are outside of the map, they are simply not considered.\n # There are no duplicate agents on the same square.\n agents = agents_cleanup(agents, n)\n\n # If there is an agent on every grid cell, there is no safe space,\n # so return an empty list\n if len(agents) == n * n:\n return list()\n\n # If there are no agents, then every cell is a safe spaces,\n # so return all coordinates\n city = create_city_map(n)\n if not agents:\n return list(city)\n\n city_map_processing(city, agents)\n\n return list(city)", "def registerInitialState(self, gameState):\r\n \r\n '''\r\n Make sure you do not delete the following line. If you would like to\r\n use Manhattan distances instead of maze distances in order to save\r\n on initialization time, please take a look at\r\n CaptureAgent.registerInitialState in captureAgents.py.\r\n '''\r\n CaptureAgent.registerInitialState(self, gameState)\r\n \r\n \r\n self.teamMates = []\r\n for mate in self.getTeam(gameState):\r\n if mate is not self.index:\r\n self.teamMates.append(mate)\r\n \r\n def getSuccessors(walls, state):\r\n successors = []\r\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\r\n x,y = state\r\n dx, dy = Actions.directionToVector(action)\r\n nextx, nexty = int(x + dx), int(y + dy)\r\n if not walls[nextx][nexty]:\r\n nextState = (nextx, nexty)\r\n cost = 1\r\n successors.append( ( nextState, action, cost) )\r\n return successors\r\n \r\n \r\n \r\n class o0State:\r\n def __init__(self, pos, node = None):\r\n self.pos = pos\r\n self.node = node\r\n self.deadEndDepth = 0.0\r\n self.successors = {}\r\n self.successorsByNodePos = {}\r\n def isDeadEndNode(self):\r\n if self.node is None:\r\n return False\r\n noneDeadEndCount = 0\r\n for successor in self.successors.values():\r\n if not successor.isDeadEnd:\r\n noneDeadEndCount += 1\r\n return noneDeadEndCount is 1\r\n class o0Node:\r\n def __init__(self, pos):\r\n self.pos = pos\r\n self.isDeadEnd = False\r\n class o0Successor:\r\n def __init__(self, direction, nextPos, nextNodePos = None):\r\n self.direction = direction\r\n self.nextPos = nextPos\r\n self.nextNodePos = nextNodePos\r\n self.isDeadEnd = False\r\n\r\n class o0PathMap:\r\n def __init__(self, gameState):\r\n #print 'init pathMap'\r\n walls = gameState.getWalls()\r\n positions = walls.asList(False)\r\n self.states = {}\r\n self.nodes = {}\r\n for pos in positions:\r\n self.states[pos] = o0State(pos)\r\n for successor in getSuccessors(walls,pos):\r\n self.states[pos].successors[successor[1]] = o0Successor(successor[1],successor[0])\r\n successorCount = len(self.states[pos].successors)\r\n if successorCount is not 2:\r\n node = o0Node(pos)\r\n self.nodes[pos] = node\r\n self.states[pos].node = node\r\n \r\n def connectNode(node):\r\n for nodeSuccessor in self.states[node.pos].successors.values():\r\n if nodeSuccessor.nextNodePos is None:\r\n forwardSuccessors = [nodeSuccessor]\r\n 
backwardSuccessors = []\r\n previousPos = node.pos\r\n currentPos = nodeSuccessor.nextPos\r\n while currentPos not in self.nodes.keys():\r\n #print node.pos\r\n #print currentPos\r\n if len(self.states[currentPos].successors) is not 2:\r\n print 'not a path'\r\n for successor in self.states[currentPos].successors.values():\r\n #print successor.nextPos\r\n if successor.nextPos[0] is previousPos[0] and successor.nextPos[1] is previousPos[1]:\r\n backwardSuccessors.append(successor)\r\n else:\r\n forwardSuccessors.append(successor)\r\n previousPos = currentPos\r\n currentPos = forwardSuccessors[len(forwardSuccessors) - 1].nextPos\r\n for successor in self.states[currentPos].successors.values():\r\n if successor.nextPos is previousPos:\r\n backwardSuccessors.append(successor)\r\n \r\n for successor in forwardSuccessors:\r\n successor.nextNodePos = currentPos\r\n for successor in backwardSuccessors:\r\n successor.nextNodePos = node.pos\r\n \r\n #connectNode(self.nodes.values()[0])\r\n #connectNode(self.nodes.values()[1])\r\n #connectNode(self.nodes.values()[2])\r\n #connectNode(self.nodes.values()[3])\r\n #connectNode(self.nodes.values()[4])\r\n #connectNode(self.nodes.values()[5])\r\n \r\n for node in self.nodes.values():\r\n connectNode(node)#'''\r\n for state in self.states.values():\r\n for successor in self.states[state.pos].successors.values():\r\n self.states[state.pos].successorsByNodePos[successor.nextNodePos] = successor\r\n \r\n updatedNodes = self.nodes.values()\r\n while(len(updatedNodes) is not 0):\r\n nodePool = updatedNodes\r\n updatedNodes = []\r\n for node in nodePool:\r\n if self.states[node.pos].isDeadEndNode():\r\n self.nodes[node.pos].isDeadEnd = True\r\n for successor in self.states[node.pos].successors.values():\r\n self.states[successor.nextNodePos].successorsByNodePos[node.pos].isDeadEnd = True\r\n updatedNodes.append(self.states[successor.nextNodePos])\r\n \r\n #node.isDeadEnd = self.states[node.pos].isDeadEndNode()#'''\r\n \r\n '''\r\n for node in self.nodes.values():\r\n if self.states[node.pos].isDeadEndNode():\r\n node.isDeadEnd = True#'''\r\n \r\n deadEndNodes = {}\r\n noneDeadEndNodes = {}\r\n for node in self.nodes.values():\r\n if not node.isDeadEnd:\r\n noneDeadEndNodes[node.pos] = node\r\n else:\r\n deadEndNodes[node.pos] = node\r\n \r\n for node in deadEndNodes.values():#\r\n actions = breadthFirstSearch(AnyTargetSearchProblem(gameState,noneDeadEndNodes.keys(),node.pos))\r\n nodeConnectedTo = self.nodes[performActions(node.pos, actions)] \r\n actions = reverseActions(actions)\r\n pos = nodeConnectedTo.pos\r\n deadEndDepth = 0.0\r\n for action in actions:\r\n pos = performActions(pos,[action])\r\n deadEndDepth += 1.0\r\n self.states[pos].deadEndDepth = deadEndDepth\r\n def willDie(self, position, distance, scaredTime = 0):#distance from our agent to closest enemy\r\n deadEndDepth = self.states[position].deadEndDepth\r\n if deadEndDepth >= distance - deadEndDepth and deadEndDepth >= scaredTime:\r\n return True\r\n return False\r\n def isDeadEnd(self, position):\r\n return self.states[position].deadEndDepth >= 0.5\r\n #def getAllStatesInDeadEnd(self, anyState):\r\n \r\n\r\n global pathMap\r\n if pathMap is None:\r\n pathMap = o0PathMap(gameState)\r\n self.pathMap = pathMap\r\n targets[self.index] = None\r\n global lastEattenFoodAreDefendingPos\r\n lastEattenFoodAreDefendingPos = None \r\n global totalFood\r\n totalFood = len(self.getFood(gameState).asList())\r\n global leftFood\r\n leftFood = totalFood\r\n 
#self.debugDraw(pathMap.deadEndNodes.keys(),[1,0,0])\r\n #self.debugDraw(pathMap.nodes.keys(),[0,1,0])\r\n \r\n global pathMapDebugMode\r\n if pathMapDebugMode:\r\n for state in self.pathMap.states.values():\r\n deadEndColor = 0.3 + state.deadEndDepth * 0.1\r\n if deadEndColor>1.0:\r\n deadEndColor = 1.0\r\n if state.deadEndDepth == 0:\r\n deadEndColor = 0.0\r\n \r\n nodeColor = 0.0\r\n if state.node is not None:\r\n nodeColor = 0.5\r\n self.debugDraw(state.pos,[deadEndColor,0,0])\r\n\r\n self.curryFoodScore = 0.8\r\n \r\n \r\n \r\n global defenseWall\r\n global defensePositions\r\n if len(defenseWall) is 0:\r\n foods = self.getFoodYouAreDefending(gameState)\r\n for capsule in self.getCapsulesYouAreDefending(gameState):\r\n foods[capsule[0]][capsule[1]] = True\r\n defenseWall = actionsToPositions((0,0), aStarSearch(DefenseSearchProblem(gameState, foods, self.index),nullHeuristic))\r\n defensePositions = getPositionsNeededToDefense(gameState)\r\n global defenseWallDebugMode\r\n if defenseWallDebugMode is True:\r\n self.debugDraw(defenseWall,[0,0.5,0])\r\n self.debugDraw(defensePositions,[0.5,0,0])\r\n \r\n global agentInDeadEnd\r\n agentInDeadEnd[self.index] = False", "def create_POI(self, location_list, moore_range):\n\n for r in range(len(location_list)):\n\n x = location_list[r][0]\n y = location_list[r][1]\n\n coordinates = []\n for i in range(moore_range):\n for j in range(moore_range):\n if x+i > 0 and len(self.grid_density) > x+i and y+j > 0\\\n and len(self.grid_density[0]) > y+j:\n\n coordinates.append((x+i, y+j))\n coordinates.append((x-i, y-j))\n coordinates.append((x-i, y+j))\n coordinates.append((x+i, y-j))\n\n for coords in coordinates:\n if not self.grid.out_of_bounds(coords):\n this_cell = self.grid.get_cell_list_contents(coords)\n\n for agent in this_cell:\n if type(agent) is nodeAgent:\n\n agent.POI = True", "def main():\n for task in range(1, 6):\n # get map object for the current task\n map_obj = MapObj(task=task)\n # display map\n map_obj.show_map()\n # find cost optimal path using a-star\n node = search(\n map_obj=map_obj,\n heuristic=euclidian_distance,\n moving_goal=(task == 5)\n )\n # draw optimal path on map\n map_obj.draw_path(node)\n # display the map\n map_obj.show_map()", "def a_star(self, xy1, xy2):\n tile_col1, tile_row1 = self.the_map.xy_to_cr(xy1[0], xy1[1])\n tile_col2, tile_row2 = self.the_map.xy_to_cr(xy2[0], xy2[1])\n \n successor_to_parent_map = {}\n start_state = (tile_col1, tile_row1)\n #print('x=%d, y=%d to col=%d, row=%d (map row=%d, col= %d)' % (xy1[0], xy1[1], tile_col1, tile_row1, \n # self.the_map.tile_speeds.shape[0], self.the_map.tile_speeds.shape[1]))\n successor_to_parent_map[(start_state, None)] = None # (Successor, Action) -> (Parent, Action)\n \n open_list = PriorityQueue()\n open_list.update((start_state, None), 0)\n closed = []\n \n while not open_list.isEmpty():\n current_state, action_to_current_state = open_list.pop()\n \n if current_state == (tile_col2, tile_row2):\n return self.__get_action_path((current_state, action_to_current_state), successor_to_parent_map)\n \n if current_state not in closed:\n if current_state == start_state:\n current_cost = 0\n else:\n current_cost = len(self.__get_action_path((current_state, action_to_current_state),\n successor_to_parent_map))\n \n for successor_state, action, step_cost in self.__get_successors(current_state):\n cost = current_cost + step_cost + self.__cartesian_distance(current_state, successor_state)\n \n open_list.update((successor_state, action), cost)\n \n if successor_state not in 
closed:\n successor_to_parent_map[(successor_state, action)] = (current_state, action_to_current_state)\n \n closed.append(current_state)\n return []", "def _generate_relative_location_action(ui_object_list, ui_v_dist, ui_h_dist):\n action_list = []\n for object_idx, ui_object in enumerate(ui_object_list):\n if object_idx > ui_v_dist.shape[0]:\n assert False, ('ui_object_idx %d out of virtical distance bound %d' %\n (object_idx, ui_v_dist.shape[0]))\n if object_idx > ui_h_dist.shape[0]:\n assert False, ('ui_object_idx %d out of horizontal distance bound %d' %\n (object_idx, ui_h_dist.shape[0]))\n\n if _valid_clickable_object(ui_object) or _valid_typable_object(ui_object):\n neighbor_dict = _get_single_direction_neighbors(object_idx, ui_v_dist,\n ui_h_dist)\n for neighbor_context, neighbor_index in neighbor_dict.items():\n neighbor_object = ui_object_list[neighbor_index]\n if _valid_object_with_name(neighbor_object):\n for neighbor_context_str in neighbor_context.value:\n action_list.extend(\n _generate_relative_location_rule_action(ui_object, object_idx,\n neighbor_object,\n neighbor_context_str))\n return action_list", "def display_global_path(start, goal, path, occupancy_grid):\n # Displaying the map\n fig_astar, ax_astar = display_map(occupancy_grid)\n\n # Plot the best path found and the list of visited nodes\n ax_astar.plot(path[0], path[1], marker=\"o\", color='blue');\n ax_astar.scatter(start[0], start[1], marker=\"o\", color='green', s=200);\n ax_astar.scatter(goal[0], goal[1], marker=\"o\", color='purple', s=200);\n # ax.set_ylim(ax.get_ylim()[::-1])", "def get_path_to(self, dest_x, dest_y) -> List[Tuple[int, int]]:\n\t\t# Kopier den 'walkable' list.\n\t\t# Note `cost` fordi vi ser hvor meget tid det koster at komme over til målet.\n\t\tcost = np.array(self.entity.game_map.tiles['walkable'], dtype=np.int8)\n\n\t\tfor entity in self.entity.game_map.entities:\n\t\t\t# Check that an entity blocks movement and that cost isn't zero (blockin)\n\t\t\tif entity.blocks_movement and cost[entity.x, entity.y]:\n\t\t\t\t# Add to the cost of a blocked position\n\t\t\t\t# A lower number means more enemies will crowd behind each other in hallways.\n\t\t\t\t# A higher number means enemies will take longer paths in order to surround the player\n\t\t\t\tcost[entity.x, entity.y] += 10 # This encourages the entity to move around that area, since the entity will try to go the path with the smallest cost\n\n\t\t# Create a graph from the cost array and pass that graph to a new pathfinder\n\t\tgraph = tcod.path.SimpleGraph(cost=cost, cardinal=2, diagonal=3)\n\t\tpathfinder = tcod.path.Pathfinder(graph)\n\n\t\tpathfinder.add_root((self.entity.x, self.entity.y)) # Start position\n\n\t\t# Compute the path to the destination and remove the starting point\n\t\tpath: List[List[int]] = pathfinder.path_to((dest_x, dest_y))[1:].tolist()\n\n\t\t# Convert from List[List[int]] to List[Tuple[int, int]]\n\t\treturn [(index[0], index[1]) for index in path]", "def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the 
obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)", "def find_trajectory(self):\n\n translation,_ = self.trans_listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.x = translation[0]\n self.y = translation[1]\n \n cell_x = int(np.floor(self.x / self.metadata.resolution) + self.w / 2) - self.x_offset\n cell_y = int(np.floor(self.y / self.metadata.resolution) + self.h / 2) - self.y_offset\n\n visited = np.zeros(self.costmap.shape)\n visited[cell_y,cell_x] = 1\n\n to_explore = self.add_neighbors(visited, Node(cell_x,cell_y,0,None))\n to_explore.sort(key=operator.attrgetter('cost'))\n\n # Run modified Dijkstra algorithm\n while to_explore: \n next_node = to_explore.pop(0)\n if next_node.cost == -1:\n print(\"Found goal!\")\n\t\tself.send_final_pose(next_node)\n self.number_of_fails = 0\n self.get_trajectory(next_node)\n return\n \n to_explore = to_explore + self.add_neighbors(visited, next_node)\n to_explore.sort(key=operator.attrgetter('cost'))\n\n self.number_of_fails += 1\n print(\"Failed: %d times % self.number_of_fails\")\n\n if self.number_of_fails >= NUMBER_OF_FAILS:\n print(\"Exiting!\")\n msg = Bool()\n msg.data = True\n self.exp_complete_pub.publish(msg)", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if 
count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = 
nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n 
very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def find(Map, PosI, PosF):\n \n # Pour les tests, cf. Pathfinding et Pathfinding2 \n \n InitialPosI = PosI\n InitialPosF = PosF\n Chemin = []\n \n Hvalue = np.zeros((np.shape(Map))) #Distance\n Gvalue = np.zeros((np.shape(Map))) #Movement Cost\n Fvalue = np.zeros((np.shape(Map))) #G+H \n Gvalue[:] = np.nan #initialiser Gvalue à une matrice NaN\n \n OpenList = [(InitialPosI,'N')]\n CloseList = []\n \n # Initialisation de Hvalue\n for i in range(np.shape(Hvalue)[0]):\n for j in range(np.shape(Hvalue)[1]):\n if Map[i,j] !=1:\n Hvalue[i,j] = abs(i-PosF[0]) + abs(j-PosF[1])\n else:\n Hvalue[i,j] = np.nan\n\n### Round 1 (+initialisations)\n \n CloseList.append(tuple(PosI))\n \n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #D : fleche vers le bas..\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) \n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R'))\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L'))\n \n \n for OV in OpenList: #OV pour OpenValue \n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList: #CV pour ClosedValue\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n#### Round NEXT \n ###Vers le min de Fvalue:\n while PosF not in CloseList and PosI != PosF:\n \n if np.all(np.isnan(Fvalue)): #Check si F est égale à une matrice Full NaN\n# print('Pas de chemin')\n return(False) # soit return False, soit return la position init, donc bon..\n \n Index = np.argwhere(Fvalue == np.nanmin(Fvalue))\n PosI = Index.tolist()[0]\n \n CloseList.append(tuple(PosI))\n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #DOWN (fleche vers le bas..)\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) #Up\n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R')) #Right\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L')) #Left\n \n for OV in OpenList:\n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList:\n Fvalue[CV[0],CV[1]] = np.nan\n 
\n\n \n############## TRACING BACK \n PosF = InitialPosF\n\n while InitialPosI not in Chemin:\n \n for Trace in OpenList:\n if Trace[0] == PosF:\n Chemin.append(PosF)\n if Trace[1] == 'U':\n PosF = (PosF[0]-1,PosF[1]) #Go up\n elif Trace[1] == 'D':\n PosF = (PosF[0]+1,PosF[1]) #Go down\n elif Trace[1] == 'L':\n PosF = (PosF[0],PosF[1]-1) #Go left\n elif Trace[1] == 'R':\n PosF = (PosF[0],PosF[1]+1) #Go right\n# else:\n# print(Chemin)\n Chemin.reverse()\n return(Chemin)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n stack = util.Stack() # stack to keep track of frontier nodes where pacman has move\n stack.push(start)\n explored = set() # to keep track of explored areas\n route = []\n\n while not stack.isEmpty():\n current_position = stack.pop()\n explored.add(current_position)\n\n if problem.isGoalState(current_position):\n break\n for each in problem.getSuccessors(current_position):\n if each[0] not in explored: # x,y coordinates of positions we haven't visited are pushed onto stack\n # print(each)\n stack.push(each[0])\n route.append((current_position, each[0], each[1])) # record of movements to rebuild path (from,to,how)\n\n x = len(route)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if route[x - 1][0] != route[x - 2][1]: # starts from goal and works backwards\n route.remove(route[x - 2])\n x = len(route)\n else:\n x -= 1\n # print(route)\n return [action[2] for action in route]", "def add_building_output_locations2(self,areasList,start,end,step): \n print \"Getting buildings locations...\"\n \n dictionaries = []\n dictionary = {}\n \n for a in areasList:\n \n dictionaries.append(self.grid.get_building_output_locations(a[0],a[1]))\n \n for dict in dictionaries:\n for row in dict.iteritems(): \n dictionary[row[0]] = row[1] \n\n print \"Number of buildings = %s\" % (len(dictionary))\n\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def search(world_state, robot_pose, goal_pose):\n if world_state.shape[0] == 0 or world_state.shape[1] == 0:\n print(\"Error, empty world_state!!!\")\n return None\n if not is_pos_valid(robot_pose, world_state.shape):\n print(\"Error, invalid robot_pose!!!\", robot_pose)\n return None\n if not is_pos_valid(goal_pose, world_state.shape):\n print(\"Error, invalid goal_pose!!!\", goal_pose)\n return None\n\n directions = [(-1, 0), (1, 0), (0, -1), (0, 1)] # orthogonal directions\n found = False\n\n x, y = robot_pose\n g = 0\n h = heuristic(robot_pose, goal_pose)\n f = g + h\n open = [[f, x, y]]\n came_from = {}\n came_from[robot_pose] = None\n cost_so_far = {}\n cost_so_far[robot_pose] = 0\n\n while open:\n open.sort() # sort based on f value\n current = open.pop(0)\n\n x, y = current[1:]\n g = cost_so_far[(x, y)]\n\n if (x, y) == goal_pose:\n found = True\n break\n else:\n # find available next positions\n for direction in directions:\n x2 = x + direction[0]\n y2 = y + direction[1]\n\n # check whether x2 and y2 are valid\n if not is_pos_valid((x2, y2), world_state.shape):\n continue\n\n g2 = g + 1\n if world_state[x2, y2] == 0 and ((x2, y2) not in cost_so_far or g2 < cost_so_far[(x2, y2)]):\n\n h2 = heuristic((x2, y2), goal_pose)\n f2 = g2 + h2\n open.append([f2, x2, y2])\n came_from[(x2, y2)] = (x, y)\n cost_so_far[(x2, y2)] = g2\n if found:\n path = [goal_pose]\n current = goal_pose\n while came_from[current]:\n current = came_from[current]\n path.append(current)\n\n path.reverse()\n return path\n\n else:\n return 
None", "def get_possible_actions(self, world_state,agent_host):\n action_list = []\n possibilities = {'movenorth 1': -3,'movesouth 1': 3,'moveeast 1': 1,'movewest 1': -1}\n #check walls to see whether can move left,right,back,forward\n #check floor beneath to see whether should do anything at all, or just nothing and sink\n obs_text = world_state.observations[-1].text\n obs = json.loads(obs_text)\n grid = load_grid(world_state)\n print 'GRID SIZE: ', len(grid)\n for k,v in possibilities.items():\n #with current grid, index 31 will always be our agent's current location\n #check walls to see whether can move left,right,back,forward\n if grid[31+v+9] == 'water' or grid[31+v+9] == 'wooden_door': #+9 because we want to check\n action_list.append(k) #where our feet are located\n #check if you can teleport down a level\n if grid[31-27] == 'water' or grid[31-27] == 'wooden_door':\n action_list.append(self.teleport(agent_host,False))\n #check if you can teleport up a level\n if grid[31+45] == 'water' or grid[31+45] == 'wooden_door':\n action_list.append(self.teleport(agent_host,True))\n\n print(\"ACTION LIST: {}\".format(action_list))\n return action_list", "def iterate_paths_map(riv_dirs,paths_map,nlat=360,nlong=720):\n\n if np.count_nonzero(paths_map) == paths_map.size:\n return False\n for i in range(nlat+2):\n for j in range(nlong):\n if i == 0 or i == nlat+1:\n paths_map[i,j] = 1\n elif j == 0:\n paths_map[i,j] = count_accumulated_inflow(np.append(riv_dirs[i-1:i+2,nlong-1:nlong],riv_dirs[i-1:i+2,j:j+2],axis=1),\n np.append(paths_map[i-1:i+2,nlong-1:nlong],paths_map[i-1:i+2,j:j+2],axis=1))\n elif j == nlong-1:\n paths_map[i,j] = count_accumulated_inflow(np.append(riv_dirs[i-1:i+2,j-1:j+1],riv_dirs[i-1:i+2,0:1],axis=1),\n np.append(paths_map[i-1:i+2,j-1:j+1],paths_map[i-1:i+2,0:1],axis=1))\n else:\n paths_map[i,j] = count_accumulated_inflow(riv_dirs[i-1:i+2,j-1:j+2],\n paths_map[i-1:i+2,j-1:j+2])\n return True", "def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 
0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight", "def a_star_planning(start_x, start_y, goal_x, goal_y, id):\n # extract the index of start node, goal node and obstacles\n start = Point(round(start_x/grid_size), round(start_y/grid_size), 0.0, -1, [0,0,0])\n goal = Point(round(goal_x/grid_size), round(goal_y/grid_size), 0.0, -1, [0,0,0])\n if not_legal(goal, id):\n print ('not a legal goal')\n return False\n \n # time.sleep(10)\n\n # create the open list and close list to store nodes\n openset, closeset = deque(), deque()\n openset.append(start)\n\n while True:\n # find out the min f node to explore\n\n current_node = min(openset,\n key=lambda node: node.g + 
calculate_heuristic(node,goal))\n\n # pltplt.plot(current_node.x, current_node.y, \"b*\")\n if len(closeset) % 10 == 0:\n plt.pause(0.001)\n\n if current_node.x == goal.x and current_node.y == goal.y:\n print(\"Congratulations! You have found the goal!\")\n goal.parent = current_node\n break\n\n # Remove it from the open list\n openset.remove(current_node)\n # Add it to the close list\n closeset.append(current_node)\n\n # Explore the neighbour\n for motion in motions:\n if motion == current_node.parent_motion:\n turn_cost = 0\n elif (motion[0] == -1 * current_node.parent_motion[0]) and (motion[1] == -1 * current_node.parent_motion[1]):\n turn_cost = 1.5\n else:\n turn_cost = 1\n\n node = Point(current_node.x + motion[0],\n current_node.y + motion[1],\n current_node.g + motion[2] + turn_cost,\n current_node,\n motion,\n )\n\n # ignore it if it is in the close list\n flag = False\n for item in closeset:\n if item.x == node.x and item.y == node.y:\n flag = True\n break\n if flag:\n continue\n # ignore it if it is obstacle\n\n if not_legal(node, id):\n continue\n # update its parent if it is the open list\n flag = True\n for item in openset:\n if item.x == node.x and item.y == node.y:\n flag = False\n # if closer, update the parent\n if node.g <= item.g:\n item.g = node.g\n item.parent = node.parent\n item.parent_motion = node.parent_motion\n break\n # add to the open list if it is not in the open list\n if flag:\n openset.append(node)\n\n # generate the final path\n while True:\n route = deque()\n route.append(goal)\n plt.plot(goal.x, goal.y, \"rx\")\n if goal.parent == -1:\n break\n else:\n goal = goal.parent\n route.appendleft(goal)\n # return route\n # return False\n if NEED_DRAW:\n # draw map\n for i in range(map.gridwidth):\n for j in range(map.gridheight):\n if map.grid[1,i,j] >0:\n plt.plot(i, j, \"xc\")\n\n plt.plot(start.x, start.y, \"ro\")\n plt.plot(goal.x, goal.y, \"go\")\n\n for goal in route:\n plt.plot(goal.x, goal.y, \"rx\")\n plt.show()", "def state_to_locations(state: list) -> list:\n\n locations = []\n for i in range(0, 16):\n locations.append((0, 0))\n # Each tuple represents a location on the board as (row, column)\n\n \"\"\" \"locations\" keeps track of all fifteen numbers in the given state and the goal \n state. The location of the blank in the state is stored as the tuple at locations[0], \n the location of the number 1 is stored as locations[1], so on and so forth.\"\"\"\n\n \"\"\" Due to the nature of indices on a list, when a location is stored as a tuple \n (row, column), the four rows and four columns are represented as indices from 0 \n to 3, even though the numbers 1 through 15 are represented as indices from 1 to \n 15 on the list.\"\"\"\n\n for i in range(0, 4):\n for j in range(0, 4):\n \"\"\" The loop scans the given state and reads the integer at [i][j]. The number \n is stored at its corresponding index in the list \"locations\". 
By the time the \n loop finishes, the locations of all fifteen numbers as well as the blank in \n the given state will have been stored in the list.\"\"\"\n num = state[i][j]\n locations[num] = (i, j)\n\n return locations", "def _convert_obstacle_to_map_coordinates(self, obstacle_edges):\n new_obstacle_edges = []\n\n for edge in obstacle_edges:\n new_obstacle_edges.append([\n edge[0] * self.resolution + self.origin_x,\n edge[1] * self.resolution + self.origin_y\n ])\n\n return new_obstacle_edges", "def find_good_paths(self):\n return self.robot_step((0,0),[])", "def test():\n test_map1 = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n x_spacing1 = 0.13\n y_spacing1 = 0.2\n start1 = np.array([[0.3], [0.3], [0]])\n goal1 = np.array([[0.6], [1], [0]])\n path1 = dijkstras(test_map1,x_spacing1,y_spacing1,start1,goal1)\n true_path1 = np.array([\n [ 0.3 , 0.3 ],\n [ 0.325, 0.3 ],\n [ 0.325, 0.5 ],\n [ 0.325, 0.7 ],\n [ 0.455, 0.7 ],\n [ 0.455, 0.9 ],\n [ 0.585, 0.9 ],\n [ 0.600, 1.0 ]\n ])\n if np.array_equal(path1,true_path1):\n print(\"Path 1 passes\")\n\n test_map2 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n start2 = np.array([[0.5], [1.0], [1.5707963267948966]])\n goal2 = np.array([[1.1], [0.9], [-1.5707963267948966]])\n x_spacing2 = 0.2\n y_spacing2 = 0.2\n path2 = dijkstras(test_map2,x_spacing2,y_spacing2,start2,goal2)\n true_path2 = np.array([[ 0.5, 1.0],\n [ 0.5, 1.1],\n [ 0.5, 1.3],\n [ 0.5, 1.5],\n [ 0.7, 1.5],\n [ 0.9, 1.5],\n [ 1.1, 1.5],\n [ 1.1, 1.3],\n [ 1.1, 1.1],\n [ 1.1, 0.9]])\n if np.array_equal(path2,true_path2):\n print(\"Path 2 passes\")", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def place_targets():\n\n \n coords = []\n while len(coords)<self.N_targets:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y)\n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords +=[p]\n self.coordinates__targets = coords", "def _initialize_chaotic_map(self, agents: List[Agent]) -> None:\n\n for i, agent in enumerate(agents):\n if i == 0:\n for j in range(agent.n_variables):\n agent.position[j] = r.generate_uniform_random_number(\n size=agent.n_dimensions\n )\n else:\n for j in range(agent.n_variables):\n # Calculates its position using logistic chaotic map (eq. 
18)\n agent.position[j] = (\n self.eta\n * agents[i - 1].position[j]\n * (1 - agents[i - 1].position[j])\n )", "def get_driving_instructions(start_lng, start_lat, end_lng, end_lat):\r\n directions_resp = requests.get(\r\n f\"https://api.mapbox.com/directions/v5/mapbox/driving/{start_lng},{start_lat};{end_lng},{end_lat}\",\r\n params={\r\n \"access_token\": MAPBOX_TOKEN,\r\n \"geometries\": \"geojson\",\r\n \"steps\": \"true\",\r\n \"alternatives\": \"true\",\r\n },\r\n )\r\n instructions=[]\r\n for step in directions_resp.json()['routes'][0]['legs'][0]['steps']:\r\n instructions.append(f\"{step['maneuver']['instruction']}\")\r\n #listToStr = '<br>'.join(map(str, instruction))\r\n return instructions", "def test_transition_function_empty_grid(self):\r\n map_file_path = os.path.abspath(os.path.join(__file__, MAPS_DIR, 'empty-8-8/empty-8-8.map'))\r\n grid = MapfGrid(parse_map_file(map_file_path))\r\n\r\n # agents are starting a\r\n agent_starts = ((0, 0), (7, 7))\r\n agents_goals = ((0, 2), (5, 7))\r\n\r\n env = MapfEnv(grid, 2, agent_starts, agents_goals,\r\n FAIL_PROB, REWARD_OF_CLASH, REWARD_OF_GOAL, REWARD_OF_LIVING, OptimizationCriteria.Makespan)\r\n\r\n first_step_transitions = [((round(prob, 2), collision), next_state, reward, done)\r\n for ((prob, collision), next_state, reward, done) in\r\n env.P[env.s][vector_action_to_integer((RIGHT, UP))]]\r\n\r\n self.assertEqual(set(first_step_transitions), {\r\n ((0.64, False), env.locations_to_state(((0, 1), (6, 7))), REWARD_OF_LIVING, False), # (RIGHT, UP)\r\n ((0.08, False), env.locations_to_state(((1, 0), (6, 7))), REWARD_OF_LIVING, False), # (DOWN, UP)\r\n ((0.08, False), env.locations_to_state(((0, 0), (6, 7))), REWARD_OF_LIVING, False), # (UP, UP)\r\n ((0.08, False), env.locations_to_state(((0, 1), (7, 7))), REWARD_OF_LIVING, False), # (RIGHT, RIGHT)\r\n ((0.08, False), env.locations_to_state(((0, 1), (7, 6))), REWARD_OF_LIVING, False), # (RIGHT, LEFT)\r\n ((0.01, False), env.locations_to_state(((1, 0), (7, 7))), REWARD_OF_LIVING, False), # (DOWN, RIGHT)\r\n ((0.01, False), env.locations_to_state(((1, 0), (7, 6))), REWARD_OF_LIVING, False), # (DOWN, LEFT)\r\n ((0.01, False), env.locations_to_state(((0, 0), (7, 7))), REWARD_OF_LIVING, False), # (UP, RIGHT)\r\n ((0.01, False), env.locations_to_state(((0, 0), (7, 6))), REWARD_OF_LIVING, False) # (UP, LEFT)\r\n })\r\n\r\n wish_state = env.locations_to_state(((0, 1), (6, 7)))\r\n second_step_transitions = [((round(prob, 2), collision), next_state, reward, done)\r\n for ((prob, collision), next_state, reward, done) in\r\n env.P[wish_state][vector_action_to_integer((RIGHT, UP))]]\r\n\r\n # [(0,0), (7,7)]\r\n self.assertEqual(set(second_step_transitions), {\r\n ((0.64, False), env.locations_to_state(((0, 2), (5, 7))), REWARD_OF_LIVING + REWARD_OF_GOAL, True),\r\n # (RIGHT, UP)\r\n ((0.08, False), env.locations_to_state(((1, 1), (5, 7))), REWARD_OF_LIVING, False), # (DOWN, UP)\r\n ((0.08, False), env.locations_to_state(((0, 1), (5, 7))), REWARD_OF_LIVING, False), # (UP, UP)\r\n ((0.08, False), env.locations_to_state(((0, 2), (6, 7))), REWARD_OF_LIVING, False), # (RIGHT, RIGHT)\r\n ((0.08, False), env.locations_to_state(((0, 2), (6, 6))), REWARD_OF_LIVING, False), # (RIGHT, LEFT)\r\n ((0.01, False), env.locations_to_state(((1, 1), (6, 7))), REWARD_OF_LIVING, False), # (DOWN, RIGHT)\r\n ((0.01, False), env.locations_to_state(((1, 1), (6, 6))), REWARD_OF_LIVING, False), # (DOWN, LEFT)\r\n ((0.01, False), env.locations_to_state(((0, 1), (6, 7))), REWARD_OF_LIVING, False), # (UP, RIGHT)\r\n ((0.01, 
False), env.locations_to_state(((0, 1), (6, 6))), REWARD_OF_LIVING, False) # (UP, LEFT)\r\n })", "def construct_map_coordinates(self):\n for y in range(len(self.island_map)):\n iteration = iter(self.island_map)\n length = len(next(iteration))\n if not all(len(list) == length for list in iteration):\n raise ValueError('Inconsistent line length')\n if self.island_map[y][0] != 'O' and self.island_map[y][1] == 'O':\n print(self.island_map[y][0], self.island_map[y][1])\n raise ValueError('Bad boundary')\n\n self.construct_vertical_coordinates(h_axis=y)", "def ia_reflexion(data_ia, data_map):\n ia = data_ia['ia_id']\n enemy = data_ia['enemy_id']\n commands = {}\n\n new_positions = []\n moved_units = []\n\n for ia_unit in data_ia[ia]:\n unit_has_attacked = False\n unit_targets = []\n\n for enemy_unit in data_ia[enemy]:\n # Find each possible target for the Dwarves.\n if data_ia[ia][ia_unit][0] == 'D':\n if (ia_unit[0] - 1) <= enemy_unit[0] <= (ia_unit[0] + 1) and (ia_unit[1] - 1) <= enemy_unit[1] <= (ia_unit[1] + 1):\n # Add the unit to the target list.\n unit_targets.append(enemy_unit)\n\n # Find each possible target for the Elves - ATTACK\n else:\n for i in range(2):\n if (ia_unit[0] - (1 + i)) <= enemy_unit[0] <= (ia_unit[0] + (1 + i)) and (ia_unit[1] - (1 + i)) <= enemy_unit[1] <= (ia_unit[1] + (1 + i)):\n # Add the unit to the target list.\n unit_targets.append(enemy_unit)\n\n # Find the weakest units.\n if unit_targets:\n target = unit_targets[0]\n for enemy_unit in unit_targets:\n if data_ia[enemy][enemy_unit][0] == 'D' or data_ia[enemy][enemy_unit][1] < data_ia[enemy][target][1]:\n target = enemy_unit\n\n # Write the attack.\n commands[data_ia[ia][ia_unit][2]] = [ia_unit, ' -a-> ', target]\n unit_has_attacked = True\n\n # Find the weakest of all enemy's units - MOVE\n if not unit_has_attacked:\n target_list = data_ia[enemy].keys()\n target = target_list[0]\n\n for enemy_unit in data_ia[enemy]:\n if data_ia[enemy][enemy_unit][0] == 'D' or data_ia[enemy][enemy_unit][1] < data_ia[enemy][target][1]:\n target = enemy_unit\n\n target_cell = [ia_unit[0], ia_unit[1]]\n # Move on Y axis\n if target and abs(ia_unit[1] - target[1]) > abs(ia_unit[0] - target[0]) and 1 <= ia_unit[0] <= data_map['map_size'] and 1 <= ia_unit[1] <= data_map['map_size']:\n if ia_unit[1] > target[1]:\n target_cell[1] -= 1\n else:\n target_cell[1] += 1\n # Move on X axis\n elif target and 1 <= ia_unit[0] <= data_map['map_size'] and 1 <= ia_unit[1] <= data_map['map_size']:\n if ia_unit[0] > target[0]:\n target_cell[0] -= 1\n else:\n target_cell[0] += 1\n\n new_target = False\n # Check if he can move on the targeted position.\n enemy_positions = data_ia[enemy].keys()\n ia_positions = data_ia[ia].keys()\n for units in moved_units:\n del ia_positions[ia_positions.index(units)]\n\n # If the units can't move, find another free cell.\n if target_cell in (new_positions or enemy_positions or ia_positions):\n new_target_cells = []\n for line in range(target_cell[0] - 1, target_cell[0] + 2):\n for column in range(target_cell[1] - 1, target_cell[1] + 2):\n\n # Append the possible free cell to the list.\n if (line, column) not in (new_positions or enemy_positions or ia_positions):\n new_target_cells.append((line, column))\n\n # Choose the nearest free cell.\n if new_target_cells:\n new_target = new_target_cells[0]\n for cell in new_target_cells:\n if abs(ia_unit[0] - cell[0]) + abs(ia_unit[1] - cell[1]) < abs(ia_unit[0] - new_target[0]) + abs(ia_unit[1] - new_target[1]):\n new_target = new_target_cells[new_target_cells.index(cell)]\n\n 
# Save the new target in the correct variable.\n if new_target:\n target_cell = new_target\n\n # Write the move\n if target_cell != ia_unit:\n commands[data_ia[ia][ia_unit][2]] = [ia_unit, ' -m-> ', target_cell]\n new_positions.append(target_cell)\n moved_units.append(ia_unit)\n\n return commands", "def generate_all_locations(grid, shape):", "def enemy_start_locations(self) -> List[Point2]:\n return self._game_info.start_locations", "def makeLocationMap(self):\n\t\tlocationMap = [[(0,0) for i in range(self.numRects)] for j in range(self.numRects)]\n\t\tstartTop = 0.5*(self.height-self.numRects*self.angle*self.rectWidth)\n\t\tstartLeft = (self.width/2)-(self.numRects/2)*self.rectWidth\n\t\tfor row in enumerate(self.makeIndexMap()):\n\t\t\tfor col in row[1]:\n\t\t\t\tlocationMap[col[0]][col[1]] = \\\n\t\t\t\t\t(startLeft+(col[0]+col[1])*self.rectWidth/2,\n\t\t\t\t\tstartTop+(row[0]+1)*0.5*self.angle*self.rectWidth)\n\t\treturn locationMap", "def get_goat_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_goat():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def _generate_absolute_location_action(ui_object_list):\n action_list = []\n for grid_direction_str, grid_num in _LOCATION_GRID_DICT.items():\n grid_objects_idx = [\n i for i in range(len(ui_object_list))\n if ui_object_list[i].grid_location == grid_num\n ]\n # If only one ui object locates in this grid, an action will be generated.\n if len(grid_objects_idx) == 1:\n object_in_grid = ui_object_list[grid_objects_idx[0]]\n action_list.extend(\n _generate_absolute_location_rule_action(object_in_grid,\n grid_objects_idx[0],\n grid_direction_str))\n return action_list", "def create_path(self, board: np.array, my_location: tuple, goal_location: tuple) -> bool:\n to_visit = [my_location]\n visited = []\n\n came_from = dict()\n came_from[my_location] = None\n\n came_from_direction = dict()\n came_from_direction[my_location] = Directions.ZERO\n\n while to_visit:\n point = to_visit.pop(0)\n if point == goal_location: break\n for direction in Directions.NEIGHBOURS:\n # By making it a numpy can add the values\n new_point = tuple(np.array(point) + direction.array)\n\n # Either the row or column value is not on the board\n if not self.in_bounds(new_point):\n continue\n\n # Has already visited that point\n if new_point in visited:\n continue\n\n # Can it reach this point? 
-> Yes the add to visit list\n if self.check_direction(board, point, direction):\n to_visit.append(new_point)\n came_from[point] = new_point\n came_from_direction[point] = direction\n\n visited.append(point)\n return self.reverse_path(came_from, came_from_direction, goal_location)", "def solution(self):\n return [node.move for node in self.path()[1:]]", "def get_steps_from_position_list(position_list):\n\n step_list = []\n\n for i in range(len(position_list) - 1):\n current_position = position_list[i]\n next_position = position_list[i+1]\n if (current_position.x == next_position.x and \n current_position.y == next_position.y):\n continue\n\n #Cardinal directions\n if (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == 0):\n next_action = \"right\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == 0):\n next_action = \"left\"\n elif (next_position.x - current_position.x == 0 and \n next_position.y - current_position.y == 1):\n next_action = \"up\"\n elif (next_position.x - current_position.x == 0 and \n next_position.y - current_position.y == -1):\n next_action = \"down\"\n\n #Extended directions\n elif (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == 1):\n next_action = \"up_right\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == 1):\n next_action = \"up_left\"\n elif (next_position.x - current_position.x == -1 and \n next_position.y - current_position.y == -1):\n next_action = \"down_left\"\n elif (next_position.x - current_position.x == 1 and \n next_position.y - current_position.y == -1):\n next_action = \"down_right\"\n next_step = Step(current_position, next_action)\n step_list.append(next_step)\n \n if len(step_list) == 0:\n next_step = Step(position_list[0], \"stay\")\n step_list.append(next_step)\n\n return step_list", "def traverse_map(map, x_step, y_step):\n trees_hit = 0\n map_depth = len(map)\n y_steps = range(0,map_depth,y_step)\n for j,step in enumerate(y_steps):\n trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0\n return trees_hit", "def get_valid_locations(location_list, grid, shape):", "def find_path(maze_map, start, target):\n path = [] # path list\n tried = set() # set for faster membership checks\n done = False\n curr_tile = start\n while not done:\n if curr_tile == target:\n done = True # if at target tile, we are done\n else:\n options = [ # possible moves\n (curr_tile[0] + 1, curr_tile[1]),\n (curr_tile[0] - 1, curr_tile[1]),\n (curr_tile[0], curr_tile[1] + 1),\n (curr_tile[0], curr_tile[1] - 1)\n ]\n test = (abs(target[0] - start[0]), abs(target[1] - start[0]))\n prefer = test.index(max(test[0], test[1]))\n if prefer == 0:\n options.sort(key=lambda x: x[0], reverse=True)\n else:\n options.sort(key=lambda x: x[1], reverse=True)\n backtrack = True # assume we must backtrack\n for opt in options:\n try:\n if maze_map[opt[0]][opt[1]] not in ('x', ) and opt not in tried:\n backtrack = False # if we haven't tried this option before, and it's not blocked\n path.append(opt) # then add to the path, and remember that it's been tried\n tried.add(opt)\n curr_tile = opt\n break\n except IndexError:\n continue\n if backtrack: # backtrack to the previous position in the path\n curr_tile = path.pop()\n return path", "def on_global_trajectory(self, msg):\n self._logger.debug('@{}: global trajectory has {} waypoints'.format(\n msg.timestamp, len(msg.data)))\n if len(msg.data) > 0:\n # The last 
waypoint is the goal location.\n self._goal_location = msg.data[-1][0].location\n else:\n # Trajectory does not contain any waypoints. We assume we have\n # arrived at destionation.\n self._goal_location = self._vehicle_transform.location\n assert self._goal_location, 'Planner does not have a goal'\n self._waypoints = deque()\n for waypoint_option in msg.data:\n self._waypoints.append(waypoint_option[0])\n self._prev_waypoints = self._waypoints", "def mark_route(self, route):\n WPT = []\n if len(route) > 0:\n x = self.xStart\n y = self.yStart\n self.MAP[y][x] = 2\n WPT.append([x,y])\n for i in range(len(route)):\n j = int(route[i])\n x += self.dx[j]\n y += self.dy[j]\n if i<len(route)-1:\n if route[i]==route[i+1]:\n self.MAP[y][x] = 3\n else:\n self.MAP[y][x] = 4\n WPT.append([x,y])\n self.MAP[y][x] = 5\n WPT.append([x,y])\n \n return WPT", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] }\n\n return car_path, drop_off_dict", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. 
There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)", "def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):\n self.goal=(1,1)\n self.goals=[]\n self.walls = gameState.getWalls()\n self.startState = gameState.getPacmanPosition()\n if start != None: self.startState = start\n\n n=0\n try:\n for j in range(1, 40):\n n=j\n x=gameState.hasWall(1, j)\n except:\n n=n\n m=0\n try:\n for i in range(1, 40):\n m=i\n x=gameState.hasWall(i, 1)\n except:\n m=m\n print('maze dimension: ',m,'x',n)\n\n for i in range(1,m):\n for j in range(1,n):\n if (gameState.hasFood(i,j)):\n if(gameState.getNumFood()==1):\n self.goal=(i,j)\n else:\n x=(i,j)\n self.goals.append(x)\n\n #print('goals',self.getFoodPositions())\n self.costFn = costFn\n self.visualize = visualize\n #x=getFoodPosition(gameState)\n #print(\"food positions: \" )\n print(\"[R12] Initial position of pacman is \"+str(gameState.getPacmanPosition()))\n print(\"[R10] Number of foods is \"+str(gameState.getNumFood()))\n if(gameState.getNumFood()>1):\n print(\"[R10] Final goal positions are \", self.goals)\n else:\n print(\"[R10] Final goal position is \"+str(self.goals))\n print(\"[R11] Ghost Positions is/are \"+str(gameState.getGhostPositions()))\n print(\"[R15] has the game food? 
\"+str(gameState.hasFood(*goal)))\n if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):\n print('Warning: this does not look like a regular search maze')\n\n # For display purposes\n self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE", "def get_move_options(self, x, y):\r\n best = self._get_lowest_neighbor_value(x, y)\r\n moves = []\r\n for dx, dy in DijkstraMap.neighbors:\r\n tx, ty = x + dx, y + dy\r\n if self.point_in_map(tx, ty) and self.tiles[tx][ty] == best:\r\n moves.append( (dx, dy))\r\n return moves", "def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms==[]: return\n xcord=len(rooms)\n ycord=len(rooms[0])\n indexstack=[(i,j) for i in range(len(rooms)) for j in range(len(rooms[0])) if rooms[i][j] == 0]\n direction=[(0,1),(1,0),(0,-1),(-1,0)]\n gatenum=1\n while indexstack != []:\n newindex=[]\n for item in indexstack:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if 0<=xpoint <len(rooms) and 0<=ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=gatenum\n newindex.append((xpoint,ypoint))\n indexstack=newindex\n gatenum+=1\n ''''\n for item in index_0:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=1\n index_1.append((xpoint,ypoint))\n for item in index_1:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=2\n index_2.append((xpoint,ypoint))\n for item in index_2:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=3\n index_3.append((xpoint,ypoint))\n for item in index_3:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <=len(rooms) and ypoint<=len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=4\n #index_3.append((xpoint,ypoint))'''", "def __init__(self, map_obstacle, main_graph):\n\n self.map_obstacle = map_obstacle\n self.main_graph = main_graph\n\n self.sight_range = self.calculate_sight_range()\n\n self.top_left_y = None\n self.top_left_x = None\n self.bottom_right_y = None\n self.bottom_right_x = None\n self.height = None\n self.width = None\n self.size = self.calculate_size()\n\n # nodes specific to this threat zone\n self.nodes = []", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = 
n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def __init__(self, free_map, agent_list):\n self.random = np.random\n self.free_map = free_map\n # initialize all the directions with False\n self.heading_right = [False] * len(agent_list) #: Attr to track directions.\n # self.heading_left = [False] * len(agent_list)\n self.heading_up = [False] * len(agent_list)\n # self.heading_down = [False] * len(agent_list)", "def _get_goal_info(self, last_info):\n start_ID = 4\n end_ID = start_ID + self.num_parts\n places = {}\n for ID in range(start_ID, end_ID):\n assert ID in last_info, f'something went wrong with ID={ID}'\n position, _, _ = last_info[ID]\n places[ID] = (position, (0, 0, 0, 1.))\n return places", "def solution_path(self) -> list[State]:", "def drawMap(mapObj, gameStateObj, goals, screen):\n \n # mapSurf will be the single Surface object that the tiles are drawn\n # on, so that it is easy to position the entire map on the DISPLAYSURF\n # Surface object. 
First, the width and height must be calculated.\n # mapWidth = len(mapObj) * TILEWIDTH\n # mapSurfHeight = (len(mapObj[0]) - 1) * TILEFLOORHEIGHT + TILEHEIGHT\n # mapSurf = pygame.Surface((mapSurfWidth, mapSurfHeight))\n # mapSurf.fill(BGCOLOR) # start with a blank color on the surface.\n \n for i in xrange(len(tiles)):\n tiles[i].hideturtle()\n \n debugprint(\"drawing map\")\n \n nxtiles = len(mapObj)\n nytiles = len(mapObj[0])\n \n xoffset = TILEWIDTH/2 + TILEWIDTH\n yoffset = WINHEIGHT - TILEHEIGHT/2 - TILEWIDTH\n \n tileCount = 0;\n \n def updateTile(screen, xpos, ypos, shape):\n global tiles\n \n if tileCount >= len(tiles):\n tiles.append(Tile(screen, xpos, ypos, shape))\n else:\n tiles[tileCount].goto(xpos, ypos)\n tiles[tileCount].shape(shape)\n tiles[tileCount].showturtle()\n\n return tileCount + 1\n \n # screen.tracer(1)\n # # Draw the tile sprites onto this surface.\n for x in range(nxtiles):\n for y in range(nytiles):\n xpos = x*TILEWIDTH + xoffset\n ypos = yoffset - y*40\n \n if mapObj[x][y] in TILEMAPPING:\n baseTile = TILEMAPPING[mapObj[x][y]]\n elif mapObj[x][y] in OUTSIDEDECOMAPPING:\n baseTile = TILEMAPPING[' ']\n\n # First draw the base ground/wall tile.\n tileCount = updateTile(screen, xpos, ypos, baseTile)\n # debugprint(xpos)\n # debugprint(ypos)\n if mapObj[x][y] in OUTSIDEDECOMAPPING:\n # Draw any tree/rock decorations that are on this tile.\n tileCount = updateTile(screen,xpos,ypos,OUTSIDEDECOMAPPING[mapObj[x][y]])\n elif (x, y) in gameStateObj['stars']:\n if (x, y) in goals:\n # A goal AND star are on this space, draw goal first.\n tileCount = updateTile(screen,xpos,ypos,IMAGESDICT['covered goal'])\n # Then draw the star sprite.\n tileCount = updateTile(screen,xpos,ypos,IMAGESDICT['star'])\n elif (x, y) in goals:\n # Draw a goal without a star on it.\n tileCount = updateTile(screen,xpos,ypos,IMAGESDICT['uncovered goal'])\n\n # Last draw the player on the board.\n if (x, y) == gameStateObj['player']:\n # Note: The value \"player_image\" refers\n # to a key in \"PLAYERIMAGES\" which has the\n # specific player image we want to show.\n tileCount = updateTile(screen,xpos,ypos,PLAYERIMAGES[game_state[\"player_image\"]])\n debugprint(PLAYERIMAGES[game_state[\"player_image\"]])", "def multi_goal_given(self):\n goals = set(self.goals)\n for start in self.starts:\n yield Grid2DProblem(self.space, set([start]), goals)", "def get_locations(nodes, tl, br):\n \n # Base cases:\n if len(nodes) == 1: # for singleton, only choice is to place in the single spot in 1x1 square\n return {nodes[0]: tl}\n if len(nodes) == 2: # for two nodes, arbitrarily chose to place the first node in top left\n return {nodes[0]: tl, nodes[1]: br}\n\n # Recursive case, need to create and solve subproblems:\n ret = {}\n\n num_edges = count_num_edges(nodes)\n if num_edges == 0: # for empty graphs, no need to run METIS, just assign arbitrarily\n i = 0\n for x in range(tl.x, br.x+1): \n for y in range(tl.y, br.y+1):\n if i < len(nodes):\n ret.update({nodes[i]: Point(x,y)})\n i += 1\n return ret\n\n filename = splitext(basename(sys.argv[1]))[0] + '.p.' + sys.argv[2] + '.yx.' + sys.argv[3] + '.drop.' + sys.argv[4] + '.' +\\\n '_'.join(['delete', str(tl.x), str(tl.y), str(br.x), str(br.y)]) \n\n # special case for the very first call of get_locations. For example, suppose that there are\n # 97 nodes on a 10x10 grid. Instead of dividing the 97 nodes into 2 equal partitions, we should\n # divide them into a partition of 90 nodes and a partition of 7 nodes. 
The former should be\n # placed on a 10x9 grid and te latter should be placed on a 1x7 grid.\n if len(nodes) < (br.x - tl.x + 1) * (br.y - tl.y + 1):\n assert tl == Point(0, 0)\n size_tl_nodes = (br.x + 1) * int(len(nodes) / (br.x + 1))\n if size_tl_nodes == len(nodes):\n ret.update(get_locations(nodes, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n return ret\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n # complicated indexing here. As an example, for the 97 into 10x10 case, we want to send 90 nodes\n # to a rectangle spanned by tl=Point(0, 0) and br=Point(9, 8) and we want to send 7 nodes to a \n # rectangle spanned by tl=Point(0, 9) and br=Point(6, 9)\n ret.update(get_locations(nodes_tl, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n ret.update(get_locations(nodes_br, tl=Point(0, len(nodes) / (br.x + 1)), br=Point(len(nodes) % (br.x + 1) - 1, len(nodes) / (br.x + 1))))\n return ret\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n half = tl.x + (br.x - tl.x - 1) / 2\n size_tl_nodes = (half - tl.x + 1) * (br.y - tl.y + 1)\n else: # split on x axis\n half = tl.y + (br.y - tl.y - 1) / 2\n size_tl_nodes = (br.x - tl.x + 1) * (half - tl.y + 1)\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(half, br.y)))\n ret.update(get_locations(nodes_br, tl=Point(half + 1,tl.y), br=br))\n else: # split on x axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(br.x, half)))\n ret.update(get_locations(nodes_br, tl=Point(tl.x, half + 1), br=br))\n\n return ret", "def plan_path(self, start_point, end_point, map_obj):\n # STUFF FOR TESTING \n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n self.vis_pub.publish(marker)\n \n exploration_bias = 1.0 - self.goal_bias\n final_node = None\n num_existing_path_points_added = 0\n \n self.rrt_star = RRTStar(Node(start_point))\n self.max_iterations = self.rrt_star.max_size\n while self.rrt_star.size <= self.max_iterations:\n p = np.random.uniform()\n if p < exploration_bias:\n \n x_rand = self.map.sample_free_space()\n else:\n if final_node is None:\n x_rand = end_point\n else:\n x_rand = self.branched_from_existing_path(\n final_node,\n depth_underestimate=num_existing_path_points_added\n )\n num_existing_path_points_added += 1\n\n x_nearest = self.rrt_star.nearest(x_rand) # Find the nearest node to x_rand\n\n path = self.map.generate_line_path(x_nearest.value, x_rand, eta=self.eta)\n if path is not None: # no obstacles between x_nearest and x_rand\n x_new = path[-1]\n X_nearby_connectable = self.find_nearby_connectable(x_nearest, x_new)\n\n cost_min, node_min = self.find_best_parent(X_nearby_connectable, x_new)\n\n X_nearby_connectable.remove(node_min) # Remove x_new's parent node from the list of nearby nodes so it is not considered for rewiring\n \n # Create the new node at x_new!\n node_new = self.rrt_star.add_config(node_min, x_new)\n \n if self.enable_vis:\n # FOR TESTING ONLY #\n # Code to publish marker for new node\n ###########################################################################################\n TEMP = Point()\n TEMP.x = x_new[0]\n TEMP.y = x_new[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n 
TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n self.vis_pub.publish(marker)\n ###########################################################################################\n\n self.rewire(cost_min, node_new, X_nearby_connectable)\n \n if np.allclose(node_new.value, end_point, .05, 0) and (final_node is None):#np.array_equal(node_new.value, end_point):\n final_node = node_new\n # reduce exploration bias so that we reinforce the existing path\n exploration_bias = .5\n if VERBOSE:\n print(\"Path found!!!!\")\n print(final_node.cost)\n if rospy.get_time() - self.start_time > self.time_thresh:\n if VERBOSE:\n print(self.rrt_star.size)\n break\n\n \n if final_node is not None:\n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.points = []\n marker.colors = []\n def recur(node):\n if self.enable_vis:\n TEMP = Point()\n TEMP.x = node.value[0]\n TEMP.y = node.value[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n \n self.trajectory.points.append([node.value[0], node.value[1]])\n parent = node.parent\n if parent is not None:\n recur(parent)\n recur(final_node)\n self.trajectory.points.reverse()\n if self.enable_vis:\n self.vis_pub.publish(marker)\n if VERBOSE:\n print (final_node.depth)\n else:\n if VERBOSE:\n print(\"No path found! Please try again.\")\n \n \n \n # publish trajectory\n self.traj_pub.publish(self.trajectory.toPoseArray())\n\n # visualize trajectory Markers\n self.trajectory.publish_viz()", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', 
'49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def build_compass_map():\n\n for i in range(0, 100):\n # Add bears\n if ENEMY_LIST[i] == 1:\n HAS_COMPASS_MAP.append(COMPASS_DICT[3])\n # Add Grizzly bear\n elif ENEMY_LIST[i] == 2:\n HAS_COMPASS_MAP.append(COMPASS_DICT[4])\n # Add water spots\n elif GROUND_FEATURES_LIST[i] == 10:\n HAS_COMPASS_MAP.append(COMPASS_DICT[1])\n # Add Big Trees\n elif GROUND_FEATURES_LIST[i] == 11:\n HAS_COMPASS_MAP.append(COMPASS_DICT[2])\n # Add nothings\n else:\n HAS_COMPASS_MAP.append(COMPASS_DICT[5])", "def assign_dropoffs(G, path, home_idxs):\n locations_on_path = set(path)\n dropoffs = collections.defaultdict(list)\n # print(locations_on_path)\n for h in home_idxs:\n # print(f'DISTANCES FOR {h}', all_pairs_dists[h])\n closest_loc_on_path = min(locations_on_path, key=lambda loc: all_pairs_dists[h][loc])\n dropoffs[closest_loc_on_path].append(h)\n return dropoffs", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n 
puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def pathfinder(starting_position: tuple, target_position: tuple, grid: np.ndarray) -> List[tuple] or None:\n moves_dict = {(1, 0): \"DOWN\", (-1, 0): \"UP\", (0, 1): \"RIGHT\", (0, -1): \"LEFT\"}\n\n moves = []\n path = []\n dead_ends = []\n\n def rate_position(current, target):\n \"\"\"\n Helper function to calculate distance to target\n \"\"\"\n return (target[0] - current[0]) ** 2 + (target[1] - current[1]) ** 2\n\n # Setting starting position\n current_position = starting_position\n while current_position != target_position:\n possible_moves = {}\n # Checking for each possible move and rating them\n for m in moves_dict.keys():\n if check_valid_move(grid, current_position, m):\n new_position = tuple(np.add(current_position, m))\n new_position_rating = rate_position(new_position, target_position)\n if new_position not in path and new_position not in dead_ends:\n possible_moves[new_position_rating] = m\n\n # if there are possible move, select the one, that would move us the closest to target\n if possible_moves:\n path.append(current_position) # save position to path\n moves.append(possible_moves[min(possible_moves)]) # save move to move list\n current_position = tuple(np.add(current_position, possible_moves[min(possible_moves)]))\n # if not, go back one move and add current position to dead ends\n else:\n # if no moves available from the start, return None\n if current_position == starting_position:\n return None\n dead_ends.append(current_position) # save position to dead ends\n current_position = path[-1] # move back one step\n path.pop(-1) # delete step from path\n moves.pop(-1) # delete move from move list\n\n return [tuple(moves_dict[move] for move in moves)]", "def adjPaths(imgR,location):\n directions = [(1,0),(-1,0),(0,1),(0,-1)] # up, down, left, right \n possiblePaths = [] \n for direction in directions:\n iPlus,jPlus = direction\n if imgR[location[0]+iPlus,location[1]+jPlus] == 0: \n possiblePaths.append(direction)\n return possiblePaths", "def get_heuristic(self, state):\n\n def get_manhattan_distance(coord_a, coord_b):\n \"\"\"Returns the manhattan distance between coord_a and coord_b.\"\"\"\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)\n\n \n def get_num_obstacles(coord_a, coord_b):\n \"\"\"Returns the number of obstacles (wriggler segments or walls) between\n coord_a and coord_b.\n \n This function assumes that coord_b is larger (in either/both x and y)\n than coord_a.\n \"\"\"\n obstacle_count = 0\n \n for x in range(coord_a.x, coord_b.x + 1):\n for y in range(coord_a.y, coord_b.y + 1):\n coord = Coordinate(x, y)\n if coord in self.wall_coords or coord in state:\n obstacle_count += 1\n \n return obstacle_count\n\n\n head_coord = state.wriggler_list[0].get_head()\n tail_coord = state.wriggler_list[0].get_tail()\n 
\n head_manhattan_distance = get_manhattan_distance(head_coord, self.goal_coord)\n tail_manhattan_distance = get_manhattan_distance(tail_coord, self.goal_coord)\n \n # Calculate and return heuristic value depending on which heuristic to use\n if self.heuristic == Heuristic.MANHATTAN_DIST:\n # Return the shortest Manhattan distance of wriggler0's tail or head to the goal\n return min(head_manhattan_distance, tail_manhattan_distance)\n \n else: # self.heuristic == Heuristic.NUM_OBSTACLES:\n # Return the number of obstacles between wriggler0's tail/head to the goal\n # The tail/head is selected based on which is closer to the goal\n if head_manhattan_distance <= tail_manhattan_distance:\n # The head is closer or the same distance away\n return get_num_obstacles(head_coord, self.goal_coord)\n \n else:\n # The tail is closer\n return get_num_obstacles(tail_coord, self.goal_coord)", "def find_working_paths(paths: Iterable[tuple], starting_position: tuple, grid: np.ndarray) -> List[tuple]:\n moves_dict = {(1, 0): \"DOWN\", (-1, 0): \"UP\", (0, 1): \"RIGHT\", (0, -1): \"LEFT\"}\n successful_paths = []\n for path in paths:\n # starting from starting position\n new_pos = starting_position\n path_check = []\n for move in path:\n # moving to new position and checking if it is valid\n new_pos = tuple(np.add(new_pos, move))\n path_check.append(True if grid[new_pos] != 'x' else False)\n\n # if all moves are valid, adding path to success list\n if all(path_check):\n successful_paths.append(tuple(moves_dict[move] for move in path))\n\n return successful_paths", "def navigate(locations: TravelMap, comp_func: Callable) -> Tuple[int, Dict[str, int]]:\n best_dist = comp_func(float(\"inf\"), float(\"-inf\")) * -1\n best_path = {}\n for loc in locations:\n new_dist, new_path = nav_helper(locations, loc, comp_func)\n if new_dist == comp_func(new_dist, best_dist):\n best_dist = new_dist\n best_path = new_path\n return best_dist, best_path", "def test_for_grader():\n test_map1 = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 0, 0, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 0, 0, 1, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1]])\n x_spacing1 = 1\n y_spacing1 = 1\n start1 = np.array([[1.5], [1.5], [0]])\n goal1 = np.array([[7.5], [1], [0]])\n path1 = dijkstras(test_map1,x_spacing1,y_spacing1,start1,goal1)\n s = 0\n for i in range(len(path1)-1):\n s += np.sqrt((path1[i][0]-path1[i+1][0])**2 + (path1[i][1]-path1[i+1][1])**2)\n print(\"Path 1 length:\")\n print(s)\n\n\n test_map2 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n start2 = np.array([[0.4], [0.4], [1.5707963267948966]])\n goal2 = np.array([[0.4], [1.8], [-1.5707963267948966]])\n x_spacing2 = 0.2\n y_spacing2 = 0.2\n path2 = dijkstras(test_map2,x_spacing2,y_spacing2,start2,goal2)\n s = 0\n for i in range(len(path2)-1):\n s += np.sqrt((path2[i][0]-path2[i+1][0])**2 + (path2[i][1]-path2[i+1][1])**2)\n print(\"Path 2 length:\")\n print(s)", "def ai_pass(self, team_players, enemy_team_players):\n\n # Invert coordinates\n team_pos = {player.id: P(player.pos.x, H-player.pos.y)\n for player in team_players}\n enemy_team_pos = {player.id: P(player.pos.x, 
H-player.pos.y)\n for player in enemy_team_players}\n self_pos = P(self.pos.x, H-self.pos.y)\n\n prefs = { # directions are wrt origin at bottom-right\n 'SHOOT_A': {'priority': {1: 4, 2: 1}, 'angle': math.pi, 'dir': P(-1, 0)},\n 'SHOOT_Q': {'priority': {1: 3, 2: 1}, 'angle': math.pi*3/4, 'dir': P(-1, 1)},\n 'SHOOT_Z': {'priority': {1: 3, 2: 1}, 'angle': -math.pi*3/4, 'dir': P(-1, -1)},\n 'SHOOT_W': {'priority': {1: 2, 2: 2}, 'angle': math.pi/2, 'dir': P(0, 1)},\n 'SHOOT_X': {'priority': {1: 2, 2: 2}, 'angle': -math.pi/2, 'dir': P(0, -1)},\n 'SHOOT_E': {'priority': {1: 1, 2: 3}, 'angle': math.pi/4, 'dir': P(1, 1)},\n 'SHOOT_C': {'priority': {1: 1, 2: 3}, 'angle': -math.pi/4, 'dir': P(1, -1)},\n 'SHOOT_D': {'priority': {1: 1, 2: 4}, 'angle': 0, 'dir': P(1, 0)},\n }\n\n possible_passes = []\n\n for k, v in prefs.items():\n line = [ # Equation of line as A*x +B*y + C = 0\n math.sin(v['angle']), # x coeff\n -math.cos(v['angle']), # y coeff\n self_pos.y*math.cos(v['angle']) - self_pos.x * \\\n math.sin(v['angle']), # constant\n ]\n for player in team_players:\n if player.id != self.id:\n team_dist = self.dist_to_line(line, team_pos[player.id])\n if (team_dist < AI_MIN_PASS_DIST and # player is near enough to receive the ball\n # In correct x-direction (not behind the line)\n (self_pos.x - team_pos[player.id].x)*v['dir'].x <= 0 and\n (self_pos.y - team_pos[player.id].y)*v['dir'].y <= 0): # In correct y-direction\n\n # Consider enemy's distance as well\n enemy_dist = math.inf\n enemy_min_pos = P(0, 0)\n for enemy_player in enemy_team_players: # Check for all enemies\n if self.dist_to_line(line, enemy_team_pos[enemy_player.id]) < enemy_dist:\n enemy_dist = self.dist_to_line(\n line, enemy_team_pos[enemy_player.id])\n enemy_min_pos = enemy_team_pos[enemy_player.id]\n\n if (enemy_dist < team_dist and # enemy is nearer than team player\n # In correct x-direction (not behind the line)\n (self.pos.x - enemy_min_pos.x)*v['dir'].x <= 0 and\n (self.pos.y - enemy_min_pos.y)*v['dir'].y <= 0): # In correct y-direction\n continue\n else:\n possible_passes.append(\n (v['priority'][self.team_id],\n team_dist,\n k,\n enemy_dist\n )\n )\n\n # Sort by priority then distance\n if possible_passes != []:\n ai_pass = sorted(possible_passes)[0][2]\n else:\n ai_pass = 'NOTHING'\n\n return ai_pass", "def get_route(nodes, goalParentId):\n print(\"%%%%%%%%%%%%%%%%%%%\")\n route = []\n ok = False\n while not ok:\n for node in nodes:\n if( node.myId == goalParentId ):\n route.insert(0, node)\n node.dump()\n goalParentId = node.parentId\n if( goalParentId == -2):\n print(\"%%%%%%%%%%%%%%%%%\")\n ok = True\n return route", "def ant_colony(map, alpha=3, beta=4, m=10, rho=0.2, q=1, its_max=20):\n n = len(map)\n tau = np.ones((n, n))\n eta = 1/map.D\n for i in range(n):\n eta[i, i] = 0\n paths_array = np.zeros((m, n), int)\n its = 0\n path_best = np.zeros((its_max, n), int)\n distance_best = np.zeros(its_max)\n\n while its < its_max:\n paths_length = np.zeros(m)\n for i in range(m):\n source = np.random.randint(n)\n visited = []\n unvisited = list(range(n))\n node_now = source\n node_next = -1\n paths_array[i, 0] = source\n\n for j in range(1, n):\n visited.append(node_now)\n unvisited.remove(node_now)\n prob_roulette = np.array([0]*n, dtype=float)\n for k in unvisited:\n prob_roulette[k] = (pow(tau[node_now, k], alpha)\n * pow(eta[node_now, k], beta))\n prob_roulette = prob_roulette/sum(prob_roulette)\n cum_roulette = prob_roulette.cumsum()\n cum_roulette -= np.random.uniform(0, 1)\n node_next = list(cum_roulette >= 
0).index(True)\n paths_array[i, j] = node_next\n paths_length[i] += map.D[node_now, node_next]\n node_now = node_next\n paths_length[i] += map.D[node_now, source]\n\n if its == 0:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n else:\n if distance_best[its-1] < paths_length.min():\n distance_best[its] = distance_best[its-1]\n path_best[its] = path_best[its-1].copy()\n else:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n\n add_tau = np.zeros((n, n))\n\n for i in range(m):\n for j in range(n):\n row = paths_array[i, j]\n col = paths_array[i, (j+1) % n]\n add_tau[row][col] += q/paths_length[i]\n\n tau = (1 - rho)*tau + add_tau\n\n its += 1\n\n return Hamiltonian(path_best[-1], map)", "def create_graph(some_map):\n\n map_height = some_map.height\n map_width = some_map.width\n map_obstacles = some_map.obstacles\n\n nodes = [[None] * map_width] * map_height\n\n # create a node representing each position on the map\n for y in range(0, map_height):\n for x in range(0, map_width):\n position = (y, x)\n\n # create a node describing this position\n node = Node(position=position)\n\n # store it on the graph\n nodes[y][x] = node\n\n # look through all moving characters, non-moving characters, and items\n for map_obstacle in map_obstacles:\n # all characters must start somewhere\n node = nodes[map_obstacle.y][map_obstacle.x]\n\n # store the map_obstacle on this node.\n node.contents.add(map_obstacle)\n\n # only create threat zones for moving/turning entities\n if map_obstacle.can_move() or map_obstacle.can_turn_without_moving():\n threat_zone = ThreatZone(map_obstacle, nodes, some_map)\n threat_zone.mark_nodes_as_members_of_threat_zone()\n\n some_map.nodes = nodes\n\n return nodes", "def process_wire_path(wire_path_data):\n x = 0\n y = 0\n step_count = 0\n travel_map = {}\n x_dict = {'L': -1, 'R': 1, 'U': 0, 'D': 0}\n y_dict = {'L': 0, 'R': 0, 'U': 1, 'D': -1}\n \n for item in wire_path_data:\n direction, steps = split_instruction(item)\n check_condition(direction, steps)\n for _ in range(steps):\n x += x_dict[direction]\n y += y_dict[direction]\n step_count += 1 \n if (x, y) not in travel_map:\n travel_map[(x, y)] = step_count\n \n return travel_map" ]
[ "0.6306571", "0.62811714", "0.62330747", "0.6082783", "0.6031487", "0.59971946", "0.59853303", "0.59514654", "0.5945449", "0.58769524", "0.5866137", "0.5830052", "0.57840186", "0.5759361", "0.57322335", "0.5720972", "0.57009214", "0.56796384", "0.56514007", "0.5645225", "0.56407934", "0.56248903", "0.5575719", "0.5546346", "0.5524988", "0.5523357", "0.5517831", "0.5440602", "0.5423661", "0.53993726", "0.53957564", "0.539517", "0.53896916", "0.5388017", "0.5381743", "0.5373748", "0.5353361", "0.5348458", "0.5339007", "0.533388", "0.53216696", "0.53172356", "0.53114355", "0.52948093", "0.52947015", "0.5291073", "0.5290201", "0.5289015", "0.5281698", "0.5281698", "0.52812755", "0.52795017", "0.5272628", "0.5272151", "0.5271807", "0.52686083", "0.52347106", "0.5225843", "0.52228224", "0.5215837", "0.5215245", "0.5213851", "0.5208779", "0.5207437", "0.5190196", "0.5179602", "0.5174275", "0.5169961", "0.5169464", "0.5168014", "0.5166701", "0.5165011", "0.5164657", "0.51591086", "0.51541", "0.5148392", "0.5146427", "0.5145776", "0.5138672", "0.51357967", "0.5135158", "0.51328576", "0.5132005", "0.51265484", "0.51225466", "0.512163", "0.51125735", "0.5108041", "0.51033485", "0.5098685", "0.50971895", "0.50971746", "0.50958866", "0.5095533", "0.50941694", "0.50906694", "0.5088879", "0.5088555", "0.50871885", "0.5086893" ]
0.5904296
9
Prepare paths specified as config. The input is a list of either strings, or 2-tuples (source, target). Where single strings are supplied, the basenames are used as targets. Where targets are given explicitly, they must not be absolute paths. Returns a list of 2-tuples, or throws ConfigError if something is wrong in the input.
def process_path_specs(specs):
    processedSpecs = []
    for spec in specs:
        if not isinstance(spec, (list, tuple)):
            source = spec
            target = None
        elif len(spec) != 2:
            raise ConfigError("path spec must be a list or tuple of "
                              "length two")
        else:
            source, target = spec
        source = os.path.normpath(source)
        if not target:
            target = os.path.basename(source)
        elif os.path.isabs(target):
            raise ConfigError("target path for include file may not be "
                              "an absolute path")
        processedSpecs.append((source, target))
    return processedSpecs
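A minimal usage sketch of the function above, assuming process_path_specs is in scope together with an import of os and a ConfigError exception class (both are omitted from the snippet itself); the file names are illustrative only:

import os

class ConfigError(Exception):
    """Stand-in for the ConfigError raised by process_path_specs."""

specs = [
    "data/config.yaml",                   # bare string: basename becomes the target
    ("assets/logo.png", "img/logo.png"),  # explicit (source, target) pair with a relative target
]
print(process_path_specs(specs))
# On a POSIX system this prints:
# [('data/config.yaml', 'config.yaml'), ('assets/logo.png', 'img/logo.png')]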
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_paths(src, dst, paths, *, exclude=None):\n files = []\n\n for path in paths:\n if isinstance(path, tuple):\n files += copy_path(src, dst, path[0], path[1], exclude=exclude)\n else:\n files += copy_path(src, dst, path, exclude=exclude)\n\n return files", "def cfgPathToList( arg ):\n from types import StringTypes\n listPath = []\n if type( arg ) not in StringTypes:\n return listPath\n while arg.find( '/' ) == 0:\n arg = arg[1:]\n return arg.split( '/' )", "def _copy_paths(self, paths, source, destination, output_path,\r\n final_path=None):\r\n for path in paths:\r\n if final_path:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, final_path))\r\n else:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, path))", "def _resolvePathPatterns(self, sources, source):\n kept = []\n pattern = re.compile(source['pathPattern'])\n basedir = self._basePath / source['path']\n if (self._basePath.name == Path(self._largeImagePath).name and\n (self._basePath.parent / source['path']).is_dir()):\n basedir = self._basePath.parent / source['path']\n basedir = basedir.resolve()\n for entry in basedir.iterdir():\n match = pattern.search(entry.name)\n if match:\n if entry.is_file():\n kept.append((entry.name, entry, match))\n elif entry.is_dir() and (entry / entry.name).is_file():\n kept.append((entry.name, entry / entry.name, match))\n for idx, (_, entry, match) in enumerate(sorted(kept)):\n subsource = copy.deepcopy(source)\n # Use named match groups to augment source values.\n for k, v in match.groupdict().items():\n if v.isdigit():\n v = int(v)\n if k.endswith('1'):\n v -= 1\n if '.' in k:\n subsource.setdefault(k.split('.', 1)[0], {})[k.split('.', 1)[1]] = v\n else:\n subsource[k] = v\n subsource['path'] = entry\n for axis in self._axesList:\n stepKey = '%sStep' % axis\n valuesKey = '%sValues' % axis\n if stepKey in source:\n if axis in source or valuesKey not in source:\n subsource[axis] = subsource.get(axis, 0) + idx * source[stepKey]\n else:\n subsource[valuesKey] = [\n val + idx * source[stepKey] for val in subsource[valuesKey]]\n del subsource['pathPattern']\n sources.append(subsource)", "def build_path_pairs(self):\n\n if self.source_paths is None:\n\n raise ValueError(\"self.source_paths uninitialized!\")\n\n for source_path in self.source_paths:\n\n for block_data_dir in data_settings.BLOCK_DATA_DIRS:\n\n block_id = os.path.split(block_data_dir)[-1]\n\n source_data_dir, filename = os.path.split(source_path)\n containing_dir = os.path.split(source_data_dir)[-1]\n\n if not containing_dir in [block_id, data_settings.GRANULE]:\n\n continue\n\n block_data_path = os.path.join(block_data_dir, filename)\n self.path_pairs.append((source_path, block_data_path))", "def in_filepath_list(class_paths: List[str]) -> List:\n registry, not_founds = build_registry(class_paths)\n builder = FilepathListBuilder()\n source = builder.build(registry)\n\n return [source, not_founds]", "def _GetFilePairs(config):\n\n ret = []\n\n has_bazel_genfiles = os.path.exists(\"bazel-bin\")\n\n for filename in config.file_list:\n target = os.path.join(config.package_name, filename)\n generated = os.path.join(config.package_name, config.pattern % filename)\n if has_bazel_genfiles:\n generated = os.path.join(\"bazel-bin\", generated)\n\n # Generated files should always exist. 
Blaze should guarantee this before\n # we are run.\n if not os.path.isfile(generated):\n print(\"Generated file '%s' does not exist.\" % generated)\n print(\"Please run this command to generate it:\")\n print(\" bazel build %s:%s\" % (config.package_name, config.target_name))\n sys.exit(1)\n ret.append(_FilePair(target, generated))\n\n return ret", "def targets(path, args):\n if args:\n return \" \".join([\"{0}{1}\".format(path, target) for target in args])", "def build_destination_files(destination, requested_paths):\n pathlib.Path(destination).resolve()\n longest_common_requested_path = longest_common_path_prefix(requested_paths)\n destination_files = [destination / path.relative_to(longest_common_requested_path) for path in requested_paths]\n existing_files = [path for path in destination_files if path.exists()]\n return destination_files, existing_files", "def handle_multiple_destinations(self):\n\n # Create the to-directory if it does not exist\n for destination in config.dest:\n if not path.exists(destination.dest):\n makedirs(destination.dest)\n\n # Clone the modules and copy the right directories\n for module in config.modules:\n Logger.assemble_module(module)\n\n directory = path.join(TEMP_DIR, module.name)\n remove_dir(directory)\n clone(module, directory)\n self.commit_hashes[module.name] = self.get_commit_hash(directory)\n\n for destination in config.dest:\n to_directory = path.join(destination.dest, module.name)\n remove_dir(to_directory)\n shutil.move(\n path.join(TEMP_DIR, module.name, destination.src), to_directory\n )", "def expand_paths(self, paths):\n \n expanded_paths = []\n if isinstance(paths, str): # A single path\n expanded = glob.glob(paths)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n elif isinstance(paths, list): # Multiple path\n for p in paths:\n expanded = glob.glob(p)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n else:\n _LOG.exception(\"Unknown input for the 'add' function.\")\n return expanded_paths", "def _resolve_target_sources(self, target_sources, extension=None, relative_to_target_base=False):\r\n resolved_sources = []\r\n for resolved in Target.resolve_all(target_sources):\r\n if hasattr(resolved, 'sources'):\r\n resolved_sources.extend(\r\n source if relative_to_target_base else os.path.join(resolved.target_base, source)\r\n for source in resolved.sources if not extension or source.endswith(extension)\r\n )\r\n return resolved_sources", "def _resolveFramePaths(self, sourceList):\n # we want to work with both _basePath / <path> and\n # _basePath / .. 
/ <path> / <name> to be compatible with Girder\n # resource layouts.\n sources = []\n for source in sourceList:\n if source.get('pathPattern'):\n self._resolvePathPatterns(sources, source)\n else:\n self._resolveSourcePath(sources, source)\n for source in sources:\n if hasattr(source.get('path'), 'resolve'):\n source['path'] = source['path'].resolve(False)\n return sources", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self._run(\n name, ['glob', source, pattern],\n lambda: self.test_api.glob_paths(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(*x.split(self.m.path.sep))\n for x in result.stdout.splitlines()]\n result.presentation.logs[\"glob\"] = map(str, ret)\n return ret", "def get_files(target_files, config):\n out = []\n find_fn = _find_file(config)\n for fname_in in target_files.keys():\n if isinstance(fname_in, (list, tuple)):\n fnames = fname_in\n else:\n fnames = fname_in.split(\";\")\n for fname in fnames:\n remote_fname = find_fn(fname)\n if remote_fname:\n if isinstance(remote_fname, (list, tuple)):\n out.extend(remote_fname)\n else:\n out.append(remote_fname)\n return out", "def resolve_specs(paths):\n specs = []\n for path in paths:\n if os.path.isdir(path):\n _, _, files = os.walk(path).next()\n specs.extend(os.path.join(path, fname) for fname in files)\n else:\n specs.append(path)\n return specs", "def configfiles(basename):\n dirs = (\"config\", \"config-\" + os.uname()[1].rsplit(\".\")[0])\n dirpaths = (join(d, basename) for d in dirs)\n realpaths = (join(scriptdir, d) for d in dirpaths)\n return [relpath(d) for d in realpaths]", "def buildReplaceList (remplacements, sourcePath, includes, excludes = [], destinationPath = None, replaceFilename = True):\n\tfrom os import sep\n\n\tdestinations = []\n\n\t# Analyzes the directories\n\tsources = scanAll(normalizePath(sourcePath), includes, excludes)[0]\n\n\t# If the destination directory is not defined\n\tif destinationPath == None:\n\t\tdestinations = sources[:]\n\telse:\n\t\tdestinations = []\n\n\t\t# Creation de la liste des fichiers de destination\n\t\tfor source in sources:\n\t\t\t# Gets the destination directory name\n\t\t\tdestination = normalizePath(destinationPath + sep + source[len (sourcePath):])\n\n\t\t\t# If file names are to be replaced\n\t\t\tif replaceFilename:\n\t\t\t\t# For each replacement to be made in the destination directory name\n\t\t\t\tfor i in remplacements:\n\t\t\t\t\t# Replaces values in line \n\t\t\t\t\tdestination = destination.replace(i[0], i[1])\n\n\t\t\t# Adding the directory to the list\n\t\t\tdestinations.append (normalizePath(destination))\n\treturn sources, destinations", "def config(c):\n for sp_ns in ns_foreach_task_subdir(c):\n try:\n sp_ns.tasks.config(c)\n except UnexpectedExit:\n pass", "def parse_targets(\n name=None, pkgs=None, sources=None, saltenv=\"base\", normalize=True, **kwargs\n):\n if \"__env__\" in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop(\"__env__\")\n\n if __grains__[\"os\"] == \"MacOS\" and sources:\n log.warning('Parameter \"sources\" ignored on MacOS hosts.')\n\n version = kwargs.get(\"version\")\n\n if pkgs and sources:\n log.error('Only one of \"pkgs\" and \"sources\" can be used.')\n return None, None\n\n elif \"advisory_ids\" in kwargs:\n if pkgs:\n log.error('Cannot use \"advisory_ids\" and \"pkgs\" at the same time')\n return None, None\n elif kwargs[\"advisory_ids\"]:\n return kwargs[\"advisory_ids\"], \"advisory\"\n else:\n return [name], 
\"advisory\"\n\n elif pkgs:\n if version is not None:\n log.warning(\n \"'version' argument will be ignored for multiple package targets\"\n )\n pkgs = _repack_pkgs(pkgs, normalize=normalize)\n if not pkgs:\n return None, None\n else:\n return pkgs, \"repository\"\n\n elif sources and __grains__[\"os\"] != \"MacOS\":\n if version is not None:\n log.warning(\n \"'version' argument will be ignored for multiple package targets\"\n )\n sources = pack_sources(sources, normalize=normalize)\n if not sources:\n return None, None\n\n srcinfo = []\n for pkg_name, pkg_src in sources.items():\n if __salt__[\"config.valid_fileproto\"](pkg_src):\n # Cache package from remote source (salt master, HTTP, FTP) and\n # append the cached path.\n srcinfo.append(__salt__[\"cp.cache_file\"](pkg_src, saltenv))\n else:\n # Package file local to the minion, just append the path to the\n # package file.\n if not os.path.isabs(pkg_src):\n raise SaltInvocationError(\n \"Path {} for package {} is either not absolute or \"\n \"an invalid protocol\".format(pkg_src, pkg_name)\n )\n srcinfo.append(pkg_src)\n\n return srcinfo, \"file\"\n\n elif name:\n if normalize:\n _normalize_name = __salt__.get(\n \"pkg.normalize_name\", lambda pkgname: pkgname\n )\n packed = {_normalize_name(x): version for x in name.split(\",\")}\n else:\n packed = {x: version for x in name.split(\",\")}\n return packed, \"repository\"\n\n else:\n log.error(\"No package sources provided\")\n return None, None", "def FindSources(env, dest, source, suffixes=None):\n for source_entry in env.Flatten(source):\n if type(source_entry) == str:\n # Search for matches for each source entry\n source_nodes = env.Glob(source_entry)\n else:\n # Source entry is already a file or directory node; no need to glob it\n source_nodes = [source_entry]\n for s in source_nodes:\n if str(s.__class__) == 'SCons.Node.FS.Dir':\n # Recursively search subdir. 
Since glob('*') doesn't match dot files,\n # also glob('.*').\n FindSources(env, dest, [s.abspath + '/*', s.abspath + '/.*'],\n suffixes)\n elif suffixes and s.suffix in suffixes:\n dest.add(s)", "def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])", "def validate(self, config):\n if not isinstance(config, list):\n config = [config]\n\n for conf in config:\n if not conf.get('path'):\n raise ConfigError('Camera needs a `path` to save files to.')\n \n return config", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def run_path_visualisation(paths, config, modulesConfig):\n all_targets = [os.path.basename(config[s][\"target\"]) for s in config.sections]\n all_target_tasks = {os.path.basename(config[s][\"target\"]):s for s in config.sections}\n \n added_tasks = []\n prepared_paths = []\n for path in paths:\n prepared_tasks = []\n for idx, task in enumerate(list(reversed(path))):\n s_module, s_name, *identifier = task.split(\" \")\n\n # Special Rule For Join Module To Have A Connection To Another Module\n special_connection = False\n if s_module == \"processing_join\":\n args = config[task]\n con_module, con_name, *identifier = all_target_tasks.get(os.path.basename(args[\"joinwith\"]), s_module+\"_SPECIAL \"+s_name+\"_SPECIAL\").split(\" \")\n special_connection = {\n \"connection_to_module\" : con_module,\n \"connection_to_name\" : con_name,\n \"will_be_created\" : (os.path.basename(args[\"joinwith\"]) in all_targets)\n }\n\n prepared_tasks.append({\n 'module':s_module,\n 'name':s_name,\n 'display': (task not in added_tasks),\n 'specialConnection': special_connection,\n 'last': (idx == len(path) - 1),\n 'attributes': config[task]\n })\n added_tasks.append(task)\n prepared_paths.append(prepared_tasks)\n logger.debug(\"Path prepared for visualization!\")\n render_path_visualisation(config['projectRoot'], config['projectName'], prepared_paths)", "def _make_path_list(cfg, dir_name, file_name, rank=None):\n if not cfg.DATASET.IS_ABSOLUTE_PATH:\n assert len(dir_name) == 1 or len(dir_name) == len(file_name)\n if len(dir_name) == 1:\n file_name = [os.path.join(dir_name[0], x) for x in file_name]\n else:\n file_name = [os.path.join(dir_name[i], file_name[i])\n for i in range(len(file_name))]\n\n if cfg.DATASET.LOAD_2D: # load 2d images\n temp_list = copy.deepcopy(file_name)\n file_name = []\n for x in temp_list:\n suffix = x.split('/')[-1]\n if suffix in ['*.png', '*.tif']:\n file_name += sorted(glob.glob(x, recursive=True))\n else: # complete filename is specified\n file_name.append(x)\n\n file_name = _distribute_data(cfg, file_name, rank)\n return file_name", "def trainer_paths(config):\n arch_datetime = arch_datetime_path(config)\n return (\n ensure_dir(join(arch_datetime, 'checkpoints')),\n ensure_dir(join(arch_datetime, 'runs'))\n )", "def test_config_merging_toml_paths_only():\n toml = StringIO(\n dedent(\n \"\"\"\\\n [tool.vulture]\n paths = [\"path1\", \"path2\"]\n \"\"\"\n )\n )\n cliargs = [\n \"--exclude=test_*.py\",\n ]\n result = make_config(cliargs, toml)\n assert result[\"paths\"] == [\"path1\", \"path2\"]\n assert result[\"exclude\"] == [\"test_*.py\"]", "def import_data_from_config(config):\n\n merge_columns = config[\"import_data\"][\"merge_columns\"]\n\n if not isinstance(merge_columns, list):\n msg = \"merge_columns (if used) must be a list\"\n raise ValueError(msg)\n\n data_out = 
config[\"import_data\"][\"output_data_directory\"]\n mkdir(data_out)\n\n # Require 'input_data_directories' to be a list\n data_in_list = config[\"import_data\"][\"input_data_directories\"]\n if not isinstance(data_in_list, list):\n msg = \"input_data_directories must be a list\"\n raise ValueError(msg)\n\n target_column = config[\"target_column\"]\n\n for d_in in data_in_list:\n import_directory_csv(d_in, data_out, target_column, merge_columns)", "def _process_candidate_conf_files(self, reordered_files):\n confs = []\n for r, f in reordered_files:\n if not os.path.exists(f):\n continue\n\n conf = ConfFile(f, self.syspaths)\n conf.replace(self.remap_renamer)\n temp_name = \"%s...%s\" % (r['from'], r['to'])\n conf.path = conf.path.replace(r['from'], temp_name)\n conf.path = conf.path.replace(temp_name, r['to'])\n confs.append(conf)\n\n return confs", "def get_multiple_dest(list_of_str):\n new_list = []\n for x in list_of_str:\n new_list.append(get_dest(x))\n return new_list", "def initialize_paths(self):\n for path in self.config[\"paths\"]:\n self.force_path_to_exist(self.config[\"paths\"][path])", "def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths", "def to_sources(todos):\n for subtodos in todos.iter_sourced():\n to_path(subtodos, subtodos.get_source())", "def normalize_paths(\n paths: Sequence[str], parent: str = os.curdir\n) -> list[str]:\n assert isinstance(paths, list), paths\n return [normalize_path(p, parent) for p in paths]", "def perform(config: Path, destination_names: Optional[list[str]] = None) -> None:\n validated_config = build_config(config)\n configs: list[ArqSchedulerConfig] = []\n for destination_name, destination in validated_config.destinations.items():\n included = destination_names is None or destination_name in destination_names\n if isinstance(destination.scheduler, ArqSchedulerConfig) and included:\n print( # noqa: WPS421 -- user feedback on command line\n f\"Worker running for '{destination_name}' destination in {config}.\",\n )\n configs.append(destination.scheduler)\n if not configs:\n raise ValueError(\"No valid destination found in config file.\")\n\n asyncio.run(run_workers(configs))", "def path(self, source, target):\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]", "def path(self, source, target):\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]", "def __add_paths(self, config):\n bin_path = os.path.join(\n self.directory.install_directory(self.feature_name), \"bin\"\n )\n whitelist_executables = self._get_whitelisted_executables(config)\n for f in os.listdir(bin_path):\n for pattern in BLACKLISTED_EXECUTABLES:\n if re.match(pattern, f):\n continue\n if whitelist_executables and f not in whitelist_executables:\n continue\n self.directory.symlink_to_bin(f, os.path.join(bin_path, f))", "def prepare_filenames(config: Dict[str, Any]) -> Dict[str, Any]:\n for handler_name in config[\"handlers\"].keys():\n handler_config = config[\"handlers\"][handler_name]\n if \"filename\" in 
handler_config:\n filename = Path(handler_config[\"filename\"]).name\n handler_config[\"filename\"] = str(LOGS_DIR.joinpath(filename))\n return config", "def _resolve_paths(paths):\n allowed_ext = tuple(MIMES.keys())\n\n resolved = []\n for path in paths:\n if os.path.isdir(path):\n resolved.extend(\n entry.path for entry in os.scandir(path)\n if entry.is_file() and entry.name.lower().endswith(allowed_ext)\n )\n elif os.path.isfile(path) and path.lower().endswith(allowed_ext):\n resolved.append(path)\n return resolved", "def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))", "def _iter_configurations() -> Iterable[pathlib.Path]:\n for ext in CONFIGURATION_FILE_FORMATS:\n yield from HERE.rglob(f\"*{ext}\")", "async def paths_from_src(\n src: str = Query(..., description=\"starting article\"),\n dsts: list[str] = Query(..., description=\"destination articles\"),\n db: Session = Depends(database.get_db),\n):\n paths: dict[str, Optional[ArticlePath]] = {}\n ppd = multi_target_bfs(db, src)\n for dst in dsts:\n dst_id = title_to_id(db, dst)\n path = follow_parent_pointers(dst_id, ppd)\n if path is None:\n paths[dst] = None\n continue\n article_path = []\n for article_id in path:\n article_title = id_to_title(db, article_id)\n article_url = f\"https://en.wikipedia.org/?curid={article_id}\"\n article_path.append(\n ArticleWrapper(\n id=article_id,\n title=article_title,\n link=article_url, # type: ignore\n )\n )\n paths[dst] = ArticlePath(articles=article_path)\n return ManyArticlePaths(paths=paths)", "def connect(src, *destinations, exclude=set(), fit=False):\n assignemnts = []\n for dst in destinations:\n assignemnts.extend(_connect(src, dst, exclude, fit))\n \n return assignemnts", "def compilation(self, config: Config, files: List[str]) -> CallbackResult:\n return [], files", "def getcfg(args, config=None):\n from _pytest.deprecated import CFG_PYTEST_SECTION\n\n inibasenames = [\"pytest.ini\", \"tox.ini\", \"setup.cfg\"]\n args = [x for x in args if not str(x).startswith(\"-\")]\n if not args:\n args = [py.path.local()]\n for arg in args:\n arg = py.path.local(arg)\n for base in arg.parts(reverse=True):\n for inibasename in inibasenames:\n p = base.join(inibasename)\n if exists(p):\n try:\n iniconfig = py.iniconfig.IniConfig(p)\n except py.iniconfig.ParseError as exc:\n raise UsageError(str(exc))\n\n if (\n inibasename == \"setup.cfg\"\n and \"tool:pytest\" in iniconfig.sections\n ):\n return base, p, iniconfig[\"tool:pytest\"]\n elif \"pytest\" in iniconfig.sections:\n if inibasename == \"setup.cfg\" and config is not None:\n\n fail(\n CFG_PYTEST_SECTION.format(filename=inibasename),\n pytrace=False,\n )\n return base, p, iniconfig[\"pytest\"]\n elif inibasename == \"pytest.ini\":\n # allowed to be empty\n return base, p, {}\n return None, None, None", "def createSearchPathFromStrings(searchPath):\n from conary.conaryclient import cmdline\n from conary import conarycfg\n labelList = []\n finalPath = []\n if not isinstance(searchPath, (list, tuple)):\n searchPath = [searchPath]\n for item in searchPath:\n if isinstance(item, conarycfg.CfgLabelList):\n item = tuple(item)\n elif isinstance(item, versions.Label):\n labelList.append(item)\n continue\n elif isinstance(item, (list, tuple)):\n # recurse\n item = list(itertools.chain(*createSearchPathFromStrings(item)))\n elif isinstance(item, str):\n if '=' in item:\n # only troveSpecs have = in 
them\n item = ( cmdline.parseTroveSpec(item), )\n elif '@' in item:\n try:\n item = versions.Label(item)\n except baseerrors.ParseError, err:\n raise baseerrors.ParseError(\n 'Error parsing label \"%s\": %s' % (item, err))\n labelList.append(item)\n continue\n else:\n item = (cmdline.parseTroveSpec(item),)\n else:\n raise baseerrors.ParseError('Unknown searchPath item \"%s\"' % item)\n # labels don't get here, so we know that this is not part of a\n # labelPath\n if labelList:\n finalPath.append(tuple(labelList))\n labelList = []\n finalPath.append(item)\n if labelList:\n finalPath.append(tuple(labelList))\n return tuple(finalPath)", "def load(cls, configpaths, seed_values=None):\n if not configpaths:\n return _EmptyConfig()\n\n single_file_configs = []\n for configpath in configpaths:\n parser = cls._create_parser(seed_values)\n with open(configpath, 'r') as ini:\n parser.readfp(ini)\n single_file_configs.append(_SingleFileConfig(configpath, parser))\n return _ChainedConfig(single_file_configs)", "def merge_config_files(*paths):\n\n config = configparser.ConfigParser()\n config.optionxform = str\n config.read(paths)\n\n return config", "def _GetSubPathForNames(self, names):\n return [(self._module_dir, self._module_path + [name], name,\n self.ReleaseTrack())\n for name in names]", "def urlrepos(prefix, roothead, paths):\n for path in paths:\n path = os.path.normpath(path)\n yield (prefix + '/' +\n util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path", "def update_taskset(\n source,\n target,\n config,\n force=True,\n):\n\n source = Path(source)\n target = Path(target)\n\n managed_files = (\n '__init__.py',\n 'sysconfig.py',\n 'modules',\n )\n\n if (any([osp.exists(target / f)\n for f in managed_files])\n and not force):\n\n raise OSError(\"Project taskset file exists, not overwriting\")\n\n elif force:\n for f in managed_files:\n f = target / f\n if osp.isdir(f):\n print(f\"Cleaning {f}\")\n shutil.rmtree(f)\n elif osp.isfile(f):\n print(f\"Cleaning {f}\")\n os.remove(f)\n\n # then get modules we need and replace the ones in this project\n # with them\n print(\"Updating tasks/sysconfig.py\")\n shutil.copyfile(\n source / \"sysconfig.py\",\n target / \"sysconfig.py\"\n )\n\n print(\"Updating tasks/__init__.py\")\n shutil.copyfile(\n source / \"__init__.py\",\n target / \"__init__.py\"\n )\n\n print(\"Updating tasks/modules\")\n shutil.copytree(\n source / \"modules\",\n target / \"modules\",\n )", "def resolveNames(qconfname, tails):\n if not tails:\n return (qconfname,)\n # remove extension if exist\n qconfname = os.path.splitext(qconfname)[0]\n # Get core name\n # iterate over tails\n basename = None\n for tail in tails:\n # deconstruct for name.ext\n tailext = os.path.splitext(tail)\n # check if there is common name with qconfname\n if qconfname.find(tailext[0]) != -1:\n basename = qconfname[0:qconfname.find(tailext[0])]\n break\n # check if one tail can be found in base name\n if not basename:\n raise ValueError(\"One of tails should be the same as given base name.\")\n # form output\n ret = []\n for tail in tails:\n if not os.path.splitext(tail)[1]:\n ext = imageExt\n else:\n ext = ''\n ret.append(basename + tail + ext)\n return tuple(ret)", "def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))", "def updated_targets(self, targets, destination_directory):\n\n # Do the 
arguments have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.TARGETFILES_SCHEMA.check_match(targets)\n tuf.formats.PATH_SCHEMA.check_match(destination_directory)\n\n updated_targets = []\n\n for target in targets:\n # Get the target's filepath located in 'destination_directory'.\n # We will compare targets against this file.\n target_filepath = os.path.join(destination_directory, target['filepath'])\n \n # Try one of the algorithm/digest combos for a mismatch. We break\n # as soon as we find a mismatch.\n for algorithm, digest in target['fileinfo']['hashes'].items():\n digest_object = None\n try:\n digest_object = tuf.hash.digest_filename(target_filepath,\n algorithm=algorithm)\n # This exception would occur if the target does not exist locally. \n except IOError:\n updated_targets.append(target)\n break\n # The file does exist locally, check if its hash differs. \n if digest_object.hexdigest() != digest:\n updated_targets.append(target)\n break\n \n return updated_targets", "def get_paths(args):\n log, rest = get_log_path(args)\n out, _ = get_out_path(args)\n temp, _ = get_temp_path(args)\n return log, out, temp, rest", "def load_path_config(fn):\n setters = dict(\n bedtools=helpers.set_bedtools_path,\n samtools=helpers.set_samtools_path,\n r=helpers.set_R_path,\n tabix=helpers.set_tabix_path)\n\n if isinstance(fn, dict):\n for prog, setter in setters.items():\n try:\n path = fn[prog]\n setter(path)\n except KeyError:\n pass\n\n if isinstance(fn, basestring):\n import ConfigParser\n c = ConfigParser.SafeConfigParser()\n c.read(fn)\n if c.sections() != ['paths']:\n raise ValueError(\"Invalid path config -- must have \"\n \"only one section, [paths].\")\n for prog, setter in setters.items():\n try:\n path = c.get('paths', prog)\n setter(path)\n\n except ConfigParser.NoOptionError:\n pass", "def orig_filepath_list(filename_list, src_path):\n orig_filepaths = list([])\n i = 0\n for filename in filename_list:\n orig_filepaths.append(src_path + filename_list[i])\n i += 1\n return orig_filepaths", "def cp_config(configs: Path) -> Callable[[str, Path], Path]:\n\n def copy_config(config_name: str, dest_path: Path) -> Path:\n \"\"\"Copy a config file by name to a destination directory.\n\n The resulting config will be named runway.yml.\n\n \"\"\"\n runway_yml = dest_path / \"runway.yml\"\n if not config_name.startswith(\".yml\"):\n config_name += \".yml\"\n shutil.copy(configs / config_name, runway_yml)\n return runway_yml\n\n return copy_config", "def prepare_config(config, config_prepare_hooks=config_prepare_hooks):\n keysep = \":\"\n for nkey, func in list(config_prepare_hooks.items()):\n keys = nkey.split(keysep)\n try:\n dv = nested_value(config, keys)\n except:\n continue\n func(dv)", "def targets(tgt, tgt_type=\"glob\"):\n\n ssh_known_hosts_file = __opts__.get(\"ssh_known_hosts_file\")\n\n if not os.path.isfile(ssh_known_hosts_file):\n log.error(\"Cannot find SSH known_hosts file\")\n raise OSError(\"Cannot find SSH known_hosts file\")\n if not os.access(ssh_known_hosts_file, os.R_OK):\n log.error(\"Cannot access SSH known_hosts file: %s\", ssh_known_hosts_file)\n raise OSError(\n \"Cannot access SSH known_hosts file: {}\".format(ssh_known_hosts_file)\n )\n\n with salt.utils.files.fopen(ssh_known_hosts_file, \"r\") as hostfile:\n raw = _parse_ssh_known_hosts([line.rstrip() for line in hostfile])\n\n return __utils__[\"roster_matcher.targets\"](raw, tgt, tgt_type, \"ipv4\")", "def test_makeliststep_call_config_file():\n config_file = t_path(\n 
Path('steps') / 'makelist.cfg'\n )\n results = MakeListStep.call(config_file=config_file)\n assert results == [43.0, 'My hovercraft is full of eels.', False]", "def expand_paths(__file__, paths_with_globs):\n if isinstance(paths_with_globs, str):\n return expand_path(__file__, paths_with_globs)\n else:\n expanded_globs = [\n expand_path(__file__, path) for path in paths_with_globs\n ]\n # Flatten\n return list(itertools.chain.from_iterable(expanded_globs))", "def expand_paths(paths, cwd=None):\n return [expand_path(x, cwd) for x in paths]", "def get_urls(argument_urls: Optional[str] = None) -> List[str]:\n if argument_urls:\n raw_urls = argument_urls\n elif 'amqp_url' in config:\n raw_urls = config['amqp_url']\n elif 'AMQP_SERVERS' in environ:\n raw_urls = environ['AMQP_SERVERS']\n else:\n raise ValueError('AMQP server url is not configured')\n\n return [process_url(url) for url in raw_urls.split(',')]", "def maybe_add_source_names(source_ids, names):\n if isinstance(source_ids[0], tuple):\n return source_ids\n else:\n return add_source_names(source_ids, names)", "def testJoinPaths(loggingMixin, yamlConfigForParsingPlugins):\n parameters = yamlConfigForParsingPlugins\n assert parameters[\"joinPaths\"] == os.path.join(\"a\", \"b\", \"c\")", "def file_src_dest(self):\n yielded_dests = []\n for mgr_file in reversed(self.manager.contents):\n path = Path(mgr_file)\n for from_path in self.maybe_add_one_path(path):\n stem = from_path.relative_to(path) if path.is_dir() else path.name\n to_path = self.output_files_dir / stem\n resolved = str(to_path.resolve())\n if resolved in yielded_dests: # pragma: no cover\n self.log.debug(\"Already populated\", resolved)\n continue\n yielded_dests += [resolved]\n yield from_path, to_path", "def batch_preprocess(source_dirname: str,\n target_dirname: str = None,\n img_size: int = 64,\n is_save=True) -> list:\n try:\n print(source_dirname)\n\n if not os.path.exists(source_dirname):\n print(\"Warning: No such directory {0}\".format(source_dirname))\n return\n\n if is_save:\n if not target_dirname:\n target_dirname = source_dirname\n else:\n if not os.path.exists(target_dirname):\n os.mkdir(target_dirname)\n imgs = []\n\n for name in os.listdir(source_dirname):\n path = os.path.join(source_dirname, name)\n if is_save:\n targer_path = os.path.join(target_dirname, name)\n else:\n targer_path = \"\"\n img = to_square_img(path, targer_path, img_size, is_save=is_save)\n imgs.append(img)\n\n LOGGER.debug(\"Source images: \"+str(imgs))\n\n return imgs\n\n except Exception as exception:\n LOGGER.error(\"Exception in batch_preprocess: {0}\".format(exception))", "def _parse_and_validate(raw_config_list):\n items = []\n for raw in raw_config_list:\n\n # Validation.\n for key in CONFIGS_REQUIRED:\n if key not in raw or raw[key] is None:\n raise ConfigError(\"must specify '%s' in item config: %s\" % (key, raw))\n\n if \"version_string\" in raw and not _CONFIG_VERSION_RE.match(str(raw[\"version_string\"])):\n raise ConfigError(\"invalid version string: '%s'\" % raw[\"version_string\"])\n if \"version_string\" not in raw and \"version_hashable\" not in raw and \"version_command\" not in raw:\n raise ConfigError(\"must specify 'version_string', 'version_hashable', or 'version_command' in item config: %s\" % raw)\n\n # Validate shell templates.\n # For these, we don't expand environment variables here, but instead do it at once at call time.\n for key in \"upload_command\", \"download_command\":\n try:\n strif.shell_expand_to_popen(raw[key], {\"REMOTE\": \"dummy\", \"LOCAL\": 
\"dummy\"})\n except ValueError as e:\n raise ConfigError(\"invalid command in config value for %s: %s\" % (key, e))\n\n # Normalize and expand environment variables.\n for key in \"local_path\", \"remote_prefix\", \"remote_path\":\n if key.startswith(\"/\"):\n raise ConfigError(\"currently only support relative paths for local_path and remote_path: %s\" % key)\n raw[key] = raw[key].rstrip(\"/\")\n\n try:\n raw[key] = strif.expand_variables(raw[key], os.environ)\n except ValueError as e:\n raise ConfigError(\"invalid command in config value for %s: %s\" % (key, e))\n\n # Parse enums.\n try:\n raw[\"install_method\"] = InstallMethod[raw[\"install_method\"]]\n except KeyError:\n raise ConfigError(\"invalid install_method: %s\" % raw[\"install_method\"])\n\n # Parse booleans. Values True and False may already be converted.\n try:\n if (type(raw[\"make_backup\"]) is str):\n raw[\"make_backup\"] = raw[\"make_backup\"].lower() in (\"on\", \"t\", \"true\", \"y\", \"yes\")\n except KeyError:\n raise ConfigError(\"invalid make_backup: %s\" % raw[\"make_backup\"])\n\n items.append(Config(**raw))\n\n log.debug(\"final configs: %s\", items)\n return items", "def parse_config(config_paths, **kwargs):\n config = helpers.load_yaml_resource('resources/grocker.yaml')\n\n if not config_paths and os.path.exists('.grocker.yml'):\n config_paths = ['.grocker.yml']\n\n for config_path in config_paths:\n project_config = helpers.load_yaml(config_path)\n config.update(project_config or {})\n\n config.update({k: v for k, v in list(kwargs.items()) if v})\n\n return config", "def copy_files(self, source, target):\n\n if source == target and is_local(self.borrowed_ctx.host):\n logger.warning(\"IGNORE self-node: {}\".format(self.borrowed_ctx.host))\n return\n\n try:\n for item in os.listdir(source):\n if os.path.isfile(os.path.join(source, item)):\n logger.debug(\n \"processing {} --> {}\".format(\n os.path.join(source, item), self.borrowed_ctx.host\n )\n )\n self._sftp_channel.put(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n else:\n self.mkdir(\"%s/%s\" % (target, item), ignore_existing=True)\n self.copy_files(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n except Exception as e:\n logger.warning(\n \"Error of processing target = ({}:{}), for reason: {}\".format(\n self.borrowed_ctx.host, self.borrowed_ctx.port, e,\n )\n )\n exit(0)", "def get_sample_paths(paths : list, mags : list) -> tuple:\n all_inputs, all_targets = defaultdict(), defaultdict()\n for mag in mags:\n inputs, targets = [], []\n for path in paths:\n if \"input\" in path and mag in path:\n inputs.append(path)\n if \"target\" in path and mag in path:\n targets.append(path)\n all_inputs[mag] = inputs\n all_targets[mag] = targets\n return all_inputs, all_targets", "def get_configs(self, configs):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcp = LayerConfigParser()\n\t\tfail_str = ''\n\t\tfiles = []\n\t\tfor config_file in configs:\n\t\t\tif isinstance(config_file, tuple):\n\t\t\t\tcontinue\n\t\t\tif not shutit_util.is_file_secure(config_file):\n\t\t\t\tfail_str = fail_str + '\\nchmod 0600 ' + config_file\n\t\t\t\tfiles.append(config_file)\n\t\tif fail_str != '':\n\t\t\tif shutit_global.shutit_global_object.interactive > 1:\n\t\t\t\tfail_str = 'Files are not secure, mode should be 0600. 
Running the following commands to correct:\\n' + fail_str + '\\n'\n\t\t\t\t# Actually show this to the user before failing...\n\t\t\t\tself.log(fail_str,level=logging.INFO)\n\t\t\t\tself.log('Do you want me to run this for you? (input y/n)',level=logging.INFO)\n\t\t\t\tif shutit_global.shutit_global_object.interactive == 0 or shutit_util.util_raw_input(default='y') == 'y':\n\t\t\t\t\tfor f in files:\n\t\t\t\t\t\tself.log('Correcting insecure file permissions on: ' + f,level=logging.INFO)\n\t\t\t\t\t\tos.chmod(f,0o600)\n\t\t\t\t\t# recurse\n\t\t\t\t\treturn self.get_configs(configs)\n\t\t\telse:\n\t\t\t\tfor f in files:\n\t\t\t\t\tself.log('Correcting insecure file permissions on: ' + f,level=logging.INFO)\n\t\t\t\t\tos.chmod(f,0o600)\n\t\t\t\t# recurse\n\t\t\t\treturn self.get_configs(configs)\n\t\t\tself.fail(fail_str) # pragma: no cover\n\t\tfor config in configs:\n\t\t\tif isinstance(config, tuple):\n\t\t\t\tcp.readfp(config[1], filename=config[0])\n\t\t\telse:\n\t\t\t\tcp.read(config)\n\t\t# Treat allowed_images as a special, additive case\n\t\tself.build['shutit.core.module.allowed_images'] = cp.get_config_set('build', 'shutit.core.module.allowed_images')\n\t\treturn cp", "def guess_a_config_location():\n names = ['gridrealm.cfg', 'gr.cfg', 'config.cfg', 'dev.cfg']\n home_paths = [os.path.join(os.getenv('HOME'), stub)\n for stub in ['.%s', 'gridrealm/%s']]\n other_paths = ['/etc/gridrealm/%s']\n paths = [os.path.join(os.getcwd(), name) for name in names]\n paths.append('/etc/gridrealm.cfg')\n for name in names:\n paths.extend(path % name for path in home_paths)\n for name in names: # second loop to enforce list order\n paths.extend(path % name for path in other_paths)\n return [path for path in paths if os.path.exists(path)]", "def build_targets(self, patterns):\n _targets = []\n for p in patterns:\n p = p.format_map(self.config)\n for s in self.samples:\n e = dict(s, **self.config)\n _targets.append(p.format_map(e))\n return list(set(_targets))", "def handle_args(args: Namespace) -> list:\n # If no targets provided, assume were finding them on network.\n # Once we have targets, if no test given, port/service scan them.\n if not args.target:\n low(\"Target not supplied, running host scan.\")\n hosts = get_hosts(verify_subnet(args.subnet))\n else:\n low(\"Target supplied: {}\".format(args.target))\n hosts = [Host(host) for host in args.target]\n\n if args.user and args.passwd:\n low(\"Username and Password supplied for tests, {}:{}\".format(args.user, args.passwd))\n for host in hosts:\n host.credentials = {'user': args.user, 'passwd': args.passwd}\n\n return hosts", "def compute_lists_proc(path, source_tree, search_regex):\n used_pep_set = set() # PRUNING_EXCLUDE_PATTERNS\n used_pip_set = set() # PRUNING_INCLUDE_PATTERNS\n used_dep_set = set() # DOMAIN_EXCLUDE_PREFIXES\n used_dip_set = set() # DOMAIN_INCLUDE_PATTERNS\n pruning_set = set()\n domain_substitution_set = set()\n symlink_set = set()\n if path.is_file():\n relative_path = path.relative_to(source_tree)\n if not any(cpath in str(relative_path.as_posix()) for cpath in CONTINGENT_PATHS):\n if path.is_symlink():\n try:\n resolved_relative_posix = path.resolve().relative_to(source_tree).as_posix()\n symlink_set.add((resolved_relative_posix, relative_path.as_posix()))\n except ValueError:\n # Symlink leads out of the source tree\n pass\n elif not any(skip in ('.git', '__pycache__', 'uc_staging') for skip in path.parts):\n try:\n if should_prune(path, relative_path, used_pep_set, used_pip_set):\n 
pruning_set.add(relative_path.as_posix())\n elif should_domain_substitute(path, relative_path, search_regex, used_dep_set,\n used_dip_set):\n domain_substitution_set.add(relative_path.as_posix())\n except: #pylint: disable=bare-except\n get_logger().exception('Unhandled exception while processing %s', relative_path)\n return (used_pep_set, used_pip_set, used_dep_set, used_dip_set, pruning_set,\n domain_substitution_set, symlink_set)", "def update_paths(args, upper_dirs=None, pattern='path'):\n if upper_dirs is None:\n upper_dirs = []\n missing = []\n for k in (k for k in args if pattern in k):\n if '*' in os.path.basename(args[k]) or k in upper_dirs:\n p = update_path(os.path.dirname(args[k]))\n args[k] = os.path.join(p, os.path.basename(args[k]))\n else:\n args[k] = update_path(args[k])\n p = args[k]\n if not os.path.exists(p):\n logging.warning('missing \"%s\": %s', k, p)\n missing.append(k)\n return args, missing", "def update_paths(args, upper_dirs=None, pattern='path'):\n if upper_dirs is None:\n upper_dirs = []\n missing = []\n for k in (k for k in args if pattern in k):\n if '*' in os.path.basename(args[k]) or k in upper_dirs:\n p = update_path(os.path.dirname(args[k]))\n args[k] = os.path.join(p, os.path.basename(args[k]))\n else:\n args[k] = update_path(args[k])\n p = args[k]\n if not os.path.exists(p):\n logging.warning('missing \"%s\": %s', k, p)\n missing.append(k)\n return args, missing", "def adjust_paths(dest, **paths):\n hgrc = os.path.join(dest, '.hg', 'hgrc')\n config = RawConfigParser()\n config.read(hgrc)\n\n if not config.has_section('paths'):\n config.add_section('paths')\n\n changed = False\n for path_name, path_value in paths.items():\n if (not config.has_option('paths', path_name) or\n config.get('paths', path_name) != path_value):\n changed = True\n config.set('paths', path_name, path_value)\n\n if changed:\n config.write(open(hgrc, 'w'))", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def validate_config(config):\n # check if paths are valid\n check_paths = {\n 'data_path': r'data$',\n 'master_list_path': r'master_list\\.csv$',\n 'duplicate_list_path': r'duplicate_list\\.csv$',\n 'log_path': r'data[\\\\\\/]jobfunnel.log$',\n 'filter_list_path': r'data[\\\\\\/]filter_list\\.json$',\n }\n\n for path, pattern in check_paths.items():\n if not re.search(pattern, config[path]):\n raise ConfigError(path)\n # check if the provider list only consists of supported providers\n if not set(config['providers']).issubset(PROVIDERS):\n raise ConfigError('providers')\n\n # check validity of region settings\n validate_region(config['search_terms']['region'])\n\n # check validity of delay settings\n validate_delay(config['delay_config'])\n\n # check the validity of max_listing_days settings\n if(config['max_listing_days'] is not None and config['max_listing_days'] < 0):\n raise ConfigError('max_listing_days')", "def __call__(self, parser, namespace, values, option_string=None):\n namespace._config_dirs.append(values)\n setattr(namespace, self.dest, values)\n\n values = os.path.expanduser(values)\n\n if not os.path.exists(values):\n raise ConfigDirNotFoundError(values)\n\n config_dir_glob = os.path.join(values, '*.conf')\n\n for config_file in sorted(glob.glob(config_dir_glob)):\n ConfigParser._parse_file(config_file, namespace)", "def multi_join(paths, *path_segments):\n return [os.path.join(*(path_segments + (path,))) for path in paths]", "def pick_targets(targetdir, faildir_name='looks_failed', 
targets={'weight': 'model_weight_final.pth', 'config': '.hydra/config.yaml'}, move_fail=True):\n def _check_experiment(experimentdir, targets: dict):\n retdict = dict()\n\n for k, v in targets.items():\n pathlist = glob.glob(os.path.join(experimentdir, '**', v), recursive=True)\n\n if len(pathlist) == 1:\n retdict[k] = pathlist[0]\n else:\n return None\n\n return retdict\n\n # serch experiments\n experiments = glob.glob(os.path.join(targetdir, '*'))\n if os.path.join(targetdir, faildir_name) in experiments:\n experiments.remove(os.path.join(targetdir, faildir_name))\n else:\n os.makedirs(os.path.join(targetdir, faildir_name))\n\n success, fail = dict(), dict()\n\n # loop over experiments\n for experiment in experiments:\n retdict = _check_experiment(experiment, targets)\n\n if retdict:\n success[os.path.basename(experiment)] = retdict\n else:\n fail[os.path.basename(experiment)] = experiment\n\n # move fail\n if move_fail:\n for k, v in fail.items():\n shutil.move(v, os.path.join(targetdir, faildir_name, k))\n\n return success, fail", "def update_config(\n source,\n target,\n force=True,\n):\n\n source = Path(source)\n target = Path(target)\n\n managed_files = (\n 'requirements.in',\n 'requirements.txt',\n )\n\n if (any([osp.exists(target / f)\n for f in managed_files])\n and not force):\n\n raise OSError(\"Project config exists, not overwriting\")\n\n elif force:\n for f in managed_files:\n f = target / f\n if osp.isdir(f):\n print(f\"Cleaning {f}\")\n shutil.rmtree(f)\n elif osp.isfile(f):\n print(f\"Cleaning {f}\")\n os.remove(f)\n\n print(\"Updating .jubeo/requirements.in\")\n shutil.copyfile(\n source / \"requirements.in\",\n target / \"requirements.in\"\n )\n\n print(\"Updating .jubeo/requirements.txt\")\n shutil.copyfile(\n source / \"requirements.txt\",\n target / \"requirements.txt\"\n )", "def _get_target_files(self) -> List[Path]:\n repo = get_git_repo()\n submodules = repo.submodules # type: ignore\n submodule_paths = [\n self._fname_to_path(repo, submodule.path) for submodule in submodules\n ]\n\n # resolve given paths relative to current working directory\n paths = [p.resolve() for p in self._paths]\n if self._base_commit is not None:\n paths = [\n a\n for a in (self._status.added + self._status.modified)\n # diff_path is a subpath of some element of input_paths\n if any((a == path or path in a.parents) for path in paths)\n ]\n changed_count = len(paths)\n click.echo(f\"| looking at {unit_len(paths, 'changed path')}\", err=True)\n paths = [\n path\n for path in paths\n if all(\n submodule_path not in path.parents\n for submodule_path in submodule_paths\n )\n ]\n if len(paths) != changed_count:\n click.echo(\n f\"| skipping files in {unit_len(submodule_paths, 'submodule')}: \"\n + \", \".join(str(path) for path in submodule_paths),\n err=True,\n )\n\n # Filter out ignore rules, expand directories\n self._ignore_rules_file.seek(0)\n patterns = Parser(self._base_path).parse(self._ignore_rules_file)\n\n file_ignore = FileIgnore(\n base_path=self._base_path, patterns=patterns, target_paths=paths\n )\n\n walked_entries = list(file_ignore.entries())\n click.echo(\n f\"| found {unit_len(walked_entries, 'file')} in the paths to be scanned\",\n err=True,\n )\n filtered: List[Path] = []\n for elem in walked_entries:\n if elem.survives:\n filtered.append(elem.path)\n\n skipped_count = len(walked_entries) - len(filtered)\n if skipped_count:\n click.echo(\n f\"| skipping {unit_len(range(skipped_count), 'file')} based on path ignore rules\",\n err=True,\n )\n\n relative_paths = 
[path.relative_to(self._base_path) for path in filtered]\n\n return relative_paths", "def _getDefaultConfigFiles(self, _os = os, _sys = sys):\n argv0 = util.filename.fromLocale(\n _sys.argv[0], self.runtime.path_encoding\n )\n if isinstance(argv0, unicode):\n candidates = [util.filename.toLocale(\n name, locale_enc = self.runtime.path_encoding\n ) for name in [\n _os.path.join(\n self.runtime.repository, u'conf', u'mailer.conf'\n ),\n _os.path.join(_os.path.dirname(argv0), u'mailer.conf'),\n u'/etc/svn-mailer.conf',\n ]\n ]\n else:\n # --path-encoding=none\n candidates = [\n _os.path.join(self.runtime.repository, 'conf', 'mailer.conf'),\n _os.path.join(_os.path.dirname(argv0), 'mailer.conf'),\n _os.path.join(_os.path.sep, \"etc\", \"svn-mailer.conf\"),\n ]\n\n return candidates", "def complete_paths(path, filenames):\n\treturn [ \"{0}{1}\".format(path, filenames[i]) for i in range(0, len(filenames)) ]", "def resolve(self, targets):\n context = self.context(target_roots=targets)\n self.execute(context)\n return context.products.get_data('compile_classpath')", "def setup(self):\n if not isinstance(self.files, (list, tuple)):\n raise RuntimeError(\"Argument must be list of files.\")\n\n return self.files", "def make_targets(dir_name, method, *args):\n roots = [\n \"-\".join([str(c) for c in comb])\n for comb in product(*args)\n ]\n\n return [f\"{dir_name}/{method}-{root}\" for root in roots]", "def test_paths_to_plates():\n output = filelister_ix.paths_to_plates(TEST_PATH_IX)\n prefix = os.path.abspath(TEST_PATH_IX)\n plate_names = [\"test-plate-1\", \"test-plate-2\",\n \"test-plate-3\", \"test-plate-4\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret", "def config_to_list(config_name=CONFIG_FILE_NAME):\n result = []\n config = configparser.ConfigParser()\n\n if not config.read([config_name]):\n return []\n\n for section in SECTIONS:\n for name, opts in ((n, o) for n, o in SECTIONS[section].items() if config.has_option(section, n)):\n value = config.get(section, name)\n\n if value != '' and value != 'None':\n action = opts.get('action', None)\n\n if action == 'store_true' and value == 'True':\n # Only the key is on the command line for this action\n result.append('--{}'.format(name))\n\n if not action == 'store_true':\n if opts.get('nargs', None) == '+':\n result.append('--{}'.format(name))\n result.extend((v.strip() for v in value.split(',')))\n else:\n result.append('--{}={}'.format(name, value))\n\n return result", "def find_paths(self, source, destination, closed=None):\n if closed is None:\n closed = set()\n closed.add(source)\n links = {x.trusted for x in self._tau\n if x.truster == source and x.trusted not in closed}\n if len(links) == 0: # base\n return []\n if destination in links: # base\n return [[Trust(source, destination)]]\n # recurse\n retval = []\n for link in links:\n linkpaths = self.find_paths(link, destination, closed)\n for path in linkpaths:\n path.insert(0, Trust(source, link))\n retval += linkpaths\n\n for path in retval:\n if None in path:\n retval.remove(path)\n if len(retval) == 0:\n return []\n return retval", "def _transferFiles(initialPath, destinationPath, fileList):\n if not fileList:\n return\n if not os.path.exists(destinationPath):\n os.mkdir(destinationPath)\n for 
pattern in fileList:\n if isinstance(pattern, tuple):\n # allow renames in transit\n fromName, destName = pattern\n copies = [(fromName, destName)]\n else:\n # expand globs if they're given\n copies = []\n for ff in glob.glob(pattern):\n # renaming not allowed with globs\n copies.append((ff, ff))\n\n for fromName, destName in copies:\n fromPath = os.path.join(initialPath, fromName)\n toPath = os.path.join(destinationPath, destName)\n runLog.extra(\"Copying {} to {}\".format(fromPath, toPath))\n shutil.copy(fromPath, toPath)", "def get_target_paths(to_dir,report=False):\n paths = []\n filenames = os.listdir(to_dir)\n for filename in filenames:\n path = os.path.join(to_dir,filename)\n if filename.endswith('~') or filename in SKIPFILES:\n if report:\n print 'Skipping %s' % filename\n continue \n elif (not os.path.isfile(path)) and (not os.path.isdir(path)):\n if report:\n print 'Skipping %s (not a file or directory)' % filename\n continue\n elif filename.startswith('.'):\n if report:\n print 'Skipping %s (filename has a leading dot)' % filename\n continue\n else:\n if HOSTNAME_SEPARATOR in filename:\n # This appears to be a filename with a trailing\n # hostname, e.g. _muttrc__dulip. If the trailing\n # hostname matches the hostname of this host then we\n # link to it.\n hostname = filename.split(HOSTNAME_SEPARATOR)[-1]\n if hostname == HOSTNAME:\n paths.append(path)\n else:\n if report:\n print 'Skipping %s (different hostname)' % filename\n continue \n else:\n # This appears to be a filename without a trailing\n # hostname.\n if filename + HOSTNAME_SEPARATOR + HOSTNAME in filenames: \n if report:\n print 'Skipping %s (there is a host-specific version of this file for this host)' % filename\n continue\n else: \n paths.append(path) \n return paths" ]
[ "0.5550079", "0.5392351", "0.5171191", "0.5132501", "0.5108284", "0.5076208", "0.5054015", "0.5015645", "0.4951827", "0.4932448", "0.49286622", "0.49249643", "0.48797044", "0.48406097", "0.48093775", "0.4735485", "0.47257975", "0.47036657", "0.47026363", "0.46804345", "0.467906", "0.4676848", "0.46630636", "0.46223402", "0.46163362", "0.45962885", "0.45886078", "0.45858467", "0.45850947", "0.4573892", "0.45630643", "0.45484418", "0.4545616", "0.4537735", "0.45153213", "0.4507746", "0.44990152", "0.44990152", "0.44987082", "0.44887638", "0.44878832", "0.44870508", "0.44812143", "0.44806984", "0.44801125", "0.44797108", "0.44682962", "0.44511697", "0.44483304", "0.4447693", "0.44398955", "0.4425112", "0.44199598", "0.44181743", "0.44015655", "0.43729907", "0.43670952", "0.43653485", "0.43635392", "0.43603545", "0.43594587", "0.43424293", "0.4338103", "0.43299982", "0.4328599", "0.43273", "0.4325203", "0.4319711", "0.43189326", "0.43120804", "0.43043447", "0.43039185", "0.4301025", "0.42994747", "0.42988467", "0.4289678", "0.42859444", "0.42845064", "0.42817223", "0.42662543", "0.42662543", "0.42646748", "0.42582917", "0.42501748", "0.42474422", "0.42460114", "0.4244241", "0.424363", "0.42421386", "0.42371532", "0.42361486", "0.42349347", "0.42340103", "0.42270988", "0.42261547", "0.42250663", "0.42224896", "0.42180082", "0.42123342", "0.42122525" ]
0.6430958
0
Return the path to a resource file shipped with cx_Freeze. This is used to find our base executables and initscripts when they are just specified by name.
def get_resource_file_path(dirName, name, ext):
    if os.path.isabs(name):
        return name
    name = os.path.normcase(name)
    fullDir = os.path.join(os.path.dirname(cx_Freeze.__file__), dirName)
    if os.path.isdir(fullDir):
        for fileName in os.listdir(fullDir):
            checkName, checkExt = \
                    os.path.splitext(os.path.normcase(fileName))
            if name == checkName and ext == checkExt:
                return os.path.join(fullDir, fileName)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resource_filename(name):\n return pkg_resources.resource_filename(__name__, name)", "def resourcePath(relative):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'assets'))\r\n\r\n return os.path.join(base_path, relative)", "def resource_path(relative_path=None):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n if not relative_path:\n return base_path\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n\r\n except:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n\t\ttry:\r\n\t\t\t# PyInstaller creates a temp folder and stores path in _MEIPASS\r\n\t\t\tbase_path = sys._MEIPASS\r\n\t\texcept Exception:\r\n\t\t\tbase_path = os.path.abspath(\".\")\r\n\r\n\t\treturn os.path.join(base_path, relative_path)", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n 
base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path) :\n\n try :\n # 
PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except :\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS # pylint: disable=no-member\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(self, relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"..\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\"../..\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except AttributeError:\n base_path = abspath(\".\")\n\n return join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.dirname(os.path.realpath(__file__))\n \n return os.path.join(base_path, relative_path)", "def resourcePath(relative, dirname=\"data\"):\n # first look in pyinstaller bundle\n if hasattr(sys, \"_MEIPASS\"):\n path = os.path.join(sys._MEIPASS, dirname)\n \n else:\n # then look in py2app bundle\n path = os.environ.get(\"RESOURCEPATH\", None)\n if path is None:\n # then look in source code directory\n path = os.path.join(RESOURCE_BASE, dirname)\n \n path = os.path.join(path, relative)\n \n return path", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS \n base_path = sys._MEIPASS\n _BINARY_DIST = True\n #print sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def resource_path(relative_path) :\n\n try :\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except :\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\t# \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n\t# base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n\t# return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n try:\r\n base_path = sys._MEIPASS\r\n 
except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n print(\"resource_path:\", os.path.join(base_path, relative_path))\n except Exception:\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)", "def resource_path(self, relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n # base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n # return os.path.join(base_path, relative_path)\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n path = os.path.join(base_path, relative_path)\n return path", "def get_resource(res_name, res_type=\"icons\"):\n own_path = os.path.dirname(__file__)\n resource_path = os.path.abspath(os.path.join(own_path, os.pardir, \"resources\", res_type))\n return os.path.join(resource_path, res_name)", "def resource_path(self,relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\\\\Visual_Ressources\\\\\"+self.language+\"\\\\\") \n # \".\"\n # 'Content\\\\Back_End\\\\'\n return os.path.join(base_path, relative_path)", "def path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"./\")\n\n print(\"[RESOURCE]\", relative_path)\n rPath = os.path.join(base_path, relative_path)\n return rPath", "def resource_path(relative_path):\n base_path= getattr(sys,'MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resourcePath(relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)", "def resource_path(relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath('.'), relative_path)", "def GetScriptFile() -> str:\n if 
(hasattr(GetScriptFile, \"file\")):\n return GetScriptFile.file\n ret: str = \"\"\n try:\n # The easy way. Just use __file__.\n # Unfortunately, __file__ is not available when cx_freeze is used or in IDLE.\n ret = os.path.realpath(__file__)\n except NameError:\n # The hard way.\n if (len(sys.argv) > 0 and len(sys.argv[0]) > 0 and os.path.isabs(sys.argv[0])):\n ret = os.path.realpath(sys.argv[0])\n else:\n ret = os.path.realpath(inspect.getfile(GetScriptFile))\n if (not os.path.exists(ret)):\n # If cx_freeze is used the value of the ret variable at this point is in\n # the following format: {PathToExeFile}\\{NameOfPythonSourceFile}. This\n # makes it necessary to strip off the file name to get the correct path.\n ret = os.path.dirname(ret)\n GetScriptFile.file: str = ret\n return GetScriptFile.file", "def get_exe_path(exe):\n for type_, path in get_possible_paths():\n full_path = os.path.join(path, exe)\n if os.path.exists(full_path):\n if type_ == 'bundled':\n bundled_warning()\n return full_path\n return None", "def get_resource(self, rsc_path):\n\n\t\ttry:\n\t\t\tfrom pkg_resources import resource_filename\n\t\t\treturn resource_filename(__name__, rsc_path)\n\t\texcept ImportError:\n\t\t\treturn os.path.join(os.path.dirname(__file__), rsc_path)", "def get_resource_base_path(self): # real signature unknown; restored from __doc__\n return \"\"", "def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )", "def get_resources_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES\n )", "def exepath(filename):\r\n return os.path.abspath(os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), filename))", "def get_absolute_resource_path(resource_path):\n return pkg_resources.resource_filename(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )", "def resource_path(relative_path):\n try:\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)\n except:\n pass", "def resource_path(relative_path):\n try:\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)\n except:\n pass", "def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb", "def resourcePath(self,relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n output = base_path + relative_path\n return output", "def find_resource_dir(self, dock_image: str, meta: dict) -> str:\n try:\n return self.interrogate_python_package_location(dock_image, meta)\n except CalledProcessError:\n return ''", "def get_exe_filename(self, exe_name):\n from distutils.sysconfig import get_config_var\n exe_path = exe_name.split('.')\n exe_suffix = get_config_var('EXE')\n return os.path.join(*exe_path) + exe_suffix", "def get_exe_fullpath(self, exe_name):\n modpath = exe_name.split('.')\n filename = self.get_exe_filename(modpath[-1])\n\n if not self.inplace:\n # no further work needed\n # returning :\n # build_dir/package/path/filename\n filename = os.path.join(*modpath[:-1] + [filename])\n return os.path.join(self.build_lib, filename)\n\n # the inplace option requires to find the package 
directory\n # using the build_py command for that\n package = '.'.join(modpath[0:-1])\n build_py = self.get_finalized_command('build_py')\n package_dir = os.path.abspath(build_py.get_package_dir(package))\n\n # returning\n # package_dir/filename\n return os.path.join(package_dir, filename)", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def get_resource_filename(local_filename):\n return os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"resources\", local_filename\n )", "def get_executable_path(self):\n executable_name = None\n if 'CFBundleExecutable' in self.info:\n executable_name = self.info['CFBundleExecutable']\n else:\n executable_name, _ = splitext(basename(self.path))\n executable_name = utils.remove_control_char(executable_name)\n executable = join(self.path, executable_name)\n if not exists(executable):\n raise Exception(\n 'could not find executable for {0}'.format(self.path))\n return executable", "def exe_filename(self):", "def get_executable_path(self):\n executable_name = None\n if 'CFBundleExecutable' in self.info:\n executable_name = self.info['CFBundleExecutable']\n else:\n executable_name, _ = splitext(basename(self.path))\n executable = join(self.path, executable_name)\n if not exists(executable):\n raise Exception(\n 'could not find executable for {0}'.format(self.path))\n return executable", "def name(self):\n return self._path or '__main__'", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._R_script)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path", "def GetResourcePath(self, resource_name, check=True):\n path = os.path.join(self.resources_dir, resource_name)\n if check:\n file_utils.CheckPath(path, 'resource')\n return path", "def resource_path(relative_path):\n return os.path.join(BASEPATH, relative_path)", "def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path", "def resource_path(p=()):\n # map a string to a tuple containing the string to provide the obvious shortcut\n if isinstance(p, str):\n p = (p,)\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), *p)", "def get_gui_path():\n if frozen_project():\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(__file__)", "def bundle_path(self, app):\n return (\n 
self.platform_path / self.output_format / safe_formal_name(app.formal_name)\n )", "def resource_path(relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, \"TopasGraphSim\", relative_path)\n\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, os.pardir, relative_path)", "def get_resource(filename: str, path: str | None = None) -> str:\n root = Path(__file__).parent\n full_path = root if path is None else root / Path(path)\n return str(full_path / filename)", "def _localfile(name):\n return os.path.abspath(resource_filename(__name__, name))", "def getResource(resname, loc = None):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n elif loc and resource_exists(loc, resname):\n # use the config within distribution\n return resource_filename(loc, resname)\n else:\n return None", "def executable_path(self):\n prepend = self._active_environment(ActiveEnvironment).prepend\n return prepend.get(\"PATH\", \"\")", "def get_package_init_file_name():\n return _PACKAGE_INIT_FILE + FILE_EXTENSION" ]
[ "0.7262757", "0.72428066", "0.7147131", "0.7135437", "0.70946133", "0.70946133", "0.70946133", "0.70946133", "0.70946133", "0.70946133", "0.70946133", "0.7087055", "0.7067677", "0.70555377", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.705534", "0.70448864", "0.70434654", "0.7034146", "0.7026193", "0.7025934", "0.7017663", "0.701485", "0.7010971", "0.6981204", "0.69693744", "0.69662", "0.6924289", "0.6916595", "0.6880174", "0.68419087", "0.6826415", "0.6787698", "0.6732895", "0.6732895", "0.6732895", "0.6732895", "0.6732895", "0.67033595", "0.6684578", "0.66757905", "0.6666392", "0.6640523", "0.66290355", "0.6621141", "0.6565866", "0.656229", "0.65340793", "0.65303016", "0.6523273", "0.6502339", "0.6501481", "0.64600706", "0.64440304", "0.6436773", "0.64192367", "0.64192367", "0.6400008", "0.6378208", "0.63671166", "0.63656384", "0.63551426", "0.6342822", "0.63179123", "0.63179123", "0.62683475", "0.62577504", "0.62527955", "0.62498593", "0.62465745", "0.62433904", "0.62426615", "0.6233294", "0.6233294", "0.62161565", "0.62136865", "0.6192812", "0.6187143", "0.6157289", "0.6140807", "0.61284816", "0.6125952", "0.61231863", "0.61201966", "0.6104441", "0.60789275", "0.60406816" ]
0.69520795
40
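Nearly all of the negatives in the row above are small variations on one PyInstaller idiom: resolve a bundled resource against sys._MEIPASS when running frozen, and against the source directory otherwise. A minimal consolidated sketch of that idiom (the function name and the fallback directory are illustrative, not taken from any single snippet):

import os
import sys

def resource_path(relative_path):
    # PyInstaller unpacks bundled data files into a temporary directory and
    # exposes its path as sys._MEIPASS; outside a frozen build, fall back to
    # the current working directory.
    base_path = getattr(sys, "_MEIPASS", os.path.abspath("."))
    return os.path.join(base_path, relative_path)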
Return the file names of libraries that need not be included because they would normally be expected to be found on the target system or because they are part of a package which requires independent installation anyway.
def _GetDefaultBinExcludes(self):
    if sys.platform == "win32":
        return ["comctl32.dll", "oci.dll", "cx_Logging.pyd"]
    else:
        return ["libclntsh.so", "libwtc9.so"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def library_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-L'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-L for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-l'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-l for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def other_libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-other'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-other for package `%s': %s\" % (self.name, stderr))\n\n return uniq(stdout.split())", "def get_libraries_names():\n rpm_packages_path = path.join(PMDK_PATH, 'rpm', SYSTEM_ARCHITECTURE)\n libraries_names = [elem.split('-')[0] for elem in listdir(rpm_packages_path)\n if PMDK_VERSION in elem]\n return set(libraries_names)", "def get_not_installed_rpm_packages():\n def is_installed(elem):\n return elem in PMDK_TOOLS and elem in listdir('/usr/bin/') or\\\n elem == \"pmdk\" or elem + '.so' in listdir('/usr/lib64/')\n\n elements = get_libraries_names()\n not_installed_packages = []\n for elem in elements:\n if not is_installed(elem):\n not_installed_packages.append(elem)\n return not_installed_packages", "def get_incompatible_packages():\n pkgconfig_directory = '/usr/lib64/pkgconfig/'\n incompatibe_packages = []\n libraries = get_libraries_names() - set(NO_PKG_CONFIGS)\n for library in libraries:\n with open(pkgconfig_directory + library + '.pc') as f:\n out = f.readlines()\n for line in out:\n if 'version=' in line:\n version = line.split('=')[1].strip(linesep)\n if not version in PMDK_VERSION.replace('~', '-'):\n incompatibe_packages.append(library)\n return incompatibe_packages", "def FindMissingBinaries(needed_tools):\n return [binary for binary in needed_tools if Which(binary) is None]", "def _GetDefaultBinPathExcludes(self):\n if sys.platform == \"win32\":\n import cx_Freeze.util\n systemDir = cx_Freeze.util.GetSystemDir()\n windowsDir = cx_Freeze.util.GetWindowsDir()\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]\n elif sys.platform == \"darwin\":\n return [\"/lib\", \"/usr/lib\", \"/System/Library/Frameworks\"]\n else:\n return [\"/lib\", \"/lib32\", \"/lib64\", \"/usr/lib\", \"/usr/lib32\",\n \"/usr/lib64\"]", "def get_imported_packages(self):\n package_versions_dict = {'python': sys.version, 'SasView': sas.system.version.__version__}\n err_version_dict = {}\n no_version_list = []\n # Generate a list of standard modules by looking at the local python library\n try:\n standard_lib = [path.stem.split('.')[0] for path in pathlib.Path(pathlib.__file__)\n .parent.absolute().glob('*')]\n except Exception:\n standard_lib = ['abc', 'aifc', 'antigravity', 'argparse', 'ast', 'asynchat', 'asyncio', 'asyncore',\n 'base64', 'bdb', 'binhex', 'bisect', 'bz2', 'calendar', 'cgi', 'cgitb', 'chunk', 'cmd',\n 'code', 'codecs', 'codeop', 'collections', 'colorsys', 'compileall', 'concurrent',\n 'configparser', 'contextlib', 'contextvars', 'copy', 'copyreg', 'cProfile', 'crypt',\n 'csv', 'ctypes', 'curses', 'dataclasses', 'datetime', 'dbm', 'decimal', 'difflib',\n 'dis', 'distutils', 'doctest', 'email', 'encodings', 'ensurepip', 'enum', 'filecmp',\n 'fileinput', 'fnmatch', 'formatter', 'fractions', 'ftplib', 
'functools', 'genericpath',\n 'getopt', 'getpass', 'gettext', 'glob', 'graphlib', 'gzip', 'hashlib', 'heapq', 'hmac',\n 'html', 'http', 'idlelib', 'imaplib', 'imghdr', 'imp', 'importlib', 'inspect', 'io',\n 'ipaddress', 'json', 'keyword', 'lib2to3', 'linecache', 'locale', 'logging', 'lzma',\n 'mailbox', 'mailcap', 'mimetypes', 'modulefinder', 'msilib', 'multiprocessing', 'netrc',\n 'nntplib', 'ntpath', 'nturl2path', 'numbers', 'opcode', 'operator', 'optparse', 'os',\n 'pathlib', 'pdb', 'pickle', 'pickletools', 'pipes', 'pkgutil', 'platform', 'plistlib',\n 'poplib', 'posixpath', 'pprint', 'profile', 'pstats', 'pty', 'pyclbr', 'pydoc',\n 'pydoc_data', 'py_compile', 'queue', 'quopri', 'random', 're', 'reprlib', 'rlcompleter',\n 'runpy', 'sched', 'secrets', 'selectors', 'shelve', 'shlex', 'shutil', 'signal',\n 'site-packages', 'site', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'socketserver', 'sqlite3',\n 'sre_compile', 'sre_constants', 'sre_parse', 'ssl', 'stat', 'statistics', 'string',\n 'stringprep', 'struct', 'subprocess', 'sunau', 'symbol', 'symtable', 'sysconfig',\n 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'test', 'textwrap', 'this', 'threading',\n 'timeit', 'tkinter', 'token', 'tokenize', 'trace', 'traceback', 'tracemalloc', 'tty',\n 'turtle', 'turtledemo', 'types', 'typing', 'unittest', 'urllib', 'uu', 'uuid', 'venv',\n 'warnings', 'wave', 'weakref', 'webbrowser', 'wsgiref', 'xdrlib', 'xml', 'xmlrpc',\n 'zipapp', 'zipfile', 'zipimport', 'zoneinfo', '_aix_support', '_bootlocale',\n '_bootsubprocess', '_collections_abc', '_compat_pickle', '_compression', '_markupbase',\n '_osx_support', '_pydecimal', '_pyio', '_py_abc', '_sitebuiltins', '_strptime',\n '_threading_local', '_weakrefset', '__future__', '__phello__', '__pycache__']\n standard_lib.extend(sys.builtin_module_names)\n standard_lib.append(\"sas\")\n\n for module_name in sys.modules.keys():\n\n package_name = module_name.split('.')[0]\n\n # A built in python module or a local file, which have no version, only the python/SasView version\n if package_name in standard_lib or package_name in package_versions_dict:\n continue\n\n # Import module\n try:\n package = __import__(package_name)\n except Exception as e:\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to import module\"\n continue\n\n # Retrieving the modules version using the __version__ attribute\n if hasattr(package, '__version__'):\n # Module has __version__ attribute\n try:\n package_versions_dict[package_name] = package.__version__\n continue\n except Exception as e:\n # Unable to access module\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n f\"version using .__version__\"\n pass\n\n # Retrieving the modules version using the pkg_resources package\n # Unreliable, so second option\n try:\n package_versions_dict[package_name] = pkg_resources.get_distribution(package_name).version\n except Exception:\n # Modules that cannot be found by pkg_resources\n pass\n else:\n continue\n\n # Modules version number could not be attained by any of the previous methods\n\n no_version_list.append(package_name)\n\n # Currently not required for any packages used by SasView\n # Retrieving the modules version using the version attribute\n # if hasattr(package, 'version'):\n # # Module has version attribute\n # try:\n # if isinstance(package.version, str):\n # print(package)\n # package_versions_dict[package_name] = package.version\n # continue\n # except Exception as e:\n # # Unable to access module\n # 
err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n # f\"version using .version\"\n # pass\n\n # Clean up\n package_versions_dict = self.remove_duplicate_modules(package_versions_dict)\n no_version_dict = self.format_no_version_list(package_versions_dict, no_version_list)\n\n return {\"results\": package_versions_dict, \"no_results\": no_version_dict, \"errors\": err_version_dict}", "def _is_rpm_all_lib_include_files_installed(self):\n return False", "def library_search_path(self, pedantic=False):\n return []", "def get_packages():\n\n packages = find_packages()\n packages = ['{}.{}'.format('uniq', package) for package in packages]\n packages.append('uniq')\n return packages", "def required_packages(cls) -> List[Text]:\n return []", "def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result", "def check_missing_dep():\n global MISSING_PACKAGES, INSTALLED_PACKAGES, ENABLE_CUDA\n if ENABLE_CUDA and IS_MACOS:\n REQUIRED_PACKAGES.extend(MACOS_REQUIRED_PACKAGES)\n MISSING_PACKAGES = []\n for pkg in REQUIRED_PACKAGES:\n key = pkg.split(\"==\")[0]\n if key not in INSTALLED_PACKAGES:\n MISSING_PACKAGES.append(pkg)\n continue\n else:\n if len(pkg.split(\"==\")) > 1:\n if pkg.split(\"==\")[1] != INSTALLED_PACKAGES.get(key):\n MISSING_PACKAGES.append(pkg)\n continue", "def library_dirs(self):", "def include_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--cflags-only-I'])\n\n if status != 0:\n raise RuntimeError(\"error querying --cflags-only-I for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def _GetDefaultBinIncludes(self):\n if sys.platform == \"win32\":\n pythonDll = \"python%s%s.dll\" % sys.version_info[:2]\n return [pythonDll, \"gdiplus.dll\", \"mfc71.dll\", \"msvcp71.dll\",\n \"msvcr71.dll\"]\n else:\n soName = distutils.sysconfig.get_config_var(\"INSTSONAME\")\n if soName is None:\n return []\n pythonSharedLib = self._RemoveVersionNumbers(soName)\n return [pythonSharedLib]", "def missing_in_gyp_by_file(self):\n return self._missing_gyp_files", "def lib_directories(self):\n if self._lib_directories is None:\n self._lib_directories = []\n app_path = os.getcwd()\n contents = os.listdir(app_path)\n for c in contents:\n # ensure content starts with lib, is directory, and is readable\n if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):\n self._lib_directories.append(c)\n return sorted(self._lib_directories, reverse=True)", "def get_library_list(self):\n ret = []\n prefix = \"-l\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/l\"\n for ii in self.__libraries:\n ret += [prefix + ii]\n return ret", "def getLibs(env, categories=\"main\"):\n libs = []\n removeSelf = False\n for category in categories.split():\n if category == \"self\":\n category = \"main\"\n removeSelf = True\n for lib in env.libs[category]:\n if lib not in libs:\n libs.append(lib)\n if removeSelf:\n try:\n libs.remove(env[\"packageName\"])\n except ValueError:\n pass\n return libs", "def list_photo_libraries():\n \"\"\" on MacOS < 10.15, this may omit some libraries \"\"\"\n\n # On 10.15, mdfind appears to find all libraries\n # On older MacOS versions, mdfind appears to ignore some libraries\n # glob to find libraries in ~/Pictures then mdfind to find all the others\n # TODO: make this more robust\n lib_list = glob.glob(f\"{str(Path.home())}/Pictures/*.photoslibrary\")\n\n # On 
older OS, may not get all libraries so make sure we get the last one\n last_lib = get_last_library_path()\n if last_lib:\n lib_list.append(last_lib)\n\n output = subprocess.check_output(\n [\"/usr/bin/mdfind\", \"-onlyin\", \"/\", \"-name\", \".photoslibrary\"]\n ).splitlines()\n for lib in output:\n lib_list.append(lib.decode(\"utf-8\"))\n lib_list = list(set(lib_list))\n lib_list.sort()\n return lib_list", "def get_third_party_package_module_names():\n # type: () -> List[str]\n result = [] # type: List[str]\n\n def is_python_package(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n \"\"\"\n Return package name if the provided file path is a Python package, None otherwise.\n \"\"\"\n file_name = os.path.basename(file_path)\n init_file_path = os.path.join(file_path, \"__init__.py\")\n\n if os.path.isdir(file_path) and os.path.isfile(init_file_path):\n # Package\n return (True, file_name)\n\n return (False, None)\n\n def is_python_module(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n \"\"\"\n Return module name if the provided file path is a Python module, None otherwise.\n \"\"\"\n if (\n os.path.isfile(file_path)\n and file_path.endswith(\".py\")\n and file_name != \"__init__.py\"\n ):\n # Single file module (e.g. six.py)\n module_name = file_name.replace(\".py\", \"\")\n return (True, module_name)\n\n return (False, None)\n\n for directory_path in THIRD_PARTY_DIRECTORIES:\n file_names = os.listdir(directory_path)\n\n for file_name in file_names:\n file_path = os.path.join(directory_path, file_name)\n\n python_package, package_name = is_python_package(directory_path, file_path)\n python_module, module_name = is_python_module(directory_path, file_path)\n\n if python_package and package_name:\n result.append(package_name)\n elif python_module and module_name:\n result.append(module_name)\n\n return result", "def libs(self):\n return self['libs']", "def test_libs_config(self):\n libs = [l for l in os.listdir(framework_libs_dir()) if l != 'libs.conf']\n self.assertTrue(sorted(libs), sorted(self.conf.options('libs')))", "def load_numpy_distutils_misc_util(finder, module):\n module.IgnoreName(\"numscons\")", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def find_undeclared_bundles(bundleDefinitions):\n\n logging.info(\"\\n#{0}\\nChecking for undeclared dependencies\".format('-'*60))\n\n undeclared = []\n\n for bundle in bundleDefinitions:\n logging.info(' {0}'.format(bundle))\n for dependentBundle in bundleDefinitions[bundle]['bundle_dependency']:\n if not dependentBundle in bundleDefinitions:\n undeclared.append(dependentBundle)\n\n return undeclared", "def python_lib_non_arch_dir(self):\n return get_python_lib()", "def GetMissingRequires(self):\n external_dependencies = set(self._required_namespaces)\n\n # Assume goog namespace is always available.\n external_dependencies.add('goog')\n # goog.module is treated as a builtin, too (for goog.module.get).\n external_dependencies.add('goog.module')\n\n created_identifiers = set()\n for unused_namespace, identifier, unused_line_number in (\n self._created_namespaces):\n created_identifiers.add(identifier)\n\n missing_requires = dict()\n illegal_alias_statements = dict()\n\n def ShouldRequireNamespace(namespace, identifier):\n \"\"\"Checks if a namespace would normally be required.\"\"\"\n return (\n not 
self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)\n\n # First check all the used identifiers where we know that their namespace\n # needs to be provided (unless they are optional).\n for ns in self._used_namespaces:\n namespace = ns.namespace\n identifier = ns.identifier\n if (not ns.alias_definition and\n ShouldRequireNamespace(namespace, identifier)):\n missing_requires[namespace] = ns.GetLine()\n\n # Now that all required namespaces are known, we can check if the alias\n # definitions (that are likely being used for typeannotations that don't\n # need explicit goog.require statements) are already covered. If not\n # the user shouldn't use the alias.\n for ns in self._used_namespaces:\n if (not ns.alias_definition or\n not ShouldRequireNamespace(ns.namespace, ns.identifier)):\n continue\n if self._FindNamespace(ns.identifier, self._provided_namespaces,\n created_identifiers, external_dependencies,\n missing_requires):\n continue\n namespace = ns.identifier.rsplit('.', 1)[0]\n illegal_alias_statements[namespace] = ns.token\n\n return missing_requires, illegal_alias_statements", "def selected_lib_roots(args: Namespace) -> List[str]:\n return [LIB_ROOTS[lib] for lib in selected_libs(args)]", "def linking_library_dirs(self):", "def test_deprecated_modules(self):\n\n deprecated_modules_present = False\n\n deprecated_modules = [\n \"game_assets\",\n \"models\",\n \"world\",\n \"modular_assets\",\n ]\n\n for path in self.application_files:\n for module in deprecated_modules:\n module_text = open(path).read()\n found_reference = False\n if \"import %s\" % module in module_text:\n found_reference = True\n if \"from %s\" % module in module_text:\n found_reference = True\n\n if found_reference:\n print(\"Found '%s' reference in %s\" % (module, path))\n deprecated_modules_present = True\n\n self.assertFalse(deprecated_modules_present)", "def selected_libs(args: Namespace) -> List[str]:\n return args.lib or [\"python\", \"lkt\"]", "def selectImports(pth, xtrapath=None):\n rv = []\n if xtrapath is None:\n xtrapath = [os.path.dirname(pth)]\n else:\n assert isinstance(xtrapath, list)\n xtrapath = [os.path.dirname(pth)] + xtrapath # make a copy\n dlls = getImports(pth)\n for lib in dlls:\n if lib.upper() in seen:\n continue\n if not is_win and not is_cygwin:\n # all other platforms\n npth = lib\n lib = os.path.basename(lib)\n else:\n # plain win case\n npth = getfullnameof(lib, xtrapath)\n\n # now npth is a candidate lib if found\n # check again for excludes but with regex FIXME: split the list\n if npth:\n candidatelib = npth\n else:\n candidatelib = lib\n\n if not dylib.include_library(candidatelib):\n if (candidatelib.find('libpython') < 0 and\n candidatelib.find('Python.framework') < 0):\n # skip libs not containing (libpython or Python.framework)\n if npth.upper() not in seen:\n logger.debug(\"Skipping %s dependency of %s\",\n lib, os.path.basename(pth))\n continue\n else:\n pass\n\n if npth:\n if npth.upper() not in seen:\n logger.debug(\"Adding %s dependency of %s from %s\",\n lib, os.path.basename(pth), npth)\n rv.append((lib, npth))\n else:\n # Don't spew out false warnings on win 10 and UCRT (see issue\n # #1566).\n if not (is_win_10 and lib.startswith(\"api-ms-win-crt\")):\n logger.warning(\"lib not found: %s dependency of %s\", lib, pth)\n\n return rv", "def autodetect_files(self):\n 
if self._is_valid_requirements_file('requirements.txt'):\n self.filenames.append('requirements.txt')\n\n if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover\n self.filenames.append('requirements.pip')\n\n if os.path.isdir('requirements'):\n for filename in os.listdir('requirements'):\n file_path = os.path.join('requirements', filename)\n if self._is_valid_requirements_file(file_path):\n self.filenames.append(file_path)\n self._check_inclusions_recursively()", "def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path", "def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files", "def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]", "def print_test_deps_not_in_package_deps(self):\n extras = []\n for key, rec_deps in self.recursive_pkg_deps.items():\n any = self.test_imports.get(key, set()).difference(rec_deps, set([key]))\n if any:\n extras.append((key, any))\n\n if extras:\n print(\"Packages whose tests have extra dependencies not listed in `go list -f {{.Deps}}`:\")\n for pkg, deps in extras:\n print(\"\\t{0}: {1}\".format(pkg, \", \".join(deps)))\n print(\"\\n\")", "def get_install_requires() -> List[str]:\n return [\n \n ]", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' 
and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def minimum_sys(cls):\r\n site_libs = set(cls._site_libs())\r\n for site_lib in site_libs:\r\n TRACER.log('Found site-library: %s' % site_lib)\r\n for extras_path in cls._extras_paths():\r\n TRACER.log('Found site extra: %s' % extras_path)\r\n site_libs.add(extras_path)\r\n site_libs = set(os.path.normpath(path) for path in site_libs)\r\n\r\n sys_modules = cls.minimum_sys_modules(site_libs)\r\n sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs)\r\n\r\n return sys_path, sys_path_importer_cache, sys_modules", "def test_pkglibdir(self):\n self.chck_triple('pkglibdir')", "def libraries(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_libraries()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def _get_third_party_python_libs_directory_contents() -> Dict[str, str]:\n direct_url_packages, standard_packages = utils.partition(\n pkg_resources.find_distributions(common.THIRD_PARTY_PYTHON_LIBS_DIR),\n predicate=_dist_has_meta_data\n )\n\n installed_packages = {\n pkg.project_name: pkg.version for pkg in standard_packages\n }\n\n for pkg in direct_url_packages:\n metadata = json.loads(pkg.get_metadata('direct_url.json'))\n version_string = '%s+%s@%s' % (\n metadata['vcs_info']['vcs'], metadata['url'],\n metadata['vcs_info']['commit_id'])\n installed_packages[pkg.project_name] = version_string\n\n # Libraries with different case are considered equivalent libraries:\n # e.g 'Flask' is the same library as 'flask'. Therefore, we\n # normalize all library names in order to compare libraries without\n # ambiguities.\n directory_contents = {\n normalize_python_library_name(library_name): version_string\n for library_name, version_string in installed_packages.items()\n }\n\n return directory_contents", "def have_package_lists():\n return 'Filename:' in execute('apt-cache', 'show', 'python', check=False, capture=True)", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def find_with_deps(self, package_names):", "def checkLibraries(env):\n # Detect OS X python installation, and attempt to correct for it.\n if os.uname()[0] == 'Darwin':\n env.Replace(SHLINKFLAGS='$LINKFLAGS -bundle -flat_namespace -undefined suppress')\n env.Replace(SHLIBSUFFIX='.so')\n\n # Detect the presence of necessary dependencies.\n conf = Configure(env)\n\n if not conf.CheckLibWithHeader('m', 'math.h', 'c'):\n print \"Can't find standard math libraries.\"\n Exit(1)\n\n env = conf.Finish()\n\n return env", "def _is_rpm_all_lib_include_files_installed(self):\n return self.rpm.is_package_installed('rpm-devel')", "def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]", "def find_jars(path):\n all_jars = set(check_output([\"find\", path, \"-type\", \"f\", \"-name\", \"*.jar\"]).splitlines())\n\n return [j for j in all_jars if (\n \"-tests\" not in j and\n \"-sources\" not in j and\n \"-with-dependencies\" not in j)]", "def get_libraries(self, archs: list[str]):\n libraries = self.ScopedLibraryDict.get(\"common\", []).copy()\n\n for arch in archs:\n libraries + 
self.ScopedLibraryDict.get(arch, []).copy()\n return list(set(libraries))", "def identify_cut(filenames):\n lib_string = \"lib/python\"\n lib_started = False\n for index, filename in enumerate(filenames):\n if not lib_started and lib_string in filename:\n lib_started = True\n if lib_started and lib_string not in filename:\n return index", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def getDefaultDataLibraryFolders():\n return [ 'libraries' ]", "def minimum_sys(cls):\n site_libs = set(cls._site_libs())\n for site_lib in site_libs:\n TRACER.log('Found site-library: %s' % site_lib)\n for extras_path in cls._extras_paths():\n TRACER.log('Found site extra: %s' % extras_path)\n site_libs.add(extras_path)\n site_libs = set(os.path.normpath(path) for path in site_libs)\n\n sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs)\n sys_modules = cls.minimum_sys_modules(site_libs)\n\n return sys_path, sys_path_importer_cache, sys_modules", "def get_libraries(self, project=None):\n unaligned_path = self.get_unaligned_path()\n projects = self.get_projects()\n if project is not None:\n logger.debug(\"subsetting projects\")\n projects = [p for p in projects\n if re.search(project, p)]\n logger.debug(\"collecting list of libraries\")\n logger.debug(\"searching in projects {}\".format(projects))\n # Need to handle possibility of new basespace directory structure\n libList = []\n for p in projects:\n logger.debug(\"Attempting to collect libs for project: {}\".format(p))\n for l in os.listdir(os.path.join(unaligned_path, p)):\n logger.debug(\"Looking for lib name in: {}\".format(l))\n # Old basespace - able to parse libid from current dir\n if (len(parsing.get_library_id(l))):\n libList.append(l)\n # New basespace - need to go down one more level to parse lib\n elif (os.path.isdir(os.path.join(unaligned_path, p, l))): \n logger.debug(\"Lib name not found. 
Going down into: {}\"\n .format(os.path.join(unaligned_path, p, l)))\n for lNext in os.listdir(os.path.join(unaligned_path, p, l)):\n if (len(parsing.get_library_id(lNext))):\n libList.append(os.path.join(l,lNext))\n else:\n logger.debug(\"Lib name not found and {} is not a directory.\"\n .format(os.path.join(unaligned_path, p, l)))\n \n return libList", "def get_required_packages(self) -> list:\n\t\tret = []\n\t\tlocal_packages = ChocoInfo.get_local_packages(\n\t\t\tPUSHED_PACKAGES_PATH)\n\n\t\tprint(\"local_packages\", local_packages)\n\n\t\treturn [c_package for c_package in self._community_packages if c_package not in local_packages]", "def add_installed_libraries(self, extra_libs = [\"SeleniumLibrary\",\n \"SudsLibrary\",\n \"RequestsLibrary\"]):\n\n libdir = os.path.dirname(robot.libraries.__file__)\n loaded = []\n for filename in os.listdir(libdir):\n if filename.endswith(\".py\") or filename.endswith(\".pyc\"):\n libname, ext = os.path.splitext(filename)\n if (libname.lower() not in loaded and\n not self._should_ignore(libname)):\n\n try:\n self.add(libname)\n loaded.append(libname.lower())\n except Exception as e:\n # need a better way to log this...\n self.log.debug(\"unable to add library: \" + str(e))\n\n # I hate how I implemented this, but I don't think there's\n # any way to find out which installed python packages are\n # robot libraries.\n for library in extra_libs:\n if (library.lower() not in loaded and\n not self._should_ignore(library)):\n try:\n self.add(library)\n loaded.append(library.lower())\n except Exception as e:\n self.log.debug(\"unable to add external library %s: %s\" % \\\n (library, str(e)))", "def xontrib_installed(ns=None):\n installed_xontribs = set()\n xontrib_locations = importlib.util.find_spec(\"xontrib2\").submodule_search_locations\n names = None if not ns or len(ns.names) == 0 else set(ns.names)\n if xontrib_locations:\n for xl in xontrib_locations:\n for x in Path(xl).glob(\"*\"):\n name = x.name.split(\".\")[0]\n if name[0] == \"_\" or (names and name not in names):\n continue\n installed_xontribs.add(name)\n return installed_xontribs", "def libraryFolders() -> list:\n\tpaths = [steamDir() + '/steamapps/'] # create a list for library paths\n\ttry:\n\t\t# open the file that contains the library paths\n\t\twith open(steamDir() + '/steamapps/libraryfolders.vdf', 'r') as file:\n\t\t\tlibrary = Property.parse(file, 'libraryfolders.vdf').as_dict()\n\t\t\t# remove useless stuff\n\t\t\tlibrary['libraryfolders'].pop('timenextstatsreport')\n\t\t\tlibrary['libraryfolders'].pop('contentstatsid')\n\texcept Exception as e:\n\t\traise ConfigError(f'Error while reading steam library file: {e}')\n\n\t# check for other library paths, if the dict is empty, there's no one\n\tif len( library['libraryfolders'] ) != 0:\n\t\tfor i in range( len( library['libraryfolders'] ) ):\n\t\t\tpaths.append( library['libraryfolders'][ i ] + '/steamapps/' ) # append the path\n\n\t# return the \"compiled\" list of libraries\n\treturn paths", "def gyp_files(self):\n return set(self._gyp_flags.keys())", "def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages", "def missingConfigFiles(self):\n return [ conf\n for conf in self.configFiles\n if not os.path.exists(conf)\n and not os.path.isfile(conf)\n ]", "def unsatisfied_requirements(buildout, package, working_set):\n\n # read all lines from \"requirements.txt\"\n specs = [k.strip() for 
k in package_readlines(package, 'requirements.txt')]\n\n # discard empty lines and comments\n specs = [k for k in specs if k and k[0] not in ('#', '-')]\n\n # do not consider packages which are already installed, with a reasonable\n # version matching the user specification, either on the current working\n # set, the installed eggs or the system paths\n newest = bool_option(buildout, 'newest', 'true')\n\n left_over = []\n for k in specs:\n if requirement_is_satisfied(k, working_set, newest):\n dist = working_set.require(k)[0]\n logger.info(\"taking requirement `%s' (%s) from `%s'\", dist.key,\n dist.version, dist.location)\n else:\n left_over.append(k)\n specs = left_over\n\n return left_over", "def get_library_directory_list(self):\n ret = []\n prefix = \"-L\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/L\"\n for ii in self.__library_directories:\n ret += [prefix + ii]\n if self.__command_basename.startswith(\"ld\"):\n ret += [\"-rpath-link\", \":\".join(self.__library_directories)]\n return ret", "def __dir__():\n import pkgutil\n\n names = [\n name\n for importer, name, ispkg in pkgutil.iter_modules(__path__)\n if not ispkg and name != \"base\"\n ]\n return names + [\"custom\", \"noData\"]", "def get_system_modules():\n # print(\"## \" + \"System modules \" + \"#\"*60)\n import sys\n\n system_modules = sorted(sys.modules.keys())\n # for m in system_modules:\n # print(m)\n\n # print(\"## \" + \"pkg_resources \" + \"#\"*60)\n pkg_resources_pkgs = []\n for dist in __import__(\"pkg_resources\").working_set:\n if dist.project_name not in system_modules:\n pkg_resources_pkgs.append(dist.project_name)\n\n pkg_resources_pkgs = sorted(pkg_resources_pkgs)\n\n # for p in pkg_resources_pkgs:\n # print(p)\n\n # print(\"## \" + \"pkgutil \" + \"#\"*60)\n import pkgutil\n\n pkg_utils = []\n for m in pkgutil.iter_modules():\n if m[1] not in (system_modules + pkg_resources_pkgs):\n pkg_utils.append(m[1])\n pkg_utils = sorted(pkg_utils)\n # for m in pkg_utils:\n # print(m)\n return sorted(system_modules + pkg_resources_pkgs + pkg_utils)", "def libs(self):\n\n return LibraryList(\"/usr/lib/libSystem.dylib\")", "def find_site_packages(prefixes):\n\n from distutils.sysconfig import get_python_lib\n\n # Standard prefixes to check\n PYTHONDIR = 'python%d.%d' % sys.version_info[0:2]\n SUFFIXES = uniq([\n get_python_lib(prefix=''),\n os.path.join('lib', PYTHONDIR, 'site-packages'),\n os.path.join('lib32', PYTHONDIR, 'site-packages'),\n os.path.join('lib64', PYTHONDIR, 'site-packages'),\n ])\n\n retval = []\n\n for k in prefixes:\n for suffix in SUFFIXES:\n candidate = os.path.realpath(os.path.join(k, suffix))\n if os.path.exists(candidate) and candidate not in retval:\n retval.append(candidate)\n\n return retval", "def show_missing():\n if missing_modules:\n info(\"The following modules are currently not installed and would enable additional tasks:\")\n for pkg_name in missing_modules:\n info(' ' + pkg_name)", "def install_requires():\n skip_install_requires = environ.get('SKIP_INSTALL_REQUIRES')\n if not skip_install_requires:\n with open('requirements.pip') as r:\n return r.readlines()\n return []", "def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) 
if not is_pkg]", "def load_numpy_distutils_system_info(finder, module):\n module.IgnoreName(\"Numeric\")", "def get_non_vendor_package_path(aea_project_path: Path) -> Set[Path]:\n result: Set[Path] = set()\n for item_type_plural in ComponentType.plurals():\n nonvendor_package_dir_of_type = aea_project_path / item_type_plural\n result = result.union(\n {p for p in nonvendor_package_dir_of_type.iterdir() if p.is_dir()}\n if nonvendor_package_dir_of_type.exists()\n else {}\n )\n return result", "def listInstalledLibraries(self):\n calcEngine = CalcEngine.factory(self.client_session)\n result = calcEngine.listInstalledLibraries()\n return result", "def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def getsitepackages():\n # For now used only on Windows. Raise Exception for other platforms.\n if is_win:\n pths = [os.path.join(sys.prefix, 'Lib', 'site-packages')]\n # Include Real sys.prefix for virtualenv.\n if is_virtualenv:\n pths.append(os.path.join(base_prefix, 'Lib', 'site-packages'))\n return pths\n else:\n # TODO Implement for Python 2.6 on other platforms.\n raise NotImplementedError()", "def extra_link_args(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-other'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-other for package `%s': %s\" % (self.name, stderr))\n\n return stdout.strip().split()", "def getsitepackages():\n\n _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32\n _is_pypy = hasattr(sys, 'pypy_version_info')\n _is_jython = sys.platform[:4] == 'java'\n\n prefixes = [sys.prefix, sys.exec_prefix]\n\n sitepackages = []\n seen = set()\n\n for prefix in prefixes:\n if not prefix or prefix in seen:\n continue\n seen.add(prefix)\n\n if sys.platform in ('os2emx', 'riscos') or _is_jython:\n sitedirs = [os.path.join(prefix, \"Lib\", \"site-packages\")]\n elif _is_pypy:\n sitedirs = [os.path.join(prefix, 'site-packages')]\n elif sys.platform == 'darwin' and prefix == sys.prefix:\n if prefix.startswith(\"/System/Library/Frameworks/\"): # Apple's Python\n sitedirs = [os.path.join(\"/Library/Python\", sys.version[:3], \"site-packages\"),\n os.path.join(prefix, \"Extras\", \"lib\", \"python\")]\n\n else: # any other Python distros on OSX work this way\n sitedirs = [os.path.join(prefix, \"lib\",\n \"python\" + sys.version[:3], \"site-packages\")]\n\n elif os.sep == '/':\n sitedirs = [os.path.join(prefix,\n \"lib\",\n \"python\" + sys.version[:3],\n \"site-packages\"),\n os.path.join(prefix, \"lib\", \"site-python\"),\n ]\n lib64_dir = os.path.join(prefix, \"lib64\", \"python\" + sys.version[:3], \"site-packages\")\n if (os.path.exists(lib64_dir) and\n os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):\n if _is_64bit:\n sitedirs.insert(0, lib64_dir)\n else:\n sitedirs.append(lib64_dir)\n try:\n # sys.getobjects only available in --with-pydebug build\n sys.getobjects\n sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))\n except AttributeError:\n pass\n # Debian-specific dist-packages directories:\n sitedirs.append(os.path.join(prefix, \"local/lib\",\n \"python\" + sys.version[:3],\n \"dist-packages\"))\n sitedirs.append(os.path.join(prefix, \"lib\",\n \"python\" + sys.version[:3],\n \"dist-packages\"))\n if sys.version_info[0] >= 3:\n sitedirs.append(os.path.join(prefix, \"lib\",\n \"python\" + sys.version[0],\n \"dist-packages\"))\n sitedirs.append(os.path.join(prefix, \"lib\", \"dist-python\"))\n else:\n sitedirs = [prefix, os.path.join(prefix, \"lib\", \"site-packages\")]\n if 
sys.platform == 'darwin':\n # for framework builds *only* we add the standard Apple\n # locations. Currently only per-user, but /Library and\n # /Network/Library could be added too\n if 'Python.framework' in prefix:\n home = os.environ.get('HOME')\n if home:\n sitedirs.append(\n os.path.join(home,\n 'Library',\n 'Python',\n sys.version[:3],\n 'site-packages'))\n for sitedir in sitedirs:\n sitepackages.append(os.path.abspath(sitedir))\n\n sitepackages = [p for p in sitepackages if os.path.isdir(p)]\n return sitepackages", "def _get_dependencies():\n return config.check_driver_dependencies(__virtualname__, {\"XenAPI\": HAS_XEN_API})", "def _GetDependentFiles(self, path):\n dependentFiles = self.dependentFiles.get(path)\n if dependentFiles is None:\n if sys.platform == \"win32\":\n origPath = os.environ[\"PATH\"]\n os.environ[\"PATH\"] = origPath + os.pathsep + \\\n os.pathsep.join(sys.path)\n import cx_Freeze.util\n try:\n dependentFiles = cx_Freeze.util.GetDependentFiles(path)\n except cx_Freeze.util.BindError:\n # Sometimes this gets called when path is not actually a library\n # See issue 88\n dependentFiles = []\n os.environ[\"PATH\"] = origPath\n else:\n dependentFiles = []\n if sys.platform == \"darwin\":\n command = 'otool -L \"%s\"' % path\n splitString = \" (compatibility\"\n dependentFileIndex = 0\n else:\n command = 'ldd \"%s\"' % path\n splitString = \" => \"\n dependentFileIndex = 1\n for line in os.popen(command):\n parts = line.expandtabs().strip().split(splitString)\n if len(parts) != 2:\n continue\n dependentFile = parts[dependentFileIndex].strip()\n if dependentFile == os.path.basename(path):\n continue\n if dependentFile in (\"not found\", \"(file not found)\"):\n fileName = parts[0]\n if fileName not in self.linkerWarnings:\n self.linkerWarnings[fileName] = None\n message = \"WARNING: cannot find %s\\n\" % fileName\n sys.stdout.write(message)\n continue\n if dependentFile.startswith(\"(\"):\n continue\n pos = dependentFile.find(\" (\")\n if pos >= 0:\n dependentFile = dependentFile[:pos].strip()\n if dependentFile:\n dependentFiles.append(dependentFile)\n if sys.platform == \"darwin\":\n # Make library paths absolute. This is needed to use\n # cx_Freeze on OSX in e.g. 
a conda-based distribution.\n # Note that with @rpath we just assume Python's lib dir,\n # which should work in most cases.\n dirname = os.path.dirname(path)\n dependentFiles = [p.replace('@loader_path', dirname)\n for p in dependentFiles]\n dependentFiles = [p.replace('@rpath', sys.prefix + '/lib')\n for p in dependentFiles]\n dependentFiles = self.dependentFiles[path] = \\\n [f for f in dependentFiles if self._ShouldCopyFile(f)]\n return dependentFiles", "def DEPENDENCIES(self):\n pass", "def get_library_dirs():\n if DAALTK_HOME_ENV_VAR not in os.environ:\n raise Exception(\"Required environment variable %s not set\" % DAALTK_HOME_ENV_VAR)\n\n daaltk_home = os.environ[DAALTK_HOME_ENV_VAR]\n return [daaltk_home, os.path.join(daaltk_home, LIB_DIR)]", "def import_packages_global():\n return \"\"", "def check_libraries(env):\n # Detect OS X python installation, and attempt to correct for it.\n if os.uname()[0] == 'Darwin':\n env.Replace(SHLINKFLAGS='$LINKFLAGS -bundle -flat_namespace -undefined suppress')\n env.Replace(SHLIBSUFFIX='.so')\n if os.path.isdir('/opt/local'):\n env.Append(\n LIBPATH=['/opt/local/lib'],\n CPPPATH=['/opt/local/include']\n )\n\n # Detect the presence of necessary dependencies.\n conf = Configure(env)\n\n if not conf.CheckLibWithHeader('m', 'math.h', 'c'):\n print \"Can't find standard math libraries.\"\n Exit(1)\n\n if not conf.CheckLibWithHeader('python%s' % python_version,\n 'Python.h', 'c'):\n print \"Can't find python %s.\" % python_version\n Exit(1)\n\n env = conf.Finish()\n\n return env", "def _check_imports():\n\n optlist = ['ALPSO', 'CONMIN', 'FSQP', 'IPOPT', 'NLPQLP',\n 'NSGA2', 'PSQP', 'SLSQP', 'SNOPT', 'NLPY_AUGLAG', 'NOMAD']\n\n for optimizer in optlist[:]:\n try:\n __import__('pyoptsparse', globals(), locals(), [optimizer], 0)\n except ImportError:\n optlist.remove(optimizer)\n\n return optlist", "def _determine_local_import_names(start_dir):\n file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]\n return [\n basename\n for basename, extension\n in file_ext_pairs\n if extension == '.py' or os.path.isdir(\n os.path.join(start_dir, basename))\n and basename not in ('__pycache__')]", "def missing_in_gn_by_file(self):\n return self._missing_gn_files", "def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... 
\" + str(ret))\n return ret", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def examineLoadLibrary(lib):\n from PyJobTransformsCore.envutil import examine_library\n\n # turn module name into library name\n if not lib.startswith('lib') and not lib.endswith('.so'):\n lib = 'lib' + lib + '.so'\n print (\"Examining library \" + lib)\n diagLines = []\n errorAcronym = None\n missingSystemLibs = []\n missingOtherLibs = []\n misLibs = examine_library(lib)\n for l in misLibs:\n if systemLibsRE.search(l):\n missingSystemLibs.append(l)\n else:\n missingOtherLibs.append(l)\n if missingSystemLibs:\n if len(missingSystemLibs) == 1: libWord = 'library'\n else: libWord = 'libraries'\n diagLines.append( 'Site problem: Missing system %s: %s' % (libWord, ','.join(missingSystemLibs)) )\n if not errorAcronym: errorAcronym = \"ATH_SITE_SYSLIBS\"\n\n if missingOtherLibs:\n if len(missingOtherLibs) == 1: libWord = 'library'\n else: libWord = 'libraries'\n diagLines.append( 'Can not find %s: %s Please check software installation.' % (libWord,','.join(missingOtherLibs)) )\n if not errorAcronym: errorAcronym = \"ATH_SITE_LIBS\"\n return (errorAcronym,os.linesep.join(diagLines))", "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper.sh',\n 'tools/valgrind/memcheck/suppressions.txt',\n 'tools/valgrind/memcheck/suppressions_android.txt']", "def get_mismatches() -> MismatchType:\n requirements_contents = _get_requirements_file_contents()\n directory_contents = _get_third_party_python_libs_directory_contents()\n\n mismatches: MismatchType = {}\n for normalized_library_name in requirements_contents:\n # Library exists in the directory and the requirements file.\n if normalized_library_name in directory_contents:\n # Library matches but version doesn't match.\n if (directory_contents[normalized_library_name] !=\n requirements_contents[normalized_library_name]):\n mismatches[normalized_library_name] = (\n requirements_contents[normalized_library_name],\n directory_contents[normalized_library_name])\n # Library exists in the requirements file but not in the directory.\n else:\n mismatches[normalized_library_name] = (\n requirements_contents[normalized_library_name], None)\n\n for normalized_library_name in directory_contents:\n # Library exists in the directory but is not in the requirements file.\n if normalized_library_name not in requirements_contents:\n mismatches[normalized_library_name] = (\n None, directory_contents[normalized_library_name])\n\n return mismatches", "def _check_lib_folder(self):\n possible_homes = []\n try:\n possible_homes.append(sys.prefix)\n possible_homes.append(sys.exec_prefix)\n possible_homes.append(sys.base_prefix) # base home for venv\n possible_homes.append(sys.base_exec_prefix)\n except AttributeError:\n # sys.base_prefix and sys.base_exec_prefix aren't available in 2.7\n pass\n for home in set(possible_homes):\n lib_folder = os.path.join(home, 'lib')\n abpath = self._check_folder(lib_folder)\n if abpath is not None:\n return abpath", "def ignore_pyc(root,names):\n return [name for name in names if name.endswith('pyc')]" ]
[ "0.7460289", "0.73898304", "0.71631336", "0.7133726", "0.70093113", "0.68778294", "0.67150843", "0.6555115", "0.6427538", "0.6418846", "0.6387556", "0.6331555", "0.62914765", "0.6284729", "0.62831557", "0.6266173", "0.622727", "0.616253", "0.6143471", "0.6061587", "0.6038263", "0.60035014", "0.5996275", "0.5992362", "0.5992113", "0.59867203", "0.5968575", "0.5957055", "0.5897884", "0.58894", "0.5886815", "0.5880284", "0.5879611", "0.5878689", "0.5876186", "0.5867698", "0.5853259", "0.58517176", "0.58469206", "0.5838552", "0.58292055", "0.5814717", "0.5811181", "0.5811181", "0.580746", "0.57946837", "0.5794258", "0.5786008", "0.5761874", "0.57487243", "0.57483923", "0.5747459", "0.57469493", "0.5734069", "0.57174", "0.57151306", "0.57010126", "0.56982726", "0.56969655", "0.56857", "0.5679292", "0.5677747", "0.5672379", "0.56698275", "0.56674343", "0.56626457", "0.5650142", "0.56493443", "0.56364423", "0.56363046", "0.56332666", "0.5629443", "0.5627832", "0.56193835", "0.56162757", "0.56114167", "0.5610601", "0.56054574", "0.5592711", "0.55920506", "0.5591376", "0.5587448", "0.5580822", "0.5576063", "0.557327", "0.5568251", "0.55578685", "0.5554716", "0.555136", "0.55469215", "0.5540743", "0.5538193", "0.55262655", "0.55242974", "0.55120814", "0.54869854", "0.54828477", "0.54795104", "0.5478217", "0.54749584" ]
0.6424925
9
Return the file names of libraries which must be included for the frozen executable to work.
def _GetDefaultBinIncludes(self):
    if sys.platform == "win32":
        pythonDll = "python%s%s.dll" % sys.version_info[:2]
        return [pythonDll, "gdiplus.dll", "mfc71.dll", "msvcp71.dll",
                "msvcr71.dll"]
    else:
        soName = distutils.sysconfig.get_config_var("INSTSONAME")
        if soName is None:
            return []
        pythonSharedLib = self._RemoveVersionNumbers(soName)
        return [pythonSharedLib]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-l'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-l for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result", "def library_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-L'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-L for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def get_libraries_names():\n rpm_packages_path = path.join(PMDK_PATH, 'rpm', SYSTEM_ARCHITECTURE)\n libraries_names = [elem.split('-')[0] for elem in listdir(rpm_packages_path)\n if PMDK_VERSION in elem]\n return set(libraries_names)", "def gyp_files(self):\n return set(self._gyp_flags.keys())", "def _GetDefaultBinPathExcludes(self):\n if sys.platform == \"win32\":\n import cx_Freeze.util\n systemDir = cx_Freeze.util.GetSystemDir()\n windowsDir = cx_Freeze.util.GetWindowsDir()\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]\n elif sys.platform == \"darwin\":\n return [\"/lib\", \"/usr/lib\", \"/System/Library/Frameworks\"]\n else:\n return [\"/lib\", \"/lib32\", \"/lib64\", \"/usr/lib\", \"/usr/lib32\",\n \"/usr/lib64\"]", "def get_install_requires() -> List[str]:\n return [\n \n ]", "def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def _is_rpm_all_lib_include_files_installed(self):\n return False", "def other_libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-other'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-other for package `%s': %s\" % (self.name, stderr))\n\n return uniq(stdout.split())", "def required_packages(cls) -> List[Text]:\n return []", "def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages", "def get_included_files(space):\n files = space.ec.interpreter.included_files\n arr_list = []\n for f in files:\n arr_list.append(space.newstr(f))\n return space.new_array_from_list(arr_list)", "def include_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--cflags-only-I'])\n\n if status != 0:\n raise RuntimeError(\"error querying --cflags-only-I for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def library_dirs(self):", "def library_search_path(self, pedantic=False):\n return []", "def install_requires():\n skip_install_requires = environ.get('SKIP_INSTALL_REQUIRES')\n if not skip_install_requires:\n with open('requirements.pip') as r:\n return r.readlines()\n return []", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def FindMissingBinaries(needed_tools):\n return [binary for binary in needed_tools if Which(binary) is None]", "def get_library_list(self):\n ret = []\n prefix = \"-l\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/l\"\n for ii in self.__libraries:\n ret += [prefix + ii]\n return ret", "def 
get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def autodetect_files(self):\n if self._is_valid_requirements_file('requirements.txt'):\n self.filenames.append('requirements.txt')\n\n if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover\n self.filenames.append('requirements.pip')\n\n if os.path.isdir('requirements'):\n for filename in os.listdir('requirements'):\n file_path = os.path.join('requirements', filename)\n if self._is_valid_requirements_file(file_path):\n self.filenames.append(file_path)\n self._check_inclusions_recursively()", "def listInstalledLibraries(self):\n calcEngine = CalcEngine.factory(self.client_session)\n result = calcEngine.listInstalledLibraries()\n return result", "def get_imported_packages(self):\n package_versions_dict = {'python': sys.version, 'SasView': sas.system.version.__version__}\n err_version_dict = {}\n no_version_list = []\n # Generate a list of standard modules by looking at the local python library\n try:\n standard_lib = [path.stem.split('.')[0] for path in pathlib.Path(pathlib.__file__)\n .parent.absolute().glob('*')]\n except Exception:\n standard_lib = ['abc', 'aifc', 'antigravity', 'argparse', 'ast', 'asynchat', 'asyncio', 'asyncore',\n 'base64', 'bdb', 'binhex', 'bisect', 'bz2', 'calendar', 'cgi', 'cgitb', 'chunk', 'cmd',\n 'code', 'codecs', 'codeop', 'collections', 'colorsys', 'compileall', 'concurrent',\n 'configparser', 'contextlib', 'contextvars', 'copy', 'copyreg', 'cProfile', 'crypt',\n 'csv', 'ctypes', 'curses', 'dataclasses', 'datetime', 'dbm', 'decimal', 'difflib',\n 'dis', 'distutils', 'doctest', 'email', 'encodings', 'ensurepip', 'enum', 'filecmp',\n 'fileinput', 'fnmatch', 'formatter', 'fractions', 'ftplib', 'functools', 'genericpath',\n 'getopt', 'getpass', 'gettext', 'glob', 'graphlib', 'gzip', 'hashlib', 'heapq', 'hmac',\n 'html', 'http', 'idlelib', 'imaplib', 'imghdr', 'imp', 'importlib', 'inspect', 'io',\n 'ipaddress', 'json', 'keyword', 'lib2to3', 'linecache', 'locale', 'logging', 'lzma',\n 'mailbox', 'mailcap', 'mimetypes', 'modulefinder', 'msilib', 'multiprocessing', 'netrc',\n 'nntplib', 'ntpath', 'nturl2path', 'numbers', 'opcode', 'operator', 'optparse', 'os',\n 'pathlib', 'pdb', 'pickle', 'pickletools', 'pipes', 'pkgutil', 'platform', 'plistlib',\n 'poplib', 'posixpath', 'pprint', 'profile', 'pstats', 'pty', 'pyclbr', 'pydoc',\n 'pydoc_data', 'py_compile', 'queue', 'quopri', 'random', 're', 'reprlib', 'rlcompleter',\n 'runpy', 'sched', 'secrets', 'selectors', 'shelve', 'shlex', 'shutil', 'signal',\n 'site-packages', 'site', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'socketserver', 'sqlite3',\n 'sre_compile', 'sre_constants', 'sre_parse', 'ssl', 'stat', 'statistics', 'string',\n 'stringprep', 'struct', 'subprocess', 'sunau', 'symbol', 'symtable', 'sysconfig',\n 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'test', 'textwrap', 'this', 'threading',\n 'timeit', 'tkinter', 'token', 'tokenize', 'trace', 'traceback', 'tracemalloc', 'tty',\n 'turtle', 'turtledemo', 'types', 'typing', 'unittest', 'urllib', 'uu', 'uuid', 'venv',\n 'warnings', 'wave', 'weakref', 'webbrowser', 'wsgiref', 'xdrlib', 'xml', 'xmlrpc',\n 'zipapp', 'zipfile', 'zipimport', 'zoneinfo', '_aix_support', '_bootlocale',\n '_bootsubprocess', '_collections_abc', '_compat_pickle', '_compression', '_markupbase',\n 
'_osx_support', '_pydecimal', '_pyio', '_py_abc', '_sitebuiltins', '_strptime',\n '_threading_local', '_weakrefset', '__future__', '__phello__', '__pycache__']\n standard_lib.extend(sys.builtin_module_names)\n standard_lib.append(\"sas\")\n\n for module_name in sys.modules.keys():\n\n package_name = module_name.split('.')[0]\n\n # A built in python module or a local file, which have no version, only the python/SasView version\n if package_name in standard_lib or package_name in package_versions_dict:\n continue\n\n # Import module\n try:\n package = __import__(package_name)\n except Exception as e:\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to import module\"\n continue\n\n # Retrieving the modules version using the __version__ attribute\n if hasattr(package, '__version__'):\n # Module has __version__ attribute\n try:\n package_versions_dict[package_name] = package.__version__\n continue\n except Exception as e:\n # Unable to access module\n err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n f\"version using .__version__\"\n pass\n\n # Retrieving the modules version using the pkg_resources package\n # Unreliable, so second option\n try:\n package_versions_dict[package_name] = pkg_resources.get_distribution(package_name).version\n except Exception:\n # Modules that cannot be found by pkg_resources\n pass\n else:\n continue\n\n # Modules version number could not be attained by any of the previous methods\n\n no_version_list.append(package_name)\n\n # Currently not required for any packages used by SasView\n # Retrieving the modules version using the version attribute\n # if hasattr(package, 'version'):\n # # Module has version attribute\n # try:\n # if isinstance(package.version, str):\n # print(package)\n # package_versions_dict[package_name] = package.version\n # continue\n # except Exception as e:\n # # Unable to access module\n # err_version_dict[package_name] = f\"Unknown: {e} when attempting to access {package_name} \" \\\n # f\"version using .version\"\n # pass\n\n # Clean up\n package_versions_dict = self.remove_duplicate_modules(package_versions_dict)\n no_version_dict = self.format_no_version_list(package_versions_dict, no_version_list)\n\n return {\"results\": package_versions_dict, \"no_results\": no_version_dict, \"errors\": err_version_dict}", "def get_libraries(self, archs: list[str]):\n libraries = self.ScopedLibraryDict.get(\"common\", []).copy()\n\n for arch in archs:\n libraries + self.ScopedLibraryDict.get(arch, []).copy()\n return list(set(libraries))", "def libs(self):\n return self['libs']", "def libraries(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_libraries()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def _GetDependentFiles(self, path):\n dependentFiles = self.dependentFiles.get(path)\n if dependentFiles is None:\n if sys.platform == \"win32\":\n origPath = os.environ[\"PATH\"]\n os.environ[\"PATH\"] = origPath + os.pathsep + \\\n os.pathsep.join(sys.path)\n import cx_Freeze.util\n try:\n dependentFiles = cx_Freeze.util.GetDependentFiles(path)\n except cx_Freeze.util.BindError:\n # Sometimes this gets called when path is not actually a library\n # See issue 88\n dependentFiles = []\n os.environ[\"PATH\"] = origPath\n else:\n dependentFiles = []\n if sys.platform == \"darwin\":\n command = 'otool -L \"%s\"' % path\n splitString = \" (compatibility\"\n dependentFileIndex = 0\n else:\n 
command = 'ldd \"%s\"' % path\n splitString = \" => \"\n dependentFileIndex = 1\n for line in os.popen(command):\n parts = line.expandtabs().strip().split(splitString)\n if len(parts) != 2:\n continue\n dependentFile = parts[dependentFileIndex].strip()\n if dependentFile == os.path.basename(path):\n continue\n if dependentFile in (\"not found\", \"(file not found)\"):\n fileName = parts[0]\n if fileName not in self.linkerWarnings:\n self.linkerWarnings[fileName] = None\n message = \"WARNING: cannot find %s\\n\" % fileName\n sys.stdout.write(message)\n continue\n if dependentFile.startswith(\"(\"):\n continue\n pos = dependentFile.find(\" (\")\n if pos >= 0:\n dependentFile = dependentFile[:pos].strip()\n if dependentFile:\n dependentFiles.append(dependentFile)\n if sys.platform == \"darwin\":\n # Make library paths absolute. This is needed to use\n # cx_Freeze on OSX in e.g. a conda-based distribution.\n # Note that with @rpath we just assume Python's lib dir,\n # which should work in most cases.\n dirname = os.path.dirname(path)\n dependentFiles = [p.replace('@loader_path', dirname)\n for p in dependentFiles]\n dependentFiles = [p.replace('@rpath', sys.prefix + '/lib')\n for p in dependentFiles]\n dependentFiles = self.dependentFiles[path] = \\\n [f for f in dependentFiles if self._ShouldCopyFile(f)]\n return dependentFiles", "def get_required_extensions(self):\n return []", "def DEPENDENCIES(self):\n pass", "def selected_lib_roots(args: Namespace) -> List[str]:\n return [LIB_ROOTS[lib] for lib in selected_libs(args)]", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def check_requirements():\n process_output = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0] for r in process_output.split()]\n if 'pandas' and 'matplotlib' in installed_packages:\n return True\n else:\n print('You don`t have one of required libralies\\n'\n 'I can`t create histogram\\n'\n 'Required libralies: \\n'\n '->pandas\\n'\n '->matplotlib\\n')\n return False", "def get_code_dependencies(self):\n pip_commands = ['pip', 'pip3', '/usr/local/bin/pip3']\n for pip_cmd in pip_commands:\n try:\n raw_stdout = subprocess.check_output([pip_cmd, 'freeze'])\n except FileNotFoundError:\n continue\n\n dependencies = raw_stdout.decode('ascii').split('\\n')[0:-1]\n if dependencies:\n return dependencies\n else:\n msg = \"Couldn't find pip executable in: {}\"\n raise ValueError(msg.format(','.join(pip_commands)))", "def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]", "def linking_library_dirs(self):", "def get_required_module_descriptors(self):\r\n return []", "def lib_directories(self):\n if self._lib_directories is None:\n self._lib_directories = []\n app_path = os.getcwd()\n contents = os.listdir(app_path)\n for c in contents:\n # ensure content starts with lib, is directory, and is readable\n if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):\n self._lib_directories.append(c)\n return sorted(self._lib_directories, reverse=True)", "def freeze():\n proc = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)\n with open('requirements.txt', 'wb') as fout:\n fout.write(proc.stdout)", "def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with 
open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')", "def getImports(pth):\n if is_win or is_cygwin:\n if pth.lower().endswith(\".manifest\"):\n return []\n try:\n return _getImports_pe(pth)\n except Exception as exception:\n # Assemblies can pull in files which aren't necessarily PE,\n # but are still needed by the assembly. Any additional binary\n # dependencies should already have been handled by\n # selectAssemblies in that case, so just warn, return an empty\n # list and continue.\n # For less specific errors also log the traceback.\n logger.warning('Can not get binary dependencies for file: %s', pth)\n logger.warning(\n ' Reason: %s', exception,\n exc_info=not isinstance(exception, pefile.PEFormatError))\n return []\n elif is_darwin:\n return _getImports_macholib(pth)\n else:\n return _getImports_ldd(pth)", "def missing_in_gyp_by_file(self):\n return self._missing_gyp_files", "def get_dependencies():\n return config.check_driver_dependencies(\n __virtualname__, {\"profitbricks\": HAS_PROFITBRICKS}\n )", "def requires():\n install_reqs = parse_requirements(join(CWD, 'requirements', 'base.txt'),\n session=False)\n return [str(ir.req) for ir in install_reqs]", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def selected_libs(args: Namespace) -> List[str]:\n return args.lib or [\"python\", \"lkt\"]", "def get_checked_define_files(self):\n return (self._files['src/config.h'],\n self._files['src/gromacs/simd/simd.h'],\n self._files['src/gromacs/ewald/pme_simd.h'],\n self._files['src/gromacs/nbnxm/nbnxm_simd.h'])", "def GetIncludedFilesForHeaderString():\n\n # Don't really need to automate this as it'll be the same for all of them. 
\n include_files_string = (IncludeString(\"\\\"ChasteSerialization.hpp\\\"\") + \n IncludeString(\"<boost/serialization/base_object.hpp>\") + \n IncludeString(\"<boost/serialization/shared_ptr.hpp>\") + \n \"\\n\" + \n IncludeString(\"<cmath>\") + \n IncludeString(\"<iostream>\") + \n IncludeString(\"\\\"AbstractOdeSystem.hpp\\\"\\n\") )\n\n return include_files_string", "def _GetDefaultBinExcludes(self):\n if sys.platform == \"win32\":\n return [\"comctl32.dll\", \"oci.dll\", \"cx_Logging.pyd\"]\n else:\n return [\"libclntsh.so\", \"libwtc9.so\"]", "def modules():\n cmd = \"{} -M\".format(_detect_os())\n ret = {}\n ret[\"static\"] = []\n ret[\"shared\"] = []\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n for line in out:\n comps = line.split()\n if not comps:\n continue\n if \"(static)\" in line:\n ret[\"static\"].append(comps[0])\n if \"(shared)\" in line:\n ret[\"shared\"].append(comps[0])\n return ret", "def install_requires():\n return reqs(\"requirements.txt\")", "def install_requires():\n return reqs('requirements.txt')", "def learn_requirements():\n req_file = \"requirements.txt\"\n reqs = []\n\n import os\n\n path = os.path.dirname(__file__)\n req_file = os.path.join(path, \"..\", req_file)\n if not os.path.exists(req_file):\n # not needed with installed package\n return reqs\n\n excludes = \"versioneer coveralls coverage\".split()\n with open(req_file, \"r\") as fp:\n buf = fp.read().strip().splitlines()\n for req in buf:\n req = req.strip()\n if (\n req != \"\"\n and not req.startswith(\"#\")\n and req not in excludes\n ):\n reqs.append(req)\n return reqs", "def xontrib_installed(ns=None):\n installed_xontribs = set()\n xontrib_locations = importlib.util.find_spec(\"xontrib2\").submodule_search_locations\n names = None if not ns or len(ns.names) == 0 else set(ns.names)\n if xontrib_locations:\n for xl in xontrib_locations:\n for x in Path(xl).glob(\"*\"):\n name = x.name.split(\".\")[0]\n if name[0] == \"_\" or (names and name not in names):\n continue\n installed_xontribs.add(name)\n return installed_xontribs", "def list_photo_libraries():\n \"\"\" on MacOS < 10.15, this may omit some libraries \"\"\"\n\n # On 10.15, mdfind appears to find all libraries\n # On older MacOS versions, mdfind appears to ignore some libraries\n # glob to find libraries in ~/Pictures then mdfind to find all the others\n # TODO: make this more robust\n lib_list = glob.glob(f\"{str(Path.home())}/Pictures/*.photoslibrary\")\n\n # On older OS, may not get all libraries so make sure we get the last one\n last_lib = get_last_library_path()\n if last_lib:\n lib_list.append(last_lib)\n\n output = subprocess.check_output(\n [\"/usr/bin/mdfind\", \"-onlyin\", \"/\", \"-name\", \".photoslibrary\"]\n ).splitlines()\n for lib in output:\n lib_list.append(lib.decode(\"utf-8\"))\n lib_list = list(set(lib_list))\n lib_list.sort()\n return lib_list", "def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg]", "def get_requirements():\n requirements_list = []\n\n if not os.path.isfile(REQUIREMENTS_FILE):\n # Check if requirements file did not exist.\n return requirements_list\n\n with open(REQUIREMENTS_FILE) as reqs:\n for install in reqs:\n 
requirements_list.append(install.strip())\n\n return requirements_list", "def get_incompatible_packages():\n pkgconfig_directory = '/usr/lib64/pkgconfig/'\n incompatibe_packages = []\n libraries = get_libraries_names() - set(NO_PKG_CONFIGS)\n for library in libraries:\n with open(pkgconfig_directory + library + '.pc') as f:\n out = f.readlines()\n for line in out:\n if 'version=' in line:\n version = line.split('=')[1].strip(linesep)\n if not version in PMDK_VERSION.replace('~', '-'):\n incompatibe_packages.append(library)\n return incompatibe_packages", "def get_library_directory_list(self):\n ret = []\n prefix = \"-L\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/L\"\n for ii in self.__library_directories:\n ret += [prefix + ii]\n if self.__command_basename.startswith(\"ld\"):\n ret += [\"-rpath-link\", \":\".join(self.__library_directories)]\n return ret", "def libraryFolders() -> list:\n\tpaths = [steamDir() + '/steamapps/'] # create a list for library paths\n\ttry:\n\t\t# open the file that contains the library paths\n\t\twith open(steamDir() + '/steamapps/libraryfolders.vdf', 'r') as file:\n\t\t\tlibrary = Property.parse(file, 'libraryfolders.vdf').as_dict()\n\t\t\t# remove useless stuff\n\t\t\tlibrary['libraryfolders'].pop('timenextstatsreport')\n\t\t\tlibrary['libraryfolders'].pop('contentstatsid')\n\texcept Exception as e:\n\t\traise ConfigError(f'Error while reading steam library file: {e}')\n\n\t# check for other library paths, if the dict is empty, there's no one\n\tif len( library['libraryfolders'] ) != 0:\n\t\tfor i in range( len( library['libraryfolders'] ) ):\n\t\t\tpaths.append( library['libraryfolders'][ i ] + '/steamapps/' ) # append the path\n\n\t# return the \"compiled\" list of libraries\n\treturn paths", "def get_packages():\n\n packages = find_packages()\n packages = ['{}.{}'.format('uniq', package) for package in packages]\n packages.append('uniq')\n return packages", "def libs(self):\n\n return LibraryList(\"/usr/lib/libSystem.dylib\")", "def get_default_pip_requirements(include_cloudpickle=False):\n _check_soft_dependencies(\"mlflow\", severity=\"error\")\n from mlflow.utils.requirements_utils import _get_pinned_requirement\n\n pip_deps = [_get_pinned_requirement(\"sktime\")]\n if include_cloudpickle:\n pip_deps += [_get_pinned_requirement(\"cloudpickle\")]\n\n return pip_deps", "def include_dirs(self):", "def install_requires():\n return [\n \"SQLAlchemy~=1.3\",\n \"bibtexparser~=0.6.2\",\n \"click~=6.7\",\n \"nltk~=3.4\",\n \"numpy~=1.17\",\n \"langdetect\",\n \"langcodes\",\n \"PyPDF2~=1.26\",\n \"tabulate~=0.7\",\n \"tqdm~=4.11.2\",\n \"requests>2,<3\",\n ]", "def dependencies(self) -> List[Bundle]:\n return []", "def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires", "def build_env_wheels() -> Iterable[Path]:\n return []", "def _is_rpm_all_lib_include_files_installed(self):\n return self.rpm.is_package_installed('rpm-devel')", "def log_installed_python_prereqs():\n sh(\"pip freeze > {}\".format(Env.GEN_LOG_DIR + \"/pip_freeze.log\"))", "def _get_dependencies():\n return config.check_driver_dependencies(__virtualname__, {\"XenAPI\": HAS_XEN_API})", "def test_libs_config(self):\n libs = [l for l in os.listdir(framework_libs_dir()) if l != 'libs.conf']\n self.assertTrue(sorted(libs), 
sorted(self.conf.options('libs')))", "def _get_third_party_python_libs_directory_contents() -> Dict[str, str]:\n direct_url_packages, standard_packages = utils.partition(\n pkg_resources.find_distributions(common.THIRD_PARTY_PYTHON_LIBS_DIR),\n predicate=_dist_has_meta_data\n )\n\n installed_packages = {\n pkg.project_name: pkg.version for pkg in standard_packages\n }\n\n for pkg in direct_url_packages:\n metadata = json.loads(pkg.get_metadata('direct_url.json'))\n version_string = '%s+%s@%s' % (\n metadata['vcs_info']['vcs'], metadata['url'],\n metadata['vcs_info']['commit_id'])\n installed_packages[pkg.project_name] = version_string\n\n # Libraries with different case are considered equivalent libraries:\n # e.g 'Flask' is the same library as 'flask'. Therefore, we\n # normalize all library names in order to compare libraries without\n # ambiguities.\n directory_contents = {\n normalize_python_library_name(library_name): version_string\n for library_name, version_string in installed_packages.items()\n }\n\n return directory_contents", "def get_dependencies(self):\n return [\"make\", \"g++\", \"gcc\", \"cmake-2.8.12.1\", \"boost_1_56_0\"]", "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]", "def minimum_sys(cls):\r\n site_libs = set(cls._site_libs())\r\n for site_lib in site_libs:\r\n TRACER.log('Found site-library: %s' % site_lib)\r\n for extras_path in cls._extras_paths():\r\n TRACER.log('Found site extra: %s' % extras_path)\r\n site_libs.add(extras_path)\r\n site_libs = set(os.path.normpath(path) for path in site_libs)\r\n\r\n sys_modules = cls.minimum_sys_modules(site_libs)\r\n sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs)\r\n\r\n return sys_path, sys_path_importer_cache, sys_modules", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' 
and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def find_requirements():\n with open(\"requirements.txt\", 'r') as f:\n return f.read().splitlines()", "def get_used_define_files(self):\n return set(self._used_defines.keys())", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def included_files(self) -> Iterable[str]:\n return self._incl_files", "def filepaths(self) -> Dict[str, 'BinPackageFile']:\n return self._get_package_files()", "def _get_base_files(self):\n setup_file = path.join(self.PyCogentDirectory, 'setup.py')\n #reqs_file = path.join(self.PyCogentDirectory, 'cogent-requirements.txt')\n #return [(setup_file, 'Python'), (reqs_file, 'Properties')]\n return [(setup_file, 'Python')]", "def requires(self):\n return []", "def get_included_files(self):\n return self._includedfiles", "def get_dependencies(self):\n dependencies = self._dependencies\n if self.ansible is not None:\n dependencies.append(\"ansible=={}.*\".format(self.ansible))\n else:\n dependencies.append(\"ansible\")\n # Drivers can have their own dependencies\n if self.scenario.driver is not None \\\n and self.scenario.driver in DRIVER_DEPENDENCIES.keys():\n dependencies.extend(DRIVER_DEPENDENCIES[self.scenario.driver])\n # Scenarios can specify a requirements.txt\n if self.scenario.requirements is not None:\n dependencies.append(\"-r\" + self.scenario.requirements)\n return dependencies", "def get_installation_paths(versions=None):\n\n pass", "def listFeatures() :\n global features\n features = [feature.split(\".\")[0] for feature in os.listdir(os.path.abspath(__file__)[:-11])\n if feature.endswith(\".py\") and feature != \"__init__.py\"]", "def get_required_packages(self) -> list:\n\t\tret = []\n\t\tlocal_packages = ChocoInfo.get_local_packages(\n\t\t\tPUSHED_PACKAGES_PATH)\n\n\t\tprint(\"local_packages\", local_packages)\n\n\t\treturn [c_package for c_package in self._community_packages if c_package not in local_packages]", "def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]", "def _setup_classpath_runtime_binary(self):\n\n logger.debug(\"Running from binary distribution.\")\n classpath = []\n\n if not self._runtime_path:\n logger.debug(\"runtime_path is empty, no classpath can be \" \"determined\")\n return []\n\n if self.runtime_version < 5:\n classpath.extend(\n [\n os.path.join(self._runtime_path, \"server\", \"*\"),\n os.path.join(self._runtime_path, \"server\", \"lib\", \"*\"),\n os.path.join(self._runtime_path, \"runtime\", \"*\"),\n os.path.join(self._runtime_path, \"runtime\", \"lib\", \"*\"),\n ]\n )\n elif self.runtime_version >= 5:\n classpath.extend(\n [\n os.path.join(\n self._runtime_path,\n \"runtime\",\n \"felix\",\n \"bin\",\n \"felix.jar\",\n ),\n os.path.join(\n self._runtime_path,\n \"runtime\",\n \"lib\",\n \"com.mendix.xml-apis-1.4.1.jar\",\n ),\n ]\n )\n\n return classpath", "def _get_compile_cache_dep_files():\n if entry_script_path is None:\n logger.warning(\"Can not get the entry script file path.\")\n return []\n 
compile_cache_dep_files = []\n logger.debug(f\"entry script file path: {entry_script_path}\")\n compile_cache_dep_files.append(entry_script_path)\n __get_compile_cache_dep_files(entry_script_path, compile_cache_dep_files, None)\n return compile_cache_dep_files", "def get_install_requires():\n requirements = []\n for line in open('requirements.txt').readlines():\n # skip to next iteration if comment or empty line\n if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):\n continue\n # add line to requirements\n requirements.append(line)\n return requirements", "def _get_dependencies(requirements_file: Path) -> List[str]:\n lines = requirements_file.read_text().strip().split('\\n')\n return [line for line in lines if not line.startswith('#')]", "def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path", "def checkExecutables( filenames ):\n \n missing = []\n\n for filename in filenames:\n if not which( filename ):\n missing.append( filename )\n\n if missing:\n raise ValueError( \"missing executables: %s\" % \",\".join(missing) )", "def gn_files(self):\n return set(self._gn_flags.keys())", "def minimum_sys(cls):\n site_libs = set(cls._site_libs())\n for site_lib in site_libs:\n TRACER.log('Found site-library: %s' % site_lib)\n for extras_path in cls._extras_paths():\n TRACER.log('Found site extra: %s' % extras_path)\n site_libs.add(extras_path)\n site_libs = set(os.path.normpath(path) for path in site_libs)\n\n sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs)\n sys_modules = cls.minimum_sys_modules(site_libs)\n\n return sys_path, sys_path_importer_cache, sys_modules", "def add_installed_libraries(self, extra_libs = [\"SeleniumLibrary\",\n \"SudsLibrary\",\n \"RequestsLibrary\"]):\n\n libdir = os.path.dirname(robot.libraries.__file__)\n loaded = []\n for filename in os.listdir(libdir):\n if filename.endswith(\".py\") or filename.endswith(\".pyc\"):\n libname, ext = os.path.splitext(filename)\n if (libname.lower() not in loaded and\n not self._should_ignore(libname)):\n\n try:\n self.add(libname)\n loaded.append(libname.lower())\n except Exception as e:\n # need a better way to log this...\n self.log.debug(\"unable to add library: \" 
+ str(e))\n\n # I hate how I implemented this, but I don't think there's\n # any way to find out which installed python packages are\n # robot libraries.\n for library in extra_libs:\n if (library.lower() not in loaded and\n not self._should_ignore(library)):\n try:\n self.add(library)\n loaded.append(library.lower())\n except Exception as e:\n self.log.debug(\"unable to add external library %s: %s\" % \\\n (library, str(e)))" ]
[ "0.6991022", "0.6678766", "0.6654032", "0.66060007", "0.6492413", "0.6385442", "0.6274169", "0.62589043", "0.62560844", "0.62152314", "0.6175629", "0.61445343", "0.61295563", "0.61234623", "0.61047596", "0.6061235", "0.6051311", "0.6039806", "0.6035648", "0.603107", "0.6014711", "0.60123914", "0.6009939", "0.59982836", "0.5975805", "0.5972929", "0.5966429", "0.5950516", "0.5925375", "0.590469", "0.59021544", "0.5870829", "0.5846746", "0.5844992", "0.58427054", "0.5835871", "0.58314085", "0.5826918", "0.5820845", "0.5807729", "0.5797813", "0.5763477", "0.5735737", "0.5731751", "0.5721657", "0.5704976", "0.56945115", "0.56913257", "0.5686515", "0.56734866", "0.5658111", "0.56408316", "0.5612251", "0.56110615", "0.5584318", "0.5583657", "0.55809426", "0.5580093", "0.5575965", "0.557494", "0.55711704", "0.55650645", "0.55566996", "0.5552604", "0.5550496", "0.55481434", "0.5545534", "0.5541258", "0.55325866", "0.55311495", "0.55293316", "0.5527578", "0.5526269", "0.5518877", "0.55170745", "0.5515736", "0.551551", "0.551551", "0.55125123", "0.55017304", "0.54870915", "0.5483372", "0.5481586", "0.5475287", "0.5468676", "0.5465055", "0.5463534", "0.5458214", "0.5452889", "0.5444264", "0.5420138", "0.5411029", "0.5410881", "0.54075176", "0.5398732", "0.5397983", "0.5394662", "0.53902364", "0.5385078", "0.5383296" ]
0.6333758
6
Return the paths of directories which contain files that should not be included, generally because they contain standard system libraries.
def _GetDefaultBinPathExcludes(self):
    if sys.platform == "win32":
        import cx_Freeze.util
        systemDir = cx_Freeze.util.GetSystemDir()
        windowsDir = cx_Freeze.util.GetWindowsDir()
        return [windowsDir, systemDir, os.path.join(windowsDir, "WinSxS")]
    elif sys.platform == "darwin":
        return ["/lib", "/usr/lib", "/System/Library/Frameworks"]
    else:
        return ["/lib", "/lib32", "/lib64", "/usr/lib", "/usr/lib32",
                "/usr/lib64"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeduppaths():\r\n # This ensures that the initial path provided by the interpreter contains\r\n # only absolute pathnames, even if we're running from the build directory.\r\n L = []\r\n known_paths = set()\r\n for dir in sys.path:\r\n # Filter out duplicate paths (on case-insensitive file systems also\r\n # if they only differ in case); turn relative paths into absolute\r\n # paths.\r\n dir, dircase = makepath(dir)\r\n if not dircase in known_paths:\r\n L.append(dir)\r\n known_paths.add(dircase)\r\n sys.path[:] = L\r\n return known_paths", "def include_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--cflags-only-I'])\n\n if status != 0:\n raise RuntimeError(\"error querying --cflags-only-I for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def lib_directories(self):\n if self._lib_directories is None:\n self._lib_directories = []\n app_path = os.getcwd()\n contents = os.listdir(app_path)\n for c in contents:\n # ensure content starts with lib, is directory, and is readable\n if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):\n self._lib_directories.append(c)\n return sorted(self._lib_directories, reverse=True)", "def library_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-L'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-L for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def storer_paths():\n return [dir_unchecked(), dir_checked(),\n dir_backup(), dir_tests()]", "def include_dirs(self):", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def library_search_path(self, pedantic=False):\n return []", "def get_source_paths():\r\n script_paths = set()\r\n try:\r\n script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))\r\n script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))\r\n except AttributeError:\r\n logger.debug('No custom environ variables set.')\r\n\r\n cwd = os.path.dirname(os.path.abspath(__file__))\r\n for each in os.listdir(cwd):\r\n path = os.path.join(cwd, each)\r\n if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n script_paths.add(path)\r\n\r\n return script_paths", "def get_non_vendor_package_path(aea_project_path: Path) -> Set[Path]:\n result: Set[Path] = set()\n for item_type_plural in ComponentType.plurals():\n nonvendor_package_dir_of_type = aea_project_path / item_type_plural\n result = result.union(\n {p for p in nonvendor_package_dir_of_type.iterdir() if p.is_dir()}\n if nonvendor_package_dir_of_type.exists()\n else {}\n )\n return result", "def get_missing_sources(source_paths, files_only=False):\n missing_sources = [\n source_path\n for source_path in source_paths\n if (not os.path.isdir(source_path) or files_only) and not os.path.isfile(source_path)\n ]\n return missing_sources", "def top_level_directories(self):\n return [d for d in self.directories if len([x for x in self.directories if x in d]) == 1]", "def get_unignored_file_paths(ignore_list=None, whitelist=None):\n unignored_files = []\n if ignore_list is None:\n ignore_list = []\n if whitelist is None:\n whitelist = []\n\n for root, dirs, files in os.walk(\".\"):\n 
floyd_logger.debug(\"Root:%s, Dirs:%s\", root, dirs)\n\n if ignore_path(unix_style_path(root), ignore_list, whitelist):\n # Reset dirs to avoid going further down this directory.\n # Then continue to the next iteration of os.walk, which causes\n # everything in this directory to be ignored.\n #\n # Note that whitelisted files that are within directories that are\n # ignored will not be whitelisted. This follows the expected\n # behavior established by .gitignore logic:\n # \"It is not possible to re-include a file if a parent directory of\n # that file is excluded.\"\n # https://git-scm.com/docs/gitignore#_pattern_format\n dirs[:] = []\n floyd_logger.debug(\"Ignoring directory : %s\", root)\n continue\n\n for file_name in files:\n file_path = unix_style_path(os.path.join(root, file_name))\n if ignore_path(file_path, ignore_list, whitelist):\n floyd_logger.debug(\"Ignoring file : %s\", file_name)\n continue\n\n unignored_files.append(os.path.join(root, file_name))\n\n return unignored_files", "def getdirs():\n dirs = [i for i in os.listdir(dname) if not \\\n os.path.isfile(os.path.join(dname, i))]\n return dirs", "def get_ignored_dirs(ci_ignore_path):\n with open(ci_ignore_path, 'r') as ignore_file:\n return set([\n normpath(line.strip())\n for line in ignore_file.readlines()\n if not line.startswith('#') and not is_blank(line)\n ])", "def get_theme_base_dirs_unchecked():\n theme_dirs = getattr(settings, \"COMPREHENSIVE_THEME_DIRS\", None)\n\n return get_theme_base_dirs_from_settings(theme_dirs)", "def _files_without_hidden(path):\n return [name for name in os.listdir(path) if not name.startswith('.')]", "def site_paths(buildout, prefixes):\n\n def is_buildout_dir(path):\n return path.startswith(buildout['eggs-directory']) or \\\n path.startswith(buildout['develop-eggs-directory'])\n\n def is_in_prefixes(path):\n return any([path.startswith(k) for k in prefixes])\n\n retval = [os.path.realpath(k) for k in site.sys.path]\n return [k for k in retval if not (is_buildout_dir(k) or is_in_prefixes(k))]", "def test_find_with_excluded_hidden_dirs_relative(self):\n tdir1 = self._make_test_dir('.test1')\n tdir2 = self._make_test_dir('test_2')\n tdir3 = self._make_test_dir('test.3')\n files = [\n os.path.join(tdir1, 'testfile1.py'),\n os.path.join(tdir2, 'testfile2.py'),\n os.path.join(tdir3, 'testfile3.py'),\n ]\n _touch_files(files)\n\n # We must temporarily change the current directory, so that we test against\n # patterns like ./.test1/file instead of /tmp/foo/.test1/file\n with _restore_working_dir():\n\n os.chdir(self.test_tmpdir)\n actual = file_resources.GetCommandLineFiles(\n [os.path.relpath(self.test_tmpdir)],\n recursive=True,\n exclude=['*.test1*'])\n\n self.assertEqual(\n sorted(actual),\n sorted([\n os.path.join(\n os.path.relpath(self.test_tmpdir), os.path.basename(tdir2),\n 'testfile2.py'),\n os.path.join(\n os.path.relpath(self.test_tmpdir), os.path.basename(tdir3),\n 'testfile3.py'),\n ]))", "def required_dirs(self) -> list:\n return [\n self.get(\"campaign.characters.path\"),\n self.get(\"campaign.session.path\"),\n self.get(\"campaign.plot.path\"),\n ]", "def listdir_nohidden(path):\n\treturn glob.glob(os.path.join(path, '*'))", "def _init_pathinfo():\r\n d = set()\r\n for dir in sys.path:\r\n try:\r\n if os.path.isdir(dir):\r\n dir, dircase = makepath(dir)\r\n d.add(dircase)\r\n except TypeError:\r\n continue\r\n return d", "def _find_files(root_dir, should_include):\n paths = [] # Return value.\n\n is_module = lambda path: path.endswith(\".py\")\n\n # os.walk() is new in Python 
2.3\n # http://docs.python.org/library/os.html#os.walk\n for dir_path, dir_names, file_names in os.walk(root_dir):\n new_paths = [os.path.join(dir_path, file_name) for file_name in file_names]\n new_paths = filter(is_module, new_paths)\n new_paths = filter(should_include, new_paths)\n paths.extend(new_paths)\n\n return paths", "def get_directories():\n # get current working dir\n directory = os.getcwd()\n # list of dir to look in repo for files\n directories = [\n directory,\n os.path.expanduser(os.path.join(directory, 'src')),\n os.path.expanduser(os.path.join(directory, 'tests'))\n ]\n return directories", "def get_untracked_files():\n untracked_files = set()\n for _, dirs, files in os.walk(os.getcwd()):\n for d in dirs:\n if d not in staging_obj_names:\n file_path = get_path_outside_wit(filename=d.strip())\n if file_path:\n untracked_files.add(file_path)\n for f in files:\n if f not in staging_obj_names:\n file_path = get_path_outside_wit(filename=f.strip())\n if file_path:\n untracked_files.add(file_path)\n return untracked_files", "def missingConfigFiles(self):\n return [ conf\n for conf in self.configFiles\n if not os.path.exists(conf)\n and not os.path.isfile(conf)\n ]", "def directories(self):\n directories = list(set([\n '/'.join(f.split('/')[:-1]) for f in self.files\n ]))\n return sorted(directories)", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def scrubbed_sys_path():\n for p in sys.path[:]:\n if not isinstance(p, str):\n yield p\n\n # Scrub any/all pex locations from sys.path.\n pp = pathlib.Path(p)\n if pex_root not in pp.parents:\n yield p", "def list_dir_no_hidden(path):\n\n return glob(os.path.join(path, \"*\"))", "def dir_unchecked():\n return abspath('unchecked')", "def _get_file_paths(self, ignored_exts: Optional[Set[str]]) -> List[str]:\n dir_path = os.path.join(self._target_dir, '**')\n all_paths = glob.glob(dir_path, recursive=True)\n if ignored_exts is None:\n return [p for p in all_paths if os.path.isfile(p)]\n file_paths = [p for p in all_paths if self._extr_ext(p) not in ignored_exts]\n return [p for p in file_paths if os.path.isfile(p)]", "def get_htdocs_dirs(self):\n return []", "def get_htdocs_dirs(self):\n return []", "def get_extra_paths(self):\n # Add libraries found by a site .pth files to our extra-paths.\n if 'pth-files' in self.options:\n import site\n for pth_file in self.options['pth-files'].splitlines():\n pth_libs = site.addsitedir(pth_file, set())\n if not pth_libs:\n self.log.warning(\n \"No site *.pth libraries found for pth_file=%s\" % (\n pth_file,))\n else:\n self.log.info(\"Adding *.pth libraries=%s\" % pth_libs)\n self.options['extra-paths'] += '\\n' + '\\n'.join(pth_libs)\n\n # Add local extra-paths.\n return [p.replace('/', os.path.sep) for p in\n self.options['extra-paths'].splitlines() if p.strip()]", "def __dir__():\n import pkgutil\n\n names = [\n name\n for importer, name, ispkg in pkgutil.iter_modules(__path__)\n if not ispkg and name != \"base\"\n ]\n return names + [\"custom\", \"noData\"]", "def library_dirs(self):", "def get_files_not_staged():\n unstaged_files = []\n current_staging_hashes = get_all_path_hashes(staging_path)\n for root, _, files in os.walk(os.getcwd()):\n for f in files:\n file_path = get_path_outside_wit(filename=f)\n if 'staging_area' in root and file_path:\n file_hash = 
get_file_hash(file_path=file_path)\n if file_hash not in current_staging_hashes:\n unstaged_files.append(file_path)\n return unstaged_files", "def get_theme_base_dirs():\n # Return an empty list if theming is disabled\n if not is_comprehensive_theming_enabled():\n return []\n return get_theme_base_dirs_unchecked()", "def included(path):\n if path.endswith(Env.IGNORED_TEST_DIRS):\n return False\n return path.endswith('.py') or os.path.isdir(path)", "def file_list_emptydirs(load):\n # TODO - implement this\n _init()\n\n return []", "def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]", "def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]", "def _get_implicit_folder_imports(self) -> list:\r\n implicit_paths: list = []\r\n\r\n if self.folders_node is None:\r\n return []\r\n\r\n def try_append_path(path: str) -> None:\r\n if os.path.isdir(path) and path not in self.import_paths:\r\n implicit_paths.append(path)\r\n\r\n for folder_node in filter(is_folder_node, self.folders_node):\r\n folder_path: str = os.path.normpath(folder_node.text)\r\n try_append_path(folder_path if os.path.isabs(folder_path) else os.path.join(self.project_path, folder_path))\r\n\r\n return PathHelper.uniqify(implicit_paths)", "def list_missing_files(files):\n\n result = []\n for f in flatten(files):\n if not file_exists(f): result.append(f)\n return None if len(result) == 0 else result", "def get_sitepackage_dirs():\n if 'getsitepackages' in dir(site):\n return site.getsitepackages()\n else:\n # workaround for https://github.com/pypa/virtualenv/issues/355\n return sys.path", "def get_path_names(directory):\n paths_without_source = set()\n paths = glob.glob(source + \"**/*.*\", recursive=True)\n for p in paths:\n paths_without_source.add(p.replace(directory, \"\", 1))\n\n return paths_without_source", "def source_dirs_files(fspath, fil=None):\r\n dirs = []\r\n files = []\r\n for child in fspath.listdir(fil=fil):\r\n if child.basename.startswith('.'):\r\n continue\r\n if child.check(dir=True):\r\n dirs.append(child)\r\n elif child.check(file=True):\r\n if child.ext in ['.pyc', '.pyo']:\r\n continue\r\n files.append(child)\r\n return sorted(dirs), sorted(files)", "def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result", "def files(self):\n self._printer('\\tFiles Walk')\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def get_pythonpath(working_set, buildout, prefixes):\n\n # get all paths available in the current working set\n paths = list(working_set.entries)\n\n if hasattr(zc.buildout.easy_install, 'distribute_loc'):\n prepend_path(zc.buildout.easy_install.distribute_loc, paths)\n elif hasattr(zc.buildout.easy_install, 'setuptools_loc'):\n prepend_path(zc.buildout.easy_install.setuptools_loc, paths)\n else:\n prepend_path(zc.buildout.easy_install.setuptools_path, paths)\n\n return [k for k in working_set.entries \\\n if os.path.realpath(k) not in site_paths(buildout, prefixes)]", "def find_files(top_directory, exclude=[], include_top_directory_in_name=True):\n import os\n import re\n paths_and_names = []\n exclude = [re.compile(exclusion) for exclusion in exclude]\n top_directory = os.path.abspath(os.path.expanduser(top_directory))\n 
parent_directory = os.path.dirname(top_directory)\n for root, dirs, files in os.walk(top_directory, topdown=True):\n dirs.sort(key=str.lower) # Go in case-insensitive alphabetical order\n files.sort(key=str.lower) # Go in case-insensitive alphabetical order\n for exclusion in exclude:\n for d in dirs:\n if exclusion.search(os.path.relpath(d, top_directory)):\n dirs.remove(d)\n for f in files:\n if exclusion.search(os.path.relpath(f, top_directory)):\n files.remove(f)\n for f in files:\n path = os.path.join(root, f)\n if include_top_directory_in_name:\n name = os.path.relpath(path, parent_directory)\n else:\n name = os.path.relpath(path, top_directory)\n paths_and_names.append([path, name])\n return paths_and_names", "def list_files(directory) -> List:\n return sorted(f for f in listdir(directory) if f.endswith('.py') and '__init__.py' not in f)", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def clean_list(path):\n # Remove directories \n clean_file_list = [f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))]\n\n # List files to ignore\n bad_files = ['desktop.ini',\n os.path.basename(__file__)]\n # TODO: Ignore hidden files & self when compiled\n\n # Loop through bad files and remove from list\n for found_file in bad_files:\n if found_file in clean_file_list:\n clean_file_list.remove(found_file)\n return clean_file_list", "def get_basic_search_subdirs(afile):\n if os.path.exists(afile):\n elf_class = get_elf_class(afile)\n if \"ELF32\" == elf_class:\n # Do not search lib64 dir for 32bit binary file\n return ['', 'bin', 'sbin', 'lib', 'usr/bin', 'usr/sbin', 'usr/lib']\n return ['', 'bin', 'sbin', 'lib64', 'usr/bin', 'usr/sbin', 'usr/lib64', 'lib', 'usr/lib']", "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def programs_not_in_path(programs):\n return [program for program in programs if not which(program)]", "def get_htdocs_dirs(self):\n from pkg_resources import resource_filename\n return [('bl', resource_filename(__name__, 'htdocs'))]", "def files_in_folder(self):\n non_til = set()\n filesInFolder = []\n for f in self.find_all_files():\n newstr = f.replace(\"~\", \"\") \n if newstr in self.find_all_files():\n non_til.add(newstr)\n for fs in non_til:\n filesInFolder.append(fs)\n return filesInFolder", "def FindMissingBinaries(needed_tools):\n return [binary for binary in needed_tools if Which(binary) is None]", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' 
and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def test_find_not_should_ignore_path_regexp(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n \".airflowignore_glob\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)", "def _tested_notebooks():\n\n all_notebooks = _list_all_notebooks()\n skipped_notebooks = functools.reduce(\n lambda a, b: a.union(b),\n list(set(glob.glob(g, recursive=True)) for g in SKIP_NOTEBOOKS),\n )\n\n return sorted(\n os.path.abspath(n) for n in all_notebooks.difference(skipped_notebooks)\n )", "def walkdirs(root):\r\n scriptype_paths = collections.defaultdict(set)\r\n for root, subdirs, files in os.walk(root):\r\n\r\n # Filter subdirs\r\n tmpdir = []\r\n for i in subdirs:\r\n if i.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n if '__init__.py' in os.listdir(os.path.join(root, i)):\r\n scriptype_paths['python'].add(root)\r\n continue\r\n tmpdir.append(i)\r\n subdirs[:] = tmpdir\r\n\r\n # If files with extension exists add to right source type.\r\n if ext_exists('.py', files):\r\n scriptype_paths['python'].add(root)\r\n if ext_exists('.mel', files):\r\n scriptype_paths['mel'].add(root)\r\n return scriptype_paths", "def GetLcovDirectories():\n dirs = set()\n for dirname, _, files in os.walk(os.path.join(makani.HOME, 'bazel-bin')):\n for f in files:\n if f.endswith('.gcda'):\n dir_parts = dirname.split(os.sep)\n for i, p in enumerate(dir_parts):\n if p == '_objs':\n dirs.add(os.sep.join(dir_parts[:i+2]))\n\n return sorted(dirs)", "def _determine_local_import_names(start_dir):\n file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]\n return [\n basename\n for basename, extension\n in file_ext_pairs\n if extension == '.py' or os.path.isdir(\n os.path.join(start_dir, basename))\n and basename not in ('__pycache__')]", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def exclude_paths(root, patterns, dockerfile=None):\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n exceptions = [p for p in patterns if p.startswith('!')]\n\n include_patterns = [p[1:] for p in exceptions]\n include_patterns += [dockerfile, '.dockerignore']\n\n exclude_patterns = list(set(patterns) - set(exceptions))\n\n paths = get_paths(root, exclude_patterns, include_patterns,\n has_exceptions=len(exceptions) > 0)\n\n return set(paths).union(\n # If the Dockerfile is in a subdirectory that is excluded, get_paths\n # will not descend into it and the file will be skipped. 
This ensures\n # it doesn't happen.\n set([dockerfile])\n if os.path.exists(os.path.join(root, dockerfile)) else set()\n )", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def get_templates_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [resource_filename(__name__, 'templates')]", "def find_packages(paths=(os.curdir,), exclude=()):\n packages = []\n discarded = []\n\n def _discarded(path):\n for discard in discarded:\n if _under(path, discard):\n return True\n return False\n\n for path in paths:\n path = convert_path(path)\n for root, dirs, files in os.walk(path):\n for dir_ in dirs:\n fullpath = os.path.join(root, dir_)\n if _discarded(fullpath):\n continue\n # we work only with Python packages\n if not _is_package(fullpath):\n discarded.append(fullpath)\n continue\n # see if it's excluded\n excluded = False\n package_name = _package_name(path, fullpath)\n for pattern in exclude:\n if fnmatchcase(package_name, pattern):\n excluded = True\n break\n if excluded:\n continue\n\n # adding it to the list\n packages.append(package_name)\n return packages", "def GetRosIncludePaths():\n try:\n from rospkg import RosPack\n except ImportError:\n return []\n rospack = RosPack()\n includes = []\n includes.append(os.path.expandvars('$ROS_WORKSPACE') + '/devel/include')\n for p in rospack.list():\n if os.path.exists(rospack.get_path(p) + '/include'):\n includes.append(rospack.get_path(p) + '/include')\n for distribution in os.listdir('/opt/ros'):\n includes.append('/opt/ros/' + distribution + '/include')\n return includes", "def folders(self):\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isdir(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def _extract_system_path(self, script):\r\n\r\n DEFAULT_PATH = ['code']\r\n\r\n # Separate paths by :, like the system path.\r\n raw_path = script.get('system_path', '').split(\":\") + DEFAULT_PATH\r\n\r\n # find additional comma-separated modules search path\r\n path = []\r\n\r\n for dir in raw_path:\r\n if not dir:\r\n continue\r\n\r\n # path is an absolute path or a path relative to the data dir\r\n dir = os.path.join(self.capa_system.filestore.root_path, dir)\r\n # Check that we are within the filestore tree.\r\n reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)\r\n if \"..\" in reldir:\r\n log.warning(\"Ignoring Python directory outside of course: %r\", dir)\r\n continue\r\n\r\n abs_dir = os.path.normpath(dir)\r\n path.append(abs_dir)\r\n\r\n return path", "def most_writable_paths(self):\n path_set = [\"/bin\", \"/boot\", \"/builddir\", \"/etc\", \"/home\", \"/lib\", \"/lib64\", \"/media\", \"/mnt\", \"/opt\", \"/root\", \"/sbin\", \"/selinux\", \"/srv\", \"/tmp\", \"/usr\", \"/var\"]\n\n paths = []\n for p in path_set:\n paths += self.list_of_writable_paths_in_path(p)\n\n return paths", "def getFilesOnly(self,files):\n filesOnly = []\n for f in files:\n if not f['is_dir']:\n filesOnly.append(f)\n return filesOnly", "def common_path(self):\n directories = []\n for file in self.keep_files:\n directories.append(os.path.dirname(file))\n if self.keep_directories:\n directories.extend(self.keep_directories)\n\n # special case: having only one directory will cause sadness.\n # it will result in `mv ./* folder` which will move the .git 
folder and :kaboom\"\n if not self.keep_files and len(self.keep_directories) == 1:\n return os.path.dirname(self.keep_directories[0])\n\n return os.path.commonpath(directories)", "def included_files(self) -> Iterable[str]:\n return self._incl_files", "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper.sh',\n 'tools/valgrind/memcheck/suppressions.txt',\n 'tools/valgrind/memcheck/suppressions_android.txt']", "def ignore(ignored_dirs, path):\n return any([normpath(path).startswith(ignore_dir) for ignore_dir in ignored_dirs])", "def path():\n # Exclude path to this script from path.\n this_file = os.path.realpath(__file__)\n this_path = os.path.dirname(this_file)\n return os.pathsep.join(p for p in sys.path if p != this_path)", "def test_find_not_should_ignore_path_glob(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore_glob\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file, \"glob\"):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def get_filepaths_to_remove(scratch_dir, target_dir):\n target_fns = [fp.name for fp in target_dir.glob(\"*/*\") if fp.name[-3:] == \".nc\"]\n\n return [fp for fp in scratch_dir.glob(\"*/*\") if fp.name in target_fns]", "def get_template_directories() -> list[Path]:\n template_directories = []\n for engine in engines.all():\n for template_loader in engine.engine.template_loaders: # type: ignore\n if is_compatible_template_loader(template_loader):\n for template_directory in template_loader.get_dirs():\n if isinstance(template_directory, str):\n template_directory = Path(template_directory)\n template_directories.append(template_directory)\n return template_directories", "def available_files(root_directory, relative_paths):\n root_directory = pathlib.Path(root_directory).resolve()\n if isinstance(relative_paths, str):\n relative_paths = [relative_paths]\n\n available_files = []\n not_found = []\n for relative_path in relative_paths:\n file_list = []\n absolute_path = root_directory / relative_path\n if absolute_path.is_file():\n file_list.append(absolute_path)\n elif absolute_path.is_dir():\n file_list = [path for path in absolute_path.rglob(\"*\") if path.is_file()]\n else:\n file_list = [path for path in root_directory.rglob(relative_path) if path.is_file()]\n if file_list:\n available_files.extend(file_list)\n else:\n not_found.append(relative_path)\n available_files.sort()\n not_found.sort()\n return available_files, not_found", "def get_django_template_dirs():\n template_dirs = []\n if 'django.template.loaders.filesystem.load_template_source' in\\\n settings.TEMPLATE_LOADERS or\\\n 'django.template.loaders.filesystem.Loader' in\\\n settings.TEMPLATE_LOADERS:\n template_dirs.extend(settings.TEMPLATE_DIRS)\n if 'django.template.loaders.app_directories.load_template_source' in\\\n settings.TEMPLATE_LOADERS or\\\n 'django.template.loaders.app_directories.Loader' in\\\n 
settings.TEMPLATE_LOADERS:\n from django.template.loaders.app_directories import app_template_dirs\n template_dirs.extend(app_template_dirs)\n return template_dirs", "def scrub_from_sys_modules():\n for k, m in sys.modules.items():\n if k in sys_modules_whitelist:\n continue\n\n if hasattr(m, '__file__') and m.__file__ is not None:\n mp = pathlib.Path(m.__file__)\n if pex_root in mp.parents:\n yield k", "def get_templates_dirs(self):\n from pkg_resources import resource_filename\n return [resource_filename(__name__, 'templates')]", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def get_htdocs_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [('hw', resource_filename(__name__, 'htdocs'))]", "def get_paths(input_folder: str) -> list[str]:\n\n return [f for f in os.listdir(input_folder) if f[-4:] == '.txt' and f[:3] != 'top']", "def _get_config_dirs(project=None):\n snap = os.environ.get('SNAP')\n snap_c = os.environ.get('SNAP_COMMON')\n\n cfg_dirs = [\n _fixpath(os.path.join('~', '.' + project)) if project else None,\n _fixpath('~'),\n os.path.join('/etc', project) if project else None,\n '/etc',\n os.path.join(snap_c, \"etc\", project) if snap_c and project else None,\n os.path.join(snap, \"etc\", project) if snap and project else None,\n ]\n return [x for x in cfg_dirs if x]", "def exclude_dirs(self, matches: Iterable[str]) -> List[str]:\n filters = [(\"ifmodule\", self.modules.keys()), (\"ifdefine\", self.variables)]\n\n valid_matches = []\n\n for match in matches:\n for filter_ in filters:\n if not self._pass_filter(match, filter_):\n break\n else:\n valid_matches.append(match)\n return valid_matches", "def parse_paths():\r\n sources = get_source_paths()\r\n results = collections.defaultdict(list)\r\n for root_dir in sources:\r\n\r\n for script_type, dirs in walkdirs(root_dir).iteritems():\r\n\r\n for d in dirs:\r\n logger.debug(d)\r\n\r\n # Add paths to environments\r\n if os.path.basename(d).lower().startswith(ICONS):\r\n results['XBMLANGPATH'].append(d)\r\n os.environ['XBMLANGPATH'] += os.pathsep + d\r\n\r\n if script_type == 'mel':\r\n results['MAYA_SCRIPT_PATH'].append(d)\r\n os.environ['MAYA_SCRIPT_PATH'] += os.pathsep + d\r\n else:\r\n results['PYTHONPATH'].append(d)\r\n site.addsitedir(d)\r\n return results", "def get_dirs():\n # join glob matchers\n dirnames = [\n str(dir_path.relative_to(get_data_dir()))\n for dir_path in get_data_dir().rglob(\"*\")\n if dir_path.is_dir()\n ]\n\n return dirnames", "def test_matlab_install_dir_absent(self):\n directories = (\"/\", \"/tmp\")\n for dirname in directories:\n with self.subTest(dirname=dirname):\n self.assertNotIn(\"matlab-install\", self.host.file(dirname).listdir())", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]" ]
[ "0.68841964", "0.66146666", "0.6608054", "0.6544238", "0.6524269", "0.65076166", "0.64406294", "0.64142877", "0.63882494", "0.6343896", "0.6316815", "0.6312666", "0.6243478", "0.6181976", "0.6178829", "0.6160177", "0.615914", "0.61474144", "0.61427253", "0.61281586", "0.6118144", "0.607804", "0.60767174", "0.6061069", "0.60532624", "0.60111415", "0.60055494", "0.6000497", "0.59944016", "0.5977549", "0.5975145", "0.59550935", "0.5934327", "0.5934327", "0.5933611", "0.59097636", "0.5892799", "0.58857447", "0.58805436", "0.58742076", "0.5866373", "0.5857063", "0.5846702", "0.57894874", "0.5784708", "0.57757854", "0.57737374", "0.5749093", "0.57341003", "0.5733533", "0.57322043", "0.5730715", "0.57124937", "0.56989855", "0.5683222", "0.5681102", "0.56770015", "0.56752074", "0.56696343", "0.5658965", "0.56578684", "0.5647928", "0.5647928", "0.56466407", "0.564063", "0.5638635", "0.56356686", "0.5634233", "0.5633382", "0.5633322", "0.5627791", "0.5617645", "0.5608611", "0.55709183", "0.556761", "0.55642456", "0.55602247", "0.5544852", "0.5541989", "0.5538834", "0.5530831", "0.5530105", "0.55275273", "0.5514739", "0.5509794", "0.5503829", "0.550344", "0.55024105", "0.55017537", "0.55016243", "0.5497483", "0.54970866", "0.5496713", "0.5496074", "0.54958856", "0.54901284", "0.5489798", "0.5485175", "0.54833317", "0.5482905" ]
0.7017395
0
Return the file's dependencies using platform-specific tools (the imagehlp library on Windows, otool on Mac OS X and ldd on Linux); limit this list by the exclusion lists as needed
def _GetDependentFiles(self, path):
    dependentFiles = self.dependentFiles.get(path)
    if dependentFiles is None:
        if sys.platform == "win32":
            origPath = os.environ["PATH"]
            os.environ["PATH"] = origPath + os.pathsep + \
                    os.pathsep.join(sys.path)
            import cx_Freeze.util
            try:
                dependentFiles = cx_Freeze.util.GetDependentFiles(path)
            except cx_Freeze.util.BindError:
                # Sometimes this gets called when path is not actually a library
                # See issue 88
                dependentFiles = []
            os.environ["PATH"] = origPath
        else:
            dependentFiles = []
            if sys.platform == "darwin":
                command = 'otool -L "%s"' % path
                splitString = " (compatibility"
                dependentFileIndex = 0
            else:
                command = 'ldd "%s"' % path
                splitString = " => "
                dependentFileIndex = 1
            for line in os.popen(command):
                parts = line.expandtabs().strip().split(splitString)
                if len(parts) != 2:
                    continue
                dependentFile = parts[dependentFileIndex].strip()
                if dependentFile == os.path.basename(path):
                    continue
                if dependentFile in ("not found", "(file not found)"):
                    fileName = parts[0]
                    if fileName not in self.linkerWarnings:
                        self.linkerWarnings[fileName] = None
                        message = "WARNING: cannot find %s\n" % fileName
                        sys.stdout.write(message)
                    continue
                if dependentFile.startswith("("):
                    continue
                pos = dependentFile.find(" (")
                if pos >= 0:
                    dependentFile = dependentFile[:pos].strip()
                if dependentFile:
                    dependentFiles.append(dependentFile)
            if sys.platform == "darwin":
                # Make library paths absolute. This is needed to use
                # cx_Freeze on OSX in e.g. a conda-based distribution.
                # Note that with @rpath we just assume Python's lib dir,
                # which should work in most cases.
                dirname = os.path.dirname(path)
                dependentFiles = [p.replace('@loader_path', dirname)
                                  for p in dependentFiles]
                dependentFiles = [p.replace('@rpath', sys.prefix + '/lib')
                                  for p in dependentFiles]
        dependentFiles = self.dependentFiles[path] = \
                [f for f in dependentFiles if self._ShouldCopyFile(f)]
    return dependentFiles
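For readers of this record: below is a minimal standalone sketch of the same idea as the document above — listing a binary's shared-library dependencies by parsing `otool -L` output on macOS and `ldd` output on Linux. Only the command names and the split markers come from the record; the function name, the use of `subprocess` instead of `os.popen`, and the simplified filtering are illustrative assumptions, not cx_Freeze's API. The Windows branch of the record relies on the imagehlp-based helper `cx_Freeze.util.GetDependentFiles`, which has no one-line portable equivalent, so it is omitted here.

import subprocess
import sys

def list_shared_library_deps(path):
    # Illustrative helper (assumed name): parse `otool -L` (macOS) or `ldd` (Linux) output.
    if sys.platform == "darwin":
        command = ["otool", "-L", path]
        split_string = " (compatibility"
        dependent_index = 0
    else:
        command = ["ldd", path]
        split_string = " => "
        dependent_index = 1
    output = subprocess.run(command, capture_output=True, text=True).stdout
    deps = []
    for line in output.splitlines():
        parts = line.expandtabs().strip().split(split_string)
        if len(parts) != 2:
            continue
        dep = parts[dependent_index].strip()
        if not dep or dep.startswith("(") or "not found" in line:
            continue  # skip vdso-style entries and unresolved libraries
        pos = dep.find(" (")  # drop the "(0x...)" load address that ldd appends
        deps.append(dep[:pos].strip() if pos >= 0 else dep)
    return deps

# Example usage (path is illustrative):
# print(list_shared_library_deps("/usr/lib/x86_64-linux-gnu/libssl.so.3"))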
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper.sh',\n 'tools/valgrind/memcheck/suppressions.txt',\n 'tools/valgrind/memcheck/suppressions_android.txt']", "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',\n 'tools/valgrind/tsan/suppressions.txt',\n 'tools/valgrind/tsan/suppressions_android.txt',\n 'tools/valgrind/tsan/ignores.txt']", "def get_dependencies(self):\n dependencies = self._dependencies\n if self.ansible is not None:\n dependencies.append(\"ansible=={}.*\".format(self.ansible))\n else:\n dependencies.append(\"ansible\")\n # Drivers can have their own dependencies\n if self.scenario.driver is not None \\\n and self.scenario.driver in DRIVER_DEPENDENCIES.keys():\n dependencies.extend(DRIVER_DEPENDENCIES[self.scenario.driver])\n # Scenarios can specify a requirements.txt\n if self.scenario.requirements is not None:\n dependencies.append(\"-r\" + self.scenario.requirements)\n return dependencies", "def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]", "def _get_dependencies(requirements_file: Path) -> List[str]:\n lines = requirements_file.read_text().strip().split('\\n')\n return [line for line in lines if not line.startswith('#')]", "def _get_dependencies():\n return config.check_driver_dependencies(__virtualname__, {\"XenAPI\": HAS_XEN_API})", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def get_dependencies():\n return config.check_driver_dependencies(\n __virtualname__, {\"profitbricks\": HAS_PROFITBRICKS}\n )", "def FindMissingBinaries(needed_tools):\n return [binary for binary in needed_tools if Which(binary) is None]", "def _system_requirement_tools(self, app: AppConfig):\n if app.target_vendor_base == DEBIAN:\n base_system_packages = [\"python3-dev\", \"build-essential\"]\n system_verify = [\"dpkg\", \"-s\"]\n system_installer = \"apt\"\n elif app.target_vendor_base == RHEL:\n base_system_packages = [\n \"python3-devel\",\n \"gcc\",\n \"make\",\n \"pkgconf-pkg-config\",\n ]\n system_verify = [\"rpm\", \"-q\"]\n system_installer = \"dnf\"\n else:\n base_system_packages = None\n system_verify = None\n system_installer = None\n\n return base_system_packages, system_verify, system_installer", "def _GetPossibleFileList(self, filename, only_webkit):\n\n possible_chromium_files = []\n possible_webkit_files = []\n\n reduced_filename = filename.replace(\"LayoutTests/\", \"\")\n chromium_platform_url = LAYOUT_TEST_REPO_BASE_URL\n if not filename.startswith(\"chrome\"):\n chromium_platform_url += \"platform/%s/\"\n chromium_platform_url += filename\n\n webkit_platform_url = WEBKIT_PLATFORM_BASELINE_URL + reduced_filename\n\n if IsMacPlatform(self.platform):\n self._AddBaselineURLs(possible_chromium_files, chromium_platform_url,\n CHROMIUM_MAC_PLATFORM_DIRS)\n self._AddBaselineURLs(possible_webkit_files, webkit_platform_url,\n WEBKIT_MAC_PLATFORM_DIRS)\n elif IsLinuxPlatform(self.platform):\n self._AddBaselineURLs(possible_chromium_files, chromium_platform_url,\n CHROMIUM_LINUX_PLATFORM_DIRS)\n else:\n self._AddBaselineURLs(possible_chromium_files, chromium_platform_url,\n CHROMIUM_WIN_PLATFORM_DIRS)\n\n if not IsMacPlatform(self.platform):\n self._AddBaselineURLs(possible_webkit_files, webkit_platform_url,\n WEBKIT_WIN_PLATFORM_DIRS)\n 
possible_webkit_files.append(WEBKIT_LAYOUT_TEST_BASE_URL + filename)\n\n if only_webkit:\n return possible_webkit_files\n return possible_chromium_files + possible_webkit_files", "def list_photo_libraries():\n \"\"\" on MacOS < 10.15, this may omit some libraries \"\"\"\n\n # On 10.15, mdfind appears to find all libraries\n # On older MacOS versions, mdfind appears to ignore some libraries\n # glob to find libraries in ~/Pictures then mdfind to find all the others\n # TODO: make this more robust\n lib_list = glob.glob(f\"{str(Path.home())}/Pictures/*.photoslibrary\")\n\n # On older OS, may not get all libraries so make sure we get the last one\n last_lib = get_last_library_path()\n if last_lib:\n lib_list.append(last_lib)\n\n output = subprocess.check_output(\n [\"/usr/bin/mdfind\", \"-onlyin\", \"/\", \"-name\", \".photoslibrary\"]\n ).splitlines()\n for lib in output:\n lib_list.append(lib.decode(\"utf-8\"))\n lib_list = list(set(lib_list))\n lib_list.sort()\n return lib_list", "def check_tools_exist(WARNINGS):\n tools_list = []\n Warning_out = WARNINGS + \"Tool executable warning: \"\n try:\n flash.Flash(args.flash)\n tools_list.append(\"flash\")\n except ValueError:\n Warning_out = Warning_out + \"Flash not in path\"\n try:\n error_correction.Error_Correction(args.spades)\n tools_list.append(\"error_correction\")\n except ValueError:\n Warning_out = Warning_out + \"spades.py not in path\\n\"\n try:\n vsearch.Vsearch(args.vsearch)\n tools_list.append(\"vsearch\")\n except ValueError:\n Warning_out = Warning_out + \"vsearch not in path\\n\"\n try:\n trimmomatic.Trimmomatic(args.trimmomatic)\n tools_list.append(\"trimmomatic\")\n except ValueError:\n Warning_out = Warning_out + \"trimmomatic not in path\\n\"\n try:\n swarm.Swarm(args.swarm)\n tools_list.append(\"swarm\")\n except ValueError:\n Warning_out = Warning_out + \"swarm not in path\\n\"\n try:\n samtools_index.Samtools_Index(args.samtools)\n tools_list.append(\"samtools\")\n except ValueError:\n Warning_out = Warning_out + \"samtools not in path\\n\"\n try:\n pear.Pear(args.pear)\n tools_list.append(\"pear\")\n except ValueError:\n Warning_out = Warning_out + \"pear not in path\\n\"\n try:\n muscle.Muscle(args.muscle)\n tools_list.append(\"muscle\")\n except ValueError:\n Warning_out = Warning_out + \"muscle not in path\\n\"\n try:\n fastqc.FastQC(args.fastqc)\n tools_list.append(\"fastqc\")\n except ValueError:\n Warning_out = Warning_out + \"fastqc not in path\\n\"\n try:\n cd_hit.Cd_hit(args.cd_hit)\n tools_list.append(\"cd-hit-est\")\n except ValueError:\n Warning_out = Warning_out + \"cd-hit-est not in path\\n\"\n try:\n bowtie_map.Bowtie2_Map(args.bowtie2)\n tools_list.append(\"bowtie2\")\n except ValueError:\n Warning_out = Warning_out + \"bowtie2 not in path\\n\"\n try:\n blast.Blastclust(args.blastclust)\n tools_list.append(\"blastclust\")\n except ValueError:\n Warning_out = Warning_out + \"blastclust not in path\\n\"\n return tools_list, Warning_out", "def get_tools() -> List[Dict[str, Any]]:\n tools = []\n with os.scandir(PHP_TOOL_PATH) as it:\n for item in it:\n if not item.name.startswith(\".\") and item.is_dir():\n data = get_tool_options(item.name)\n tools.append(\n {\n \"dir\": \"../php_tools/\" + item.name,\n \"name\": data[\"name\"],\n \"exclude\": [str(x) for x in data[\"exclude\"]],\n }\n )\n return sorted(DEFAULT_TOOLS + tools, key=lambda tool: tool[\"name\"].replace(\"*\", \"\"))", "def _GetDefaultBinExcludes(self):\n if sys.platform == \"win32\":\n return [\"comctl32.dll\", \"oci.dll\", 
\"cx_Logging.pyd\"]\n else:\n return [\"libclntsh.so\", \"libwtc9.so\"]", "def find_define_file_uses(self):\n # Executing git grep is substantially faster than using the define_re\n # directly on the contents of the file in Python.\n for define_file in self.get_checked_define_files():\n excluded_files = set([define_file])\n excluded_files.update(define_file.get_included_files(recursive=True))\n all_defines = define_file.get_declared_defines()\n args = ['git', 'grep', '-zwIF']\n for define in all_defines:\n args.extend(['-e', define])\n args.extend(['--', '*.cpp', '*.c', '*.cu', '*.h', '*.cuh'])\n define_re = r'\\b(?:' + '|'.join(all_defines)+ r')\\b'\n output = subprocess.check_output(args, cwd=self._source_root).decode()\n for line in output.splitlines():\n (filename, text) = line.split('\\0')\n fileobj = self._files.get(filename)\n if fileobj is not None and fileobj not in excluded_files:\n defines = re.findall(define_re, text)\n fileobj.add_used_defines(define_file, defines)", "def GetFilesForTool(self):\n raise NotImplementedError()", "def autodetect_files(self):\n if self._is_valid_requirements_file('requirements.txt'):\n self.filenames.append('requirements.txt')\n\n if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover\n self.filenames.append('requirements.pip')\n\n if os.path.isdir('requirements'):\n for filename in os.listdir('requirements'):\n file_path = os.path.join('requirements', filename)\n if self._is_valid_requirements_file(file_path):\n self.filenames.append(file_path)\n self._check_inclusions_recursively()", "def tidy_requirements(requirement_file):\n outdata = []\n with open(requirement_file) as dependencies:\n for line in dependencies:\n line = line.strip()\n if line and not line.startswith('#') and line not in outdata:\n outdata.append(line)\n return outdata", "def depot_tools_base(self):\n depot_tools = self.path_from_chromium_base('third_party',\n 'depot_tools')\n return depot_tools if self._filesystem.isdir(depot_tools) else None", "def get_tools_used_by_groups():\n global tools_used_by_groups\n\n if not tools_used_by_groups:\n tools_used_by_groups = rsh.tools_used_by_groups(get_srcs())\n \n return tools_used_by_groups", "def _get_compile_cache_dep_files():\n if entry_script_path is None:\n logger.warning(\"Can not get the entry script file path.\")\n return []\n compile_cache_dep_files = []\n logger.debug(f\"entry script file path: {entry_script_path}\")\n compile_cache_dep_files.append(entry_script_path)\n __get_compile_cache_dep_files(entry_script_path, compile_cache_dep_files, None)\n return compile_cache_dep_files", "def getImports(pth):\n if is_win or is_cygwin:\n if pth.lower().endswith(\".manifest\"):\n return []\n try:\n return _getImports_pe(pth)\n except Exception as exception:\n # Assemblies can pull in files which aren't necessarily PE,\n # but are still needed by the assembly. 
Any additional binary\n # dependencies should already have been handled by\n # selectAssemblies in that case, so just warn, return an empty\n # list and continue.\n # For less specific errors also log the traceback.\n logger.warning('Can not get binary dependencies for file: %s', pth)\n logger.warning(\n ' Reason: %s', exception,\n exc_info=not isinstance(exception, pefile.PEFormatError))\n return []\n elif is_darwin:\n return _getImports_macholib(pth)\n else:\n return _getImports_ldd(pth)", "def read_deps():\n with open(\"./dependencies.txt\", 'r') as deps:\n return [d for d in re.split(r'\\s', ''.join(deps)) if d]", "def _GetDefaultBinIncludes(self):\n if sys.platform == \"win32\":\n pythonDll = \"python%s%s.dll\" % sys.version_info[:2]\n return [pythonDll, \"gdiplus.dll\", \"mfc71.dll\", \"msvcp71.dll\",\n \"msvcr71.dll\"]\n else:\n soName = distutils.sysconfig.get_config_var(\"INSTSONAME\")\n if soName is None:\n return []\n pythonSharedLib = self._RemoveVersionNumbers(soName)\n return [pythonSharedLib]", "def set_dependency_files(context):\n path_to_direct_file = os.path.abspath('data/gemini_scan_data/direct-dependencies.txt')\n path_to_transitive_file = os.path.abspath('data/gemini_scan_data/transitive-dependencies.txt')\n context.dependency_files = list()\n with open(path_to_direct_file, 'rb') as f:\n context.dependency_files.append((\n \"dependencyFile[]\",\n (\n 'direct-dependencies.txt',\n f.read(),\n 'text/plain'\n )\n ))\n with open(path_to_transitive_file, 'rb') as f:\n context.dependency_files.append((\n \"dependencyFile[]\",\n (\n 'transitive-dependencies.txt',\n f.read(),\n 'text/plain'\n )\n ))", "def get_dependencies_content():\n import trustedanalytics\n dependencies = []\n for filename in trustedanalytics.udf_dependencies:\n name, content = _get_file_content_as_str(filename)\n dependencies.append({'file_name': name, 'file_content': content})\n return dependencies", "def learn_requirements():\n req_file = \"requirements.txt\"\n reqs = []\n\n import os\n\n path = os.path.dirname(__file__)\n req_file = os.path.join(path, \"..\", req_file)\n if not os.path.exists(req_file):\n # not needed with installed package\n return reqs\n\n excludes = \"versioneer coveralls coverage\".split()\n with open(req_file, \"r\") as fp:\n buf = fp.read().strip().splitlines()\n for req in buf:\n req = req.strip()\n if (\n req != \"\"\n and not req.startswith(\"#\")\n and req not in excludes\n ):\n reqs.append(req)\n return reqs", "def check_system_dependencies():\n out_info(\"Checking System Dependencies...\")\n check_cmake()\n if OS_VERSION[0] == \"Windows\":\n check_visual_studio()\n check_cplus_plus()\n if OS_VERSION[0] == \"Linux\":\n check_gcc()\n check_gpp()", "def filter_depend( self, atoms ):\n\n def dep_string_reduce(dep_string,enabled_useflags):\n dest = []\n tokens = iter(dep_string.split())\n useflags = enabled_useflags.split()\n\n for token in tokens:\n if token[-1] == \"?\":\n if token.startswith(\"!\"):\n skip = token[1:-1] in useflags\n else:\n skip = token[:-1] not in useflags\n if skip:\n level = 0\n while 1:\n token = next(tokens)\n if token == \"(\":\n level+=1\n if token == \")\":\n level-=1\n if level < 1:\n break\n continue\n elif token == \"(\" or token == \")\":\n continue\n else:\n dest.append(token)\n\n return \" \".join(dest)\n\n # gjl does not use use flags\n try:\n use = os.environ[\"USE\"]\n atoms = dep_string_reduce(atoms, use)\n except KeyError:\n pass\n return atoms", "def get_dependencies(self):\n return [\"make\", \"g++\", \"gcc\", \"cmake-2.8.12.1\", 
\"boost_1_56_0\"]", "def get_libraries_names():\n rpm_packages_path = path.join(PMDK_PATH, 'rpm', SYSTEM_ARCHITECTURE)\n libraries_names = [elem.split('-')[0] for elem in listdir(rpm_packages_path)\n if PMDK_VERSION in elem]\n return set(libraries_names)", "def gyp_files(self):\n return set(self._gyp_flags.keys())", "def get_tool_version_files():\n similar_files = defaultdict(list)\n for path in Runtime_Datasets.RAW_FILE_PATHS:\n filename = get_file_name(path)\n filename = filename.rsplit('_', 1)[0]\n similar_files[filename].append(path)\n\n Runtime_Datasets.RAW_FILE_PATHS = similar_files", "def depfile_args(self, dep_file):\n return [\"-Wp,-MMD,%s\" % (dep_file)]", "def initialize(finder):\n finder.ExcludeModule(\"FCNTL\")\n finder.ExcludeModule(\"os.path\")\n if os.name == \"nt\":\n finder.ExcludeModule(\"fcntl\")\n finder.ExcludeModule(\"grp\")\n finder.ExcludeModule(\"pwd\")\n finder.ExcludeModule(\"termios\")\n else:\n finder.ExcludeModule(\"_subprocess\")\n finder.ExcludeModule(\"_winreg\")\n finder.ExcludeModule(\"msilib\")\n finder.ExcludeModule(\"msvcrt\")\n finder.ExcludeModule(\"multiprocessing._multiprocessing\")\n finder.ExcludeModule(\"nt\")\n finder.ExcludeModule(\"nturl2path\")\n finder.ExcludeModule(\"pyHook\")\n finder.ExcludeModule(\"pythoncom\")\n finder.ExcludeModule(\"pywintypes\")\n finder.ExcludeModule(\"winerror\")\n finder.ExcludeModule(\"winsound\")\n finder.ExcludeModule(\"win32api\")\n finder.ExcludeModule(\"win32con\")\n finder.ExcludeModule(\"win32gui\")\n finder.ExcludeModule(\"win32event\")\n finder.ExcludeModule(\"win32evtlog\")\n finder.ExcludeModule(\"win32evtlogutil\")\n finder.ExcludeModule(\"win32file\")\n finder.ExcludeModule(\"win32pdh\")\n finder.ExcludeModule(\"win32pipe\")\n finder.ExcludeModule(\"win32process\")\n finder.ExcludeModule(\"win32security\")\n finder.ExcludeModule(\"win32service\")\n finder.ExcludeModule(\"wx.activex\")\n if os.name != \"posix\":\n finder.ExcludeModule(\"posix\")\n if sys.platform != \"darwin\":\n finder.ExcludeModule(\"Carbon\")\n finder.ExcludeModule(\"gestalt\")\n finder.ExcludeModule(\"ic\")\n finder.ExcludeModule(\"mac\")\n finder.ExcludeModule(\"MacOS\")\n finder.ExcludeModule(\"macostools\")\n finder.ExcludeModule(\"macpath\")\n finder.ExcludeModule(\"macurl2path\")\n finder.ExcludeModule(\"_scproxy\")\n if os.name != \"nt\":\n finder.ExcludeModule(\"EasyDialogs\")\n if os.name != \"os2\":\n finder.ExcludeModule(\"os2\")\n finder.ExcludeModule(\"os2emxpath\")\n finder.ExcludeModule(\"_emx_link\")\n if os.name != \"ce\":\n finder.ExcludeModule(\"ce\")\n if os.name != \"riscos\":\n finder.ExcludeModule(\"riscos\")\n finder.ExcludeModule(\"riscosenviron\")\n finder.ExcludeModule(\"riscospath\")\n finder.ExcludeModule(\"rourl2path\")\n if sys.platform[:4] != \"java\":\n finder.ExcludeModule(\"java.lang\")\n finder.ExcludeModule(\"org.python.core\")\n if sys.platform[:4] != \"OpenVMS\":\n finder.ExcludeModule(\"vms_lib\")\n if sys.version_info[0] >= 3:\n finder.ExcludeModule(\"new\")\n finder.ExcludeModule(\"Tkinter\")\n else:\n finder.ExcludeModule(\"tkinter\")", "def gather_required_files(filename):\n # open the file, while ignoring encoding errors (usually comments)\n encoding = open_guess_encoding(filename)\n with open(filename, encoding=encoding, errors='surrogateescape') as fp:\n config = MugenParser()\n config.read_string(fp.read())\n\n # go through each section and store any options that look like filenames\n required = set()\n for section in config.sections():\n section = config[section]\n options = 
set(find_asset(normpath(v)) for k, v in section.items()\n if filename_regex.match(v))\n required.update(options)\n\n # check other def files, then search them and add the results\n root = dirname(filename)\n for child_file in required.copy():\n name, ext = os.path.splitext(child_file)\n if ext.lower() == '.def':\n path = join(root, child_file)\n required.update(gather_required_files(path))\n\n # TODO: this is not implemented\n # mugen does checking against many paths, so we need\n # to emulate that the if we want to check for missing files\n # finally, go through the potential files and verify they exist\n # for child_file in required.copy():\n # path = join(root, child_file)\n # if not os.path.exists(path):\n # required.remove(child_file)\n\n return required", "def _parseDependDotMake( targetBuildDir, platformBuildDir ):\n Any.requireIsTextNonEmpty( targetBuildDir )\n Any.requireIsTextNonEmpty( platformBuildDir )\n\n dependDotMakePath = os.path.join( targetBuildDir, 'depend.make' )\n\n lines = FastScript.getFileContent( dependDotMakePath, splitLines=True )\n result = collections.defaultdict( set )\n\n languageNormalizationMap = {\n '.c' : 'c',\n '.C' : 'c++',\n '.CC' : 'c++',\n '.CPP': 'c++',\n '.CXX': 'c++',\n '.cc' : 'c++',\n '.cpp': 'c++',\n '.cxx': 'c++',\n }\n\n for l in lines:\n # skip comments and empty lines\n if Any.isTextNonEmpty( l ) and not l.startswith( '#' ):\n # lines are in the format\n # /path/to/obj/file.{c,cpp,cc,cxx}.o: /path/to/dependencyfile.{c,cpp,cc,cxx,h,hpp,hxx,hh}\n objFile, depFile = l.split( ':' )\n srcFile, objExt = os.path.splitext( objFile.strip( ) )\n srcName, srcExt = os.path.splitext( srcFile )\n depFile = depFile.strip( )\n _, depFileExt = os.path.splitext( depFile )\n language = languageNormalizationMap[ srcExt ]\n\n if depFileExt.lower( ) in ('.h', '.hxx', '.hpp', '.hh'):\n if not os.path.isabs( depFile ):\n relPath = os.path.join( platformBuildDir, depFile )\n absPath = os.path.abspath( relPath )\n else:\n absPath = depFile\n result[ absPath ].add( language )\n\n\n return result", "def find_requirements():\n with open(\"requirements.txt\", 'r') as f:\n return f.read().splitlines()", "def get_incompatible_packages():\n pkgconfig_directory = '/usr/lib64/pkgconfig/'\n incompatibe_packages = []\n libraries = get_libraries_names() - set(NO_PKG_CONFIGS)\n for library in libraries:\n with open(pkgconfig_directory + library + '.pc') as f:\n out = f.readlines()\n for line in out:\n if 'version=' in line:\n version = line.split('=')[1].strip(linesep)\n if not version in PMDK_VERSION.replace('~', '-'):\n incompatibe_packages.append(library)\n return incompatibe_packages", "def dependencies(project_name):\n deps = []\n logging.info('Locating {}'.format(project_name))\n located = distlib.locators.locate(project_name, prereleases=True)\n if located is None:\n logging.warn('{} not found'.format(project_name))\n return []\n for dep in located.run_requires:\n # Drop any version details from the dependency name.\n deps.append(just_name(dep))\n return deps", "def libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-l'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-l for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def dependencies(pkg, extra=None):\n ret = set()\n for dist in pkg.requires_dist:\n requirement = pkg_resources.Requirement.parse(dist)\n # we replace all underscores with dash, to make package names similiar in all 
cases\n name = requirement.name.replace(\"_\", \"-\")\n if extra:\n # for extras we don't grab dependencies for the main pkg,\n # those are already in the main plg rule\n if not requirement.marker or requirement.marker.evaluate({\"extra\": None}):\n continue\n\n if requirement.marker:\n if not requirement.marker.evaluate({\"extra\": extra}):\n continue\n\n if requirement.extras:\n ret = ret | set(\n [\"{}[{}]\".format(name, dist_extra) for dist_extra in requirement.extras]\n )\n else:\n ret.add(name)\n\n return sorted(list(ret))", "def check_dependencies(work_dir, fits_dir, fitsbase):\n # Print to screen what processing steps have been selected\n print \"The following processing steps have been selected:\\n\"\n if params.do_rfifind:\n print \" - PRESTO rfifind (RFI mitigation tools)\"\n if params.do_prepsub:\n print \" - PRESTO prepsubband (dedispersion)\"\n if params.do_candsearch:\n print \" - PRESTO acceleration search and candidate sifting\"\n if params.do_presto_sp:\n print \" - PRESTO singlepulse search (singlepulse.py)\"\n # Print to screen what processing steps are being skipped\n print \"\\nThe following processing steps are being skipped:\\n\"\n if params.do_rfifind == 0:\n print \" - PRESTO rfifind (RFI mitigation tools)\"\n if params.do_prepsub == 0:\n print \" - PRESTO prepsubband (dedispersion)\"\n if params.do_candsearch == 0:\n print \" - PRESTO acceleration search and candidate sifting\"\n if params.do_presto_sp == 0:\n print \" - PRESTO singlepulse search (singlepulse.py)\"\n print \"\\nChecking dependencies...\\n\"\n # There must be at least one .fits file in the fits directory\n fl = glob(fits_dir + '/%s*.fits' %fitsbase)\n if len(fl):\n print \" Found %d file(s) in %s:\\n\" %(len(fl), fits_dir)\n for i in fl:\n print \" %s\\n\" %(i.split('/')[-1])\n else:\n print \" No %s*.fits files found in %s !\\n Exiting...\\n\" %(fitsbase, fits_dir)\n sys.exit(0)\n # If skipping the RFIFIND step in processing but want to do\n # processing steps further down the line, then there must be a\n # rfi_products folder in the results directory with a .mask file\n # in it\n if params.do_rfifind == 0 and params.use_mask and \\\n (params.do_prepsub or params.do_candsearch or params.do_presto_sp):\n mlist = glob(work_dir + '/rfi_products/*.mask')\n if len(mlist):\n print \" Using RFI .mask:\\n %s\\n\" %(mlist[0])\n else:\n print \" No RFI .mask found in %s/rfi_products!\\n Exiting...\\n\"\\\n %(work_dir)\n sys.exit(0)\n # If skipping the PREPSUBBAND step in processing but want to\n # do processing steps further down the line, then there must be\n # de-dispersed time series files in the results directory of\n # the form basename*DM*.dat and basename*DM*.inf\n if params.do_prepsub == 0 and (params.do_candsearch or \n params.do_presto_sp):\n dats = glob(work_dir + '/*DM*dat')\n infs = glob(work_dir + '/*DM*inf')\n if not (len(dats) and len(infs)):\n print \" No .dat and/or .inf files in %s!\\n Exiting...\\n\" %(work_dir)\n sys.exit(0)\n # If we haven't exited by now, then things should be good\n print \"\\nLooks good...\\n\\n\"\n # Pause for a few seconds so you can actually read the output\n time.sleep(5)", "def checkOptionalDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # soft dependencies\n failed = []\n for opt in self.optmodules:\n mod = self.parent.module(opt)\n if( mod == None ):\n failed.append(opt)\n \n # remove soft dependencies that were not found\n self.buildWithout(failed)", "def test_req_file_parse_no_use_wheel(data):\n 
finder = PackageFinder([], [], session=PipSession())\n for req in parse_requirements(\n data.reqfiles.join(\"supported_options.txt\"), finder,\n session=PipSession()):\n pass\n assert not finder.use_wheel", "def dependencies(self) -> List[Bundle]:\n return []", "def plugin_get_dependency():\n return []", "def get_requirements():\n with open('requirements.txt') as fd:\n lines = fd.read().splitlines()\n requires, links = [], []\n for line in lines:\n if line.startswith('git+'):\n links.append(line)\n elif line:\n requires.append(line)\n return requires, links", "def missing_in_gyp_by_file(self):\n return self._missing_gyp_files", "def _get_base_files(self):\n setup_file = path.join(self.PyCogentDirectory, 'setup.py')\n #reqs_file = path.join(self.PyCogentDirectory, 'cogent-requirements.txt')\n #return [(setup_file, 'Python'), (reqs_file, 'Properties')]\n return [(setup_file, 'Python')]", "def extract_deps(self, srcinfo):\n packages = {}\n pkgname = \"\"\n\n for i in srcinfo.split(\"\\n\"):\n if not i:\n continue\n if i[0] == \"#\":\n continue\n option = i.strip()\n key, value = option.split(\" = \")\n if key == \"pkgbase\":\n pkgname = value\n packages[pkgname] = []\n if key == \"makedepends\":\n packages[pkgname].append(value)\n # if key == \"depends\":\n # packages[pkgname].append(value)\n return packages", "def get_requirements(*args):\n requirements = set()\n with open(get_absolute_path(*args)) as handle:\n for line in handle:\n # Strip comments.\n line = re.sub(r'^#.*|\\s#.*', '', line)\n # Ignore empty lines\n if line and not line.isspace():\n requirements.add(re.sub(r'\\s+', '', line))\n return sorted(requirements)", "def _python_dependencies(self):\n dependencies = []\n if self._requires_extensions():\n self._inject_extensions_build(dependencies)\n dependencies.append('- task: UsePythonVersion@0')\n dependencies.append(' displayName: \"Setting python version to 3.6 as required by functions\"')\n dependencies.append(' inputs:')\n dependencies.append(' versionSpec: \\'3.6\\'')\n dependencies.append(' architecture: \\'x64\\'')\n dependencies.append('- script: |')\n dependencies.append(' python3.6 -m venv worker_venv')\n dependencies.append(' source worker_venv/bin/activate')\n dependencies.append(' pip3.6 install setuptools')\n if self._requires_pip():\n dependencies.append(' pip3.6 install -r requirements.txt')\n return dependencies", "def check_req_utils():\n utils = (['dmenu', 'gpg', 'pass', 'xclip', 'exo-open', 'pkill'])\n for util in utils:\n if find_executable(util) is None:\n print(\"ERROR: Util '{}' is missing, install it before proceeding! 
Exiting!\".format(util))\n sys.exit(1)", "def get_checked_define_files(self):\n return (self._files['src/config.h'],\n self._files['src/gromacs/simd/simd.h'],\n self._files['src/gromacs/ewald/pme_simd.h'],\n self._files['src/gromacs/nbnxm/nbnxm_simd.h'])", "def get_library_list(self):\n ret = []\n prefix = \"-l\"\n if self.__command_basename.startswith(\"cl.\"):\n prefix = \"/l\"\n for ii in self.__libraries:\n ret += [prefix + ii]\n return ret", "def get_required_module_descriptors(self):\r\n return []", "def get_dlls(comments):\n dlls = [line for line in comments if '.dll' in line.lower()]\n return list(set(line.split()[-1].lower() for line in dlls))", "def get_install_requires() -> List[str]:\n return [\n \n ]", "def get_code_dependencies(self):\n pip_commands = ['pip', 'pip3', '/usr/local/bin/pip3']\n for pip_cmd in pip_commands:\n try:\n raw_stdout = subprocess.check_output([pip_cmd, 'freeze'])\n except FileNotFoundError:\n continue\n\n dependencies = raw_stdout.decode('ascii').split('\\n')[0:-1]\n if dependencies:\n return dependencies\n else:\n msg = \"Couldn't find pip executable in: {}\"\n raise ValueError(msg.format(','.join(pip_commands)))", "def other_libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-other'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-other for package `%s': %s\" % (self.name, stderr))\n\n return uniq(stdout.split())", "def DEPENDENCIES(self):\n pass", "def find_requirements(root: str) -> Optional[Dict[str, bool]]:\n findings = {\n file_name: os.path.isfile(os.path.join(root, file_name))\n for file_name in [\"requirements.txt\", \"Pipfile\", \"Pipfile.lock\"]\n }\n\n if not sum(findings.values()):\n return None\n return findings", "def _filter_mrpack_files(file_list: List[MrpackFile], mrpack_install_options: MrpackInstallOptions) -> List[MrpackFile]:\n filtered_list: List[MrpackFile] = []\n for file in file_list:\n if \"env\" not in file:\n filtered_list.append(file)\n continue\n\n if file[\"env\"][\"client\"] == \"required\":\n filtered_list.append(file)\n if file[\"env\"][\"client\"] == \"optional\" and file[\"path\"] in mrpack_install_options.get(\"optionalFiles\", []):\n filtered_list.append(file)\n\n return filtered_list", "def filter_working_set_soft(working_set, requirements):\n\n unmet_requirements = []\n\n retval = pkg_resources.WorkingSet([])\n\n for req in requirements:\n try:\n dists = working_set.require(req)\n for dist in dists: retval.add(dist)\n except:\n unmet_requirements.append(req)\n\n return retval, unmet_requirements", "def read_dependencies(filename):\n\n dependencies = []\n with open(filename) as f:\n for line in f.readlines():\n if not line or line.startswith('#'):\n continue\n dependencies.append(line.strip())\n return dependencies", "def test_list_supported_assets(self):\n pass", "def relevant_deps(self, only_make_check: bool = False, only_depends: bool = False) -> List[str]:\n to_return = []\n\n if self.depends is not None and not only_make_check:\n to_return.extend(self.depends)\n if self.makedepends is not None and not only_depends:\n to_return.extend(self.makedepends)\n if self.checkdepends is not None and not only_depends:\n to_return.extend(self.checkdepends)\n\n return list(set(to_return))", "def ComputeELFFileDeps(self):\n ldpaths = lddtree.LoadLdpaths(self._root)\n\n # First iteration over all the files in root searching for symlinks and\n # non-regular files.\n parseelf_args = []\n for rel_path, file_data in self._files.iteritems():\n if rel_path in self._symlinks or 
rel_path in self._hardlinks:\n continue\n\n full_path = os.path.join(self._root, rel_path)\n st = os.lstat(full_path)\n if not stat.S_ISREG(st.st_mode):\n continue\n parseelf_args.append((self._root, rel_path, ldpaths))\n\n # Parallelize the ELF lookup step since it is quite expensive.\n elfs = dict(x for x in self._imap(ParseELFWithArgs, parseelf_args)\n if not x is None)\n\n for rel_path, elf in elfs.iteritems():\n file_data = self._files[rel_path]\n # Fill in the ftype if not set yet. We complete this value at this point\n # to avoid re-parsing the ELF file later.\n if not 'ftype' in file_data:\n ftype = self._file_type_decoder.GetType(rel_path, elf=elf)\n if ftype:\n file_data['ftype'] = ftype\n\n file_deps = file_data.get('deps', {})\n # Dependencies based on the result of ldd.\n for lib in elf.get('needed', []):\n lib_path = elf['libs'][lib]['path']\n if not 'ldd' in file_deps:\n file_deps['ldd'] = []\n file_deps['ldd'].append(lib_path)\n\n if file_deps:\n file_data['deps'] = file_deps", "def get_install_requires():\n requirements = []\n for line in open('requirements.txt').readlines():\n # skip to next iteration if comment or empty line\n if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):\n continue\n # add line to requirements\n requirements.append(line)\n return requirements", "def have_package_lists():\n return 'Filename:' in execute('apt-cache', 'show', 'python', check=False, capture=True)", "def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! 
\" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version", "def get_used_release_specs(package, installed_version=None):", "def getExtraDlls(self, module):\n\n full_name = module.getFullName()\n\n if full_name == \"kivy\":\n kivy_info = self._getKivyInformation()\n\n kivy_dlls = []\n for dll_folder in kivy_info.sdl2_dep_bins + kivy_info.glew_dep_bins:\n kivy_dlls.extend(self.locateDLLsInDirectory(dll_folder))\n\n for full_path, target_filename, _dll_extension in kivy_dlls:\n yield self.makeDllEntryPoint(\n source_path=full_path,\n dest_path=target_filename,\n package_name=full_name,\n reason=\"needed by 'kivy'\",\n )\n\n self.reportFileCount(full_name, len(kivy_dlls))", "def get_required_module_descriptors(self):\r\n descriptors = []\r\n for location in self.sources_list:\r\n try:\r\n descriptor = self.system.load_item(location)\r\n descriptors.append(descriptor)\r\n except ItemNotFoundError:\r\n msg = \"Invalid module by location.\"\r\n log.exception(msg)\r\n self.system.error_tracker(msg)\r\n\r\n return descriptors", "def _dotnet_dependencies(self):\n dependencies = []\n dependencies.append('- script: |')\n dependencies.append(' dotnet restore')\n dependencies.append(' dotnet build --configuration Release')\n dependencies.append(\"- task: DotNetCoreCLI@2\")\n dependencies.append(\" inputs:\")\n dependencies.append(\" command: publish\")\n dependencies.append(\" arguments: '--configuration Release --output publish_output'\")\n dependencies.append(\" projects: '*.csproj'\")\n dependencies.append(\" publishWebProjects: false\")\n dependencies.append(\" modifyOutputPath: true\")\n dependencies.append(\" zipAfterPublish: false\")\n return dependencies", "def getOMFSrcModuleFiles(self) -> List[ghidra.app.util.bin.format.pe.debug.OMFSrcModuleFile]:\n ...", "def get_vul_info(vul_info):\n packages = []\n if vul_info.get('fixes') is None:\n return packages\n for fixes in vul_info['fixes']:\n packages.extend(get_package_os(fixes))\n return packages", "def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires", "def required_files(self, args):\n args_set = set(args)\n edge_list = self.__transform_pre(self.__include_deps_supply.get_file_include_deps())\n targets = chain((target for (source, target) in edge_list if source in args_set), args_set)\n return self.__transform_post(targets)", "def check_dependencies():\n required_found = True\n recommended_found = True\n print 'Checking dependencies ...\\n'\n print 'Required dependencies:'\n try:\n import Image\n assert Image.VERSION >= '1.1.5'\n print ' Python Imaging Library ....... OK'\n except ImportError:\n print ' !!! Python Imaging Library ... Not found'\n required_found = False\n except AssertionError:\n print ' !!! Python Imaging Library ... version', Image.VERSION,\n print 'found'\n print ' !!! 
Python Imaging Library 1.1.5 or higher is required'\n required_found = False\n if not required_found:\n print '\\nCould not find all required dependencies!'\n print 'Please install them and try again.'\n sys.exit(1)\n print", "def _check_dependencies(self):\n imgmin = exists('imgmin')\n image_optim = exists('image_optim')\n\n if not imgmin or not image_optim:\n puts(p('Dependencies have not been installed:'))\n\n message = 'imgmin - https://github.com/rflynn/imgmin'\n message = s('✓ ' + message) if imgmin else e('✗ ' + message)\n puts(message)\n\n message = 'image_optim - http://rubygems.org/gems/image_optim'\n message = s('✓ ' + message) if image_optim else e('✗ ' + message)\n puts(message)\n\n sys.exit(0)", "def dependencies(self):\n return self._dependency_analyzer.GetDependencies(\n [self.stacktrace.crash_stack] if self.stacktrace else [])", "def used_mods(ffile):\n import re\n import codecs\n\n # Go through line by line,\n # remove comments and strings because the latter can include ';'.\n # Then split at at ';', if given.\n # The stripped line should start with 'use '.\n # After use should be the \"module_name\", ', intrinsic :: module_name', or\n # ', non_intrinsic :: module_name'. We allow also to use \":: module_name\"\n # After module name should only be ', only: ...' or ', a ==> b'\n olist = list()\n of = codecs.open(ffile, 'r', encoding='ascii', errors='ignore')\n for line in of:\n ll = line.rstrip().lower() # everything lower case\n ll = re.sub('!.*$', '', ll) # remove F90 comment\n ll = re.sub('^c.*$', '', ll) # remove F77 comments\n ll = re.sub('\".*?\"', '', ll) # remove \"string\"\n ll = re.sub(\"'.*?'\", '', ll) # remove 'string'\n # check if several commands are on one line\n if ';' in ll:\n lll = ll.split(';')\n else:\n lll = [ll]\n for il in lll:\n iil = il.strip()\n # line should start with 'use '\n if iil.startswith('use '):\n iil = iil[4:].strip() # remove 'use '\n # skip intrinsic modules\n if 'intrinsic' in iil:\n if 'non_intrinsic' in iil:\n iil = re.sub(', *non_intrinsic', '', iil)\n iil = iil.strip()\n else:\n continue # skip to next in lll\n if iil.startswith('::'):\n iil = iil[2:].strip() # remove ':: '\n # remove after ',' if rename-list or only-list\n iil = re.sub(',.*$', '', iil)\n olist.append(iil.strip())\n of.close()\n\n return olist", "def categorize (self):\n\n fout = defaultdict(list)\n\n # Flat lists of files to collect keyed by platform,category\n collect_files = dict()\n for platform in wanted_files:\n for category, flist in wanted_files[platform].items():\n for f in flist:\n collect_files[(platform,category,f)] = list()\n\n for a in self.artifacts:\n try:\n with zfile.ZFile(a.lpath, 'r') as zf:\n if os.path.splitext(a.lpath)[-1] == '.rpm':\n a.info['plat'] = 'rhel'\n\n platform = a.info['plat']\n if platform not in platforms:\n continue\n\n zfiles = zf.getnames()\n if len(zfiles) == 0:\n print('No files in %s?' 
% a)\n for category, flist in wanted_files[platform].items():\n for f in flist:\n matches = [(a,x) for x in zfiles if os.path.basename(x) == f]\n if len(matches) > 0:\n collect_files[(platform,category,f)] += matches\n fout[category] += matches\n\n except zfile.tarfile.ReadError as e:\n print('ignoring artifact: %s: %s' % (a.lpath, str(e)))\n\n # Verify that all wanted combinations were matched\n errors = 0\n for missing in [x for x in collect_files if len(collect_files[x]) == 0]:\n errors += 1\n print('ERROR: No matching artifact files for', missing)\n\n if errors > 0:\n raise Exception('Not all wanted files found in artifacts, see above.')\n return fout", "def get_requirements():\n requirements_list = []\n\n if not os.path.isfile(REQUIREMENTS_FILE):\n # Check if requirements file did not exist.\n return requirements_list\n\n with open(REQUIREMENTS_FILE) as reqs:\n for install in reqs:\n requirements_list.append(install.strip())\n\n return requirements_list", "def get_libraries(self, archs: list[str]):\n libraries = self.ScopedLibraryDict.get(\"common\", []).copy()\n\n for arch in archs:\n libraries + self.ScopedLibraryDict.get(arch, []).copy()\n return list(set(libraries))", "def find_with_deps(self, package_names):", "def main():\n argp = argparse.ArgumentParser(prog='-mshlibs', description=('Print the '\n 'complete list of shared libraries used by the specified binary '\n 'file(s), (optionally including all child dependencies)'))\n argp.add_argument('file', nargs='+', help='file(s) to report on')\n argp.add_argument('-a', '--all', action=\"store_true\", help=(\n \"recursively resolve all sub-dependencies\"))\n args = argp.parse_args()\n\n if args.all:\n deps = reduce(lambda a, b: a|b,\n [all_libraries_used(f) for f in args.file])\n else:\n deps = reduce(lambda a, b: set(a)|set(b),\n [libraries_used(f) for f in args.file])\n\n for path in sorted(deps):\n print path", "def _GetDefaultBinPathExcludes(self):\n if sys.platform == \"win32\":\n import cx_Freeze.util\n systemDir = cx_Freeze.util.GetSystemDir()\n windowsDir = cx_Freeze.util.GetWindowsDir()\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]\n elif sys.platform == \"darwin\":\n return [\"/lib\", \"/usr/lib\", \"/System/Library/Frameworks\"]\n else:\n return [\"/lib\", \"/lib32\", \"/lib64\", \"/usr/lib\", \"/usr/lib32\",\n \"/usr/lib64\"]", "def _list_dependencies_info(\n out: Callable, ljust: int, package: str, dependencies: List[Requirement]\n):\n unicode = sys.stdout.encoding.lower().startswith(\"utf\")\n if unicode:\n ljust += 1\n\n not_found: List[Requirement] = list()\n for dep in dependencies:\n if dep.name == package:\n continue\n try:\n version_ = version(dep.name)\n except Exception:\n not_found.append(dep)\n continue\n\n # build the output string step by step\n output = f\"✔︎ {dep.name}\" if unicode else dep.name\n # handle version specifiers\n if len(dep.specifier) != 0:\n output += f\" ({str(dep.specifier)})\"\n output += \":\"\n output = output.ljust(ljust) + version_\n\n # handle special dependencies with backends, C dep, ..\n if dep.name in (\"matplotlib\", \"seaborn\") and version_ != \"Not found.\":\n try:\n from matplotlib import pyplot as plt\n\n backend = plt.get_backend()\n except Exception:\n backend = \"Not found\"\n\n output += f\" (backend: {backend})\"\n out(output + \"\\n\")\n\n if len(not_found) != 0:\n not_found = [\n f\"{dep.name} ({str(dep.specifier)})\"\n if len(dep.specifier) != 0\n else dep.name\n for dep in not_found\n ]\n if unicode:\n out(f\"✘ Not installed: {', 
'.join(not_found)}\\n\")\n else:\n out(f\"Not installed: {', '.join(not_found)}\\n\")", "def test_list_dependent_assets(self):\n pass", "def dependent_prs(self):\n comments = self.data['body'].replace('\\r\\n', ' ')\n for comment in self.comments():\n comments += comment['body'].replace('\\r\\n', ' ')\n\n dependent_prs = []\n dependent_keywords = ['depends on']\n for keyword in dependent_keywords:\n pattern = r'%s %s/(\\S+)/(\\S+)/pull/(\\d+)' % (keyword, GITHUB)\n LOGGER.info(\"Finding dependent PRs by '%s' in the comments\")\n dependent_prs += re.findall(pattern, comments)\n return set(dependent_prs)", "def check_executables():\n\n for ex in [\n 'midi2pianoroll', 'SprToFmt3x', 'Fmt3xToHmm', 'ScorePerfmMatcher',\n 'ErrorDetection', 'RealignmentMOHMM', 'MetchToCorresp'\n ]:\n if not which(EITA_PATH / 'Programs' / ex):\n print(\n \"Eita tools seems to be uncorrectly compiled, please use the following command to compile\",\n file=sys.stderr)\n print(f\"`{EITA_PATH}/compile.sh`\")", "def getIncludePathsAsList( targetPlatform, targetName ):\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n result = []\n\n # we are adding a trailing blank so that the \" -I\" replacement will\n # also work on the first element\n raw = getIncludePathsAsString( targetPlatform, targetName )\n tmp = (' ' + raw ).replace( ' -I', ' ' )\n\n for token in tmp.split():\n result.append( token.strip() )\n\n\n # remove empty entries (if present)\n try:\n result.remove( '' )\n except ValueError:\n pass\n\n return frozenset( result )", "def _node_dependencies(self):\n dependencies = []\n if self._requires_extensions():\n self._inject_extensions_build(dependencies)\n if self._requires_npm():\n dependencies.append('- script: |')\n dependencies.append(' npm install')\n dependencies.append(' npm run build --if-present')\n dependencies.append(' npm prune --production')\n\n return dependencies", "def getRequirements():\n\n \n cudaLibsOk = checkCUDAisAvailable() \n \n conditionalRequirements = []\n if cudaLibsOk:\n conditionalRequirements += [\"tensorflow-gpu==1.15.3\", ]\n else:\n print(\"\\n CUDA it's not available in your machine.\")\n print(\" You won't be able to use the GPU support.\\n\")\n #if olderPip or olderSetuptools:\n #tfRequirement = \"tensorflow==1.15.0\"\n #else:\n tfRequirement = \"tensorflow==1.15.3\"\n \n conditionalRequirements += [tfRequirement]\n\n return conditionalRequirements", "def getIncludePathsAsString( targetPlatform, targetName ):\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s' % fileName )\n content = FastScript.getFileContent( fileName, splitLines=True )\n raw_C = ''\n raw_CPP = ''\n regexp_C = re.compile( '^(?:C_FLAGS|C_INCLUDES)\\s=\\s+(.*)$' )\n regexp_CPP = re.compile( '^(?:CXX_FLAGS|CXX_INCLUDES)\\s=\\s+(.*)$' )\n result = ''\n\n for line in content:\n tmp = regexp_C.search( line )\n\n if tmp:\n raw_C = tmp.group( 1 )\n # logging.debug( 'raw C flags: %s' % raw_C )\n\n tmp = regexp_CPP.search( line )\n\n if tmp:\n raw_CPP = tmp.group( 1 )\n # logging.debug( 'raw CPP flags: %s' % raw_CPP )\n\n for candidate in ( shlex.split( raw_C ) + shlex.split( raw_CPP ) ):\n if candidate.startswith( '-I' ):\n result += candidate + ' '\n\n return result", "def 
determine_possible_extras(whls):\n whl_map = {\n whl.name(): whl\n for whl in whls\n }\n\n # TODO(mattmoor): Consider memoizing if this recursion ever becomes\n # expensive enough to warrant it.\n def is_possible(name, extra):\n # If we don't have the .whl at all, then this isn't possible.\n if name not in whl_map:\n return False\n whl = whl_map[name]\n # If we have the .whl, and we don't need anything extra then\n # we can satisfy this dependency.\n if not extra:\n return True\n # If we do need something extra, then check the extra's\n # dependencies to make sure they are fully satisfied.\n for extra_dep in whl.dependencies(extra=extra):\n req = pkg_resources.Requirement.parse(extra_dep)\n # Check that the dep and any extras are all possible.\n if not is_possible(req.project_name, None):\n return False\n for e in req.extras:\n if not is_possible(req.project_name, e):\n return False\n # If all of the dependencies of the extra are satisfiable then\n # it is possible to construct this dependency.\n return True\n\n return {\n whl: [\n extra\n for extra in whl.extras()\n if is_possible(whl.name(), extra)\n ]\n for whl in whls\n }" ]
[ "0.6188307", "0.60172987", "0.594588", "0.58179677", "0.57686013", "0.57123476", "0.56695175", "0.5656404", "0.56266695", "0.56120116", "0.5609849", "0.5595345", "0.5572678", "0.55723906", "0.5521513", "0.55099297", "0.5491542", "0.5476667", "0.5474204", "0.54685295", "0.5443033", "0.5421373", "0.53993523", "0.5397916", "0.5386504", "0.53844947", "0.53672016", "0.53638315", "0.5352114", "0.5341547", "0.5340787", "0.5339822", "0.53253067", "0.53171664", "0.53119045", "0.52712554", "0.5258964", "0.5254638", "0.5251287", "0.5235254", "0.5223911", "0.52160525", "0.5215404", "0.5213624", "0.51908195", "0.5188191", "0.5186201", "0.51841927", "0.5177102", "0.51729965", "0.51519287", "0.51508003", "0.51430935", "0.51363367", "0.5134181", "0.5132792", "0.51322013", "0.51285535", "0.51245004", "0.51172376", "0.51141346", "0.5112054", "0.5109257", "0.51024103", "0.50999624", "0.50988686", "0.5090515", "0.5089212", "0.5088109", "0.508649", "0.50846916", "0.50774866", "0.5072294", "0.5069897", "0.50668216", "0.5063869", "0.5062185", "0.50621337", "0.5061225", "0.50541776", "0.5045233", "0.50384474", "0.5036483", "0.5029052", "0.50281805", "0.5023107", "0.50221443", "0.50141025", "0.5010982", "0.500956", "0.50090027", "0.50078166", "0.5004723", "0.5002658", "0.49955317", "0.49938756", "0.4992828", "0.49891293", "0.4984166", "0.49794632" ]
0.5495714
16
Return true if the file should be copied to the target machine. This is done by checking the binPathIncludes, binPathExcludes, binIncludes and binExcludes configuration variables using first the full file name, then just the base file name, then the file name without any version numbers. Files are included unless specifically excluded but inclusions take precedence over exclusions.
def _ShouldCopyFile(self, path):
    # check for C runtime, if desired
    path = os.path.normcase(path)
    dirName, fileName = os.path.split(path)
    if fileName.startswith("msvcr") and fileName.endswith(".dll"):
        self.msvcRuntimeDir = dirName
        return self.includeMSVCR

    # check the full path
    if path in self.binIncludes:
        return True
    if path in self.binExcludes:
        return False

    # check the file name by itself (with any included version numbers)
    if fileName in self.binIncludes:
        return True
    if fileName in self.binExcludes:
        return False

    # check the file name by itself (version numbers removed)
    name = self._RemoveVersionNumbers(fileName)
    if name in self.binIncludes:
        return True
    if name in self.binExcludes:
        return False

    # check the path for inclusion/exclusion
    for path in self.binPathIncludes:
        if dirName.startswith(path):
            return True
    for path in self.binPathExcludes:
        if dirName.startswith(path):
            return False

    return True
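The precedence described in this record is easier to see as a standalone sketch: includes are consulted before excludes at each level (full path first, then the bare file name, then the directory prefixes), and anything not explicitly excluded is copied. The sketch below is illustrative only; the version-number-stripping step is omitted, and the sample paths and configuration lists are hypothetical rather than taken from the dataset.

import os

def should_copy(path, bin_includes=(), bin_excludes=(),
                bin_path_includes=(), bin_path_excludes=()):
    path = os.path.normcase(path)
    dir_name, file_name = os.path.split(path)
    # Includes are checked before excludes at each level, so an explicit
    # include wins over an exclude for the same name.
    for candidate in (path, file_name):
        if candidate in bin_includes:
            return True
        if candidate in bin_excludes:
            return False
    for prefix in bin_path_includes:
        if dir_name.startswith(prefix):
            return True
    for prefix in bin_path_excludes:
        if dir_name.startswith(prefix):
            return False
    return True  # included unless specifically excluded

print(should_copy("/usr/lib/libfoo.so", bin_excludes=("libfoo.so",)))  # False
print(should_copy("/usr/lib/libfoo.so",
                  bin_includes=("libfoo.so",),
                  bin_excludes=("libfoo.so",)))  # True: include takes precedence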
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditional_copy(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n\n filename = save_cmake_filename(doc_file)\n\n filename1 = os.path.join(asciitest_out_dir, filename + \".temp\").replace(\"\\\\\",\"/\")\n filename2 = os.path.join(asciitest_out_dir, filename).replace(\"\\\\\",\"/\")\n\n update_if_different(filename1, filename2)", "def include_file(self, filename):\n # Only include Python files for now.\n if filename[-3:] == '.py':\n return True\n return False", "def copy_file_check(self):\n pass", "def BinaryExists(filename):\n return os.path.exists(os.path.join(self.options.build_dir, filename))", "def include_source_files(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"include_source_files\")", "def copy_file(self, filepath):\n copy_file = False\n try:\n copy_file = self.data[filepath]['copy']\n except KeyError:\n return False\n return copy_file", "def sysFile(*args, copy: AnyStr=\"\", delete: bool=True, makeDir: bool=True, move: AnyStr=\"\",\n removeEmptyDir: bool=True, rename: AnyStr=\"\", **kwargs)->bool:\n pass", "def ShouldBuild(self, src_files, dst_files):\n if self.force:\n return True\n\n oldest = None\n for dst in dst_files:\n if not os.path.exists(dst):\n self.DebugMsg(\"Build because %s does not exist\" % dst)\n return True\n modified = os.path.getmtime(dst)\n if oldest == None or modified < oldest:\n old = dst\n oldest = modified\n\n for src in src_files:\n modified = os.path.getmtime(src)\n if modified > oldest:\n self.DebugMsg(\"Build because %s is newer than %s\" % (src, old))\n return True\n\n self.DebugMsg(\"%s are up to date\" % \", \".join(dst_files))\n return False", "def should_do_write():\n if not suffix_is_supported():\n return False\n\n if not has_write_access():\n return False\n\n # Files under exclude_dir should be exempted from writing.\n filepath = CURRENT_BUFFER.name\n file_dir = filepath.rsplit('/', 1)[0]\n exclude_dirs = vim.eval(\"g:BHExcludeDir\")\n exclude_dirs = [os.path.realpath(os.path.expanduser(_dir)) for _dir in exclude_dirs]\n for dirname in exclude_dirs:\n if file_dir.startswith(dirname):\n debug(\"File in BHExcludeDir, do not write header.\")\n return False\n\n # whitelist: files directly inside BHIn will have a header.\n in_list = vim.eval(\"g:BHIn\")\n for dirname in in_list:\n dirname = os.path.realpath(os.path.expanduser(dirname))\n if file_dir == dirname:\n debug(\"File in BHIn, do write.\")\n return True\n\n # whitelist: files under BHUnder or its sub-dir will have a header.\n under_list = vim.eval(\"g:BHUnder\")\n for dirname in under_list:\n dirname = os.path.realpath(os.path.expanduser(dirname))\n if filepath.startswith(dirname):\n debug(\"File under BHUnder, do write.\")\n return True\n\n debug(\"default, do not write header.\")\n return False", "def copy_file(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n if _passes_filter(src, filter):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if _should_copy(src, dst):\n #console(f'copy {src}\\n --> {dst}')\n shutil.copyfile(src, dst, follow_symlinks=True)\n shutil.copystat(src, dst, follow_symlinks=True)\n return True\n return False", "def can_minimize_file(file_path):\n # If this is not a binary file, we should be able to minimize it in some way.\n if not utils.is_binary_file(file_path):\n return True\n\n # Attempt to minimize IPC dumps.\n if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):\n return supports_ipc_minimization(file_path)\n\n # Other binary file formats 
are not supported.\n return False", "def determine_should_sync(\n self, src_file: Optional[FileStats], dest_file: Optional[FileStats]\n ) -> bool:\n if dest_file:\n dest_file.operation_name = \"delete\"\n LOGGER.debug(\n \"syncing: (None) -> %s (remove), file does not \"\n \"exist at source (%s) and delete mode enabled\",\n dest_file.src if dest_file else None,\n dest_file.dest if dest_file else None,\n )\n return True", "def test_copy_required_include_and_exclude(self):\n include = ['yara/*', '*_malware_*']\n exclude = ['*mobile*', 'yara/?.yara']\n\n self.assertTrue(clone_rules._copy_required('yara/packed.yara', include, exclude))\n self.assertTrue(clone_rules._copy_required('base_malware_index.yara', include, exclude))\n self.assertTrue(clone_rules._copy_required('yara/mac_malware.yar', include, exclude))\n\n self.assertFalse(clone_rules._copy_required('not_included.yara', include, exclude))\n self.assertFalse(clone_rules._copy_required('yara/mobile_malware.yara', include, exclude))\n self.assertFalse(clone_rules._copy_required('yara/A.yara', include, exclude))", "def _include_file(self, root_parts, f):\n if len(root_parts) and root_parts[0] == \"lwc\":\n # only include expected file extensions within lwc components\n return f.lower().endswith((\".js\", \".js-meta.xml\", \".html\", \".css\", \".svg\"))\n return True", "def copy_file ( self, source, dest, chown=True, chmod=True ):\n if self._copy_file ( source, dest ):\n if chmod:\n self.chmod_file ( dest )\n if chown:\n self.chown_file ( dest )\n\n return True\n else:\n return False", "def copy_file(source, destination):\n\n try:\n shutil.copy(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True", "def copy_if_needed(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n #console(f'COPY {src} --> {dst}')\n if os.path.isdir(src):\n return copy_dir(src, dst, filter)\n else:\n return copy_file(src, dst, filter)", "def _local_install(self):\n config = self._config\n ext = config.plugins[self.full_name].get('pkg_extension', '')\n if not ext:\n return False\n\n # ensure extension begins with a dot\n ext = '.{0}'.format(ext.lstrip('.'))\n\n return config.context.package.arg.endswith(ext)", "def needs_conan(self):\n return any([Path(self.project_dir/conanfile).exists()\n for conanfile in (\"conanfile.py\", \"conanfile.txt\")])", "def check_for_assemble_file(task_file):\n if not os.path.exists(task_file):\n print_failure_msg(\"{} file is missing\".format(task_file))\n exit(127)\n return True", "def _FilterFile(affected_file):\n return affected_file.LocalPath().endswith(\n ('.h', '.cc', '.cpp', '.cxx', '.mm'))", "def _edit_arch_target_based(self, spec, prefix):\n if spec.version < Version(\"2.14\"):\n return False\n\n found_special_opt = False\n with working_dir(\"arch\"):\n arch_filename = \"{0}.arch\".format(self.build_directory)\n\n replace = [\n [r\"^CHARMARCH = .*$\", \"CHARMARCH = {0}\".format(self.spec[\"charmpp\"].charmarch)],\n [r\"^NAMD_ARCH = .*$\", \"NAMD_ARCH = {0}\".format(self.arch)],\n ]\n\n # Optimizations for skylake_avx512\n if (\n spec.platform == \"linux\"\n and self.compiler.name == \"intel\"\n and \"avx512\" in spec.target\n and spec.target >= \"skylake_avx512\"\n ):\n if spec.version >= Version(\"2.15\") and os.path.exists(\"Linux-AVX512-icc.arch\"):\n tty.info(\"Building binaries with AVX512-tile optimization\")\n copy(\"Linux-AVX512-icc.arch\", arch_filename)\n elif spec.version >= Version(\"2.14\") and os.path.exists(\"Linux-SKX-icc.arch\"):\n tty.info(\"Building binaries with 
Skylake-X\" \"AVX512 optimization\")\n copy(\"Linux-SKX-icc.arch\", arch_filename)\n else:\n return False\n\n replace.append([r\"^CXX = icpc\", \"CXX = {0}\".format(self.compiler.cxx)])\n replace.append([r\"^CC = icc\", \"CC = {0}\".format(self.compiler.cc)])\n found_special_opt = True\n\n if found_special_opt:\n for pattern, replacement in replace:\n filter_file(pattern, replacement, arch_filename)\n\n return found_special_opt", "def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)", "def _accept_for_flag (self, filename):\n\t\troot, ext = os.path.splitext(filename)\n\t\tif not ext:\n\t\t\treturn 1\n\t\telse:\n\t\t\tbinary_extensions = ['.jpg', '.gif', '.png', '.jar' ]\n\t\t\treturn ext not in ['.bak', '.off','.old', '.works', '.clean', '.obs', '.log', '.db'] + binary_extensions", "def is_file_excluded(self, file_path: Union[str, os.PathLike]) -> bool:\n # TODO: current design of ignore file can't distinguish between files and directories of the same name\n if self._path_spec is None:\n self._path_spec = self._create_pathspec()\n if not self._path_spec:\n return False\n file_path = self._get_rel_path(file_path)\n if file_path is None:\n return True\n\n norm_file = normalize_file(file_path)\n matched = False\n for pattern in self._path_spec:\n if pattern.include is not None:\n if pattern.match_file(norm_file) is not None:\n matched = pattern.include\n\n return matched", "def _check_for_custom_config(self, standard_conf_path):\n\n ret_val = False\n conf_filename = os.path.basename(standard_conf_path)\n custom_conf_expected_path = CUSTOM_CONFIG_DIR + '/' + self._get_tempdir() + '/' + conf_filename\n\n if os.path.isfile(custom_conf_expected_path):\n ret_val = True\n\n return ret_val", "def test_check(self):\n\n self.assertTrue(PostfixExclude().check(self.file_gitignore))\n self.assertTrue(PostfixExclude().check(self.file_py))\n self.assertTrue(PostfixExclude().check(self.file_authors))\n self.assertTrue(PostfixExclude().check(self.file__init__))\n self.assertTrue(PostfixExclude().check(self.file_bin))", "def file_allowed(self):\n if self._allowed_ext:\n if self.get_ext() not in self._allowed_ext:\n return False\n \n return True", "def _include_path(self, path, extensions=None):\r\n if extensions is None:\r\n extensions = tuple(self.readers.extensions)\r\n basename = os.path.basename(path)\r\n\r\n #check IGNORE_FILES\r\n ignores = self.settings['IGNORE_FILES']\r\n if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):\r\n return False\r\n\r\n if extensions is False or basename.endswith(extensions):\r\n return True\r\n return False", "def on_file(self) -> bool:\n\n return (\n self.env_var_helper.set_name(\"PYFUNCEBLE_DEBUG\").exists()\n or self.env_var_helper.set_name(\"DEBUG_PYFUNCEBLE\").exists()\n )", "def include_file_sample(self, file_obj: File, sample_id: str) -> bool:\n file_tags = {tag.name for tag in file_obj.tags}\n tags: Set[str]\n # Check if any of the file tags matches the sample tags\n for tags in self.sample_tags:\n working_copy = deepcopy(tags)\n if self.delivery_type != \"fastq\":\n working_copy.add(sample_id)\n if working_copy.issubset(file_tags):\n return True\n\n return False", "def _install_file(srcdir, filename, dstdir):\n srcfilename = os.path.join(srcdir, filename)\n dstfilename = os.path.join(dstdir, filename)\n if not os.path.exists(srcfilename):\n if os.path.exists(dstfilename):\n subprocess.run(['rm', dstfilename], check=True)\n return (False, True)\n return (False, 
False)\n\n equal = subprocess.run(['diff', '-q', srcfilename, dstfilename],\n check=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL).returncode == 0\n if not equal:\n subprocess.run(['mv', srcfilename, dstfilename], check=True)\n return (True, not equal)", "def __checkDestination(self):\n return os.path.exists(self.__targetPath)", "def _has_valid_save_as(self):\n try:\n output_path = self.settings[\"OUTPUT_PATH\"]\n except KeyError:\n # we cannot check\n return True\n\n try:\n sanitised_join(output_path, self.save_as)\n except RuntimeError: # outside output_dir\n logger.error(\n \"Skipping %s: file %r would be written outside output path\",\n self,\n self.save_as,\n )\n return False\n\n return True", "def fileCheck(filePath):\n if not os.path.isfile(filePath):\n return False\n return True", "def checkIfFileExistsInPossibleLocations(testConfig):\n assert \"name\" in testConfig\n assert \"file\" in testConfig\n assert \"file_locations\" in testConfig\n testPass = False\n for filePath in testConfig[\"file_locations\"]:\n if isfile(join(filePath,testConfig[\"file\"])):\n testPass=True\n \n assert testPass,\"Failure for package \"+testConfig[\"name\"]+\"\\n File: \"+\\\n testConfig[\"file\"]+\" does not exist\"+\"\\nSearched in \"+\\\n str(testConfig[\"file_locations\"])", "def Ignore(self, relative_file):\n return False", "def process(self, source_path: pathlib.Path) -> bool:", "def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False", "def __copyfile(source, destination):\n logger.info(\"copyfile: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copy(source, destination)\n return True\n except Exception as e:\n logger.error(\n \"copyfile: %s -> %s failed! 
Error: %s\", source, destination, e\n )\n return False", "def validate_input_file(self):\r\n return os.path.isfile(self.input_file)", "def should_check_for_binary_versions(self):\n explicitly_asked_for_binaries_check = 'CHECK_BINARIES_VERSIONS' in config_vars\n update_was_requested = \"__UPDATE_INSTALLED_ITEMS__\" in config_vars.get(\"MAIN_INSTALL_TARGETS\", []).list()\n retVal = explicitly_asked_for_binaries_check or update_was_requested\n return retVal", "def test_conf_contain_only_include_file(self):\n\n # dummy configuration for include file 1\n conf = {\n 'runners': {\n 'inline': {\n 'local_tmp_dir': \"include_file1_local_tmp_dir\"\n }\n }\n }\n\n include_file_1 = self.save_conf('include_file_1', conf)\n\n # dummy configuration for include file 2\n conf = {\n 'runners': {\n 'inline': {\n 'local_tmp_dir': \"include_file2_local_tmp_dir\"\n }\n }\n }\n\n include_file_2 = self.save_conf('include_file_2', conf)\n\n # test configuration\n conf = {\n 'include': [include_file_1, include_file_2]\n }\n path = self.save_conf('twoincludefiles', conf)\n\n stderr = StringIO()\n with no_handlers_for_logger():\n log_to_stream('mrjob.conf', stderr)\n InlineMRJobRunner(conf_paths=[path])\n self.assertEqual(\n \"\",\n stderr.getvalue())", "def ShouldCheckFile(file_name):\n checked_extensions = [\n '.c',\n '.cc',\n '.h',\n '.m',\n '.mm',\n # These are not the preferred extension in our codebase,\n # but including them for good measure.\n # (They do appear in the newlib toolchain + third_party libraries).\n '.cpp',\n '.hpp',\n ]\n basename, extension = os.path.splitext(file_name)\n return extension in checked_extensions", "def __is_file_in_working_directory(self, filename) -> bool:\n return os.path.exists(os.path.join(self.__directory.working_path,\n filename))", "def ExecuteIf(self, args, src_files, dst_files):\n if self.ShouldBuild(src_files, dst_files):\n self.MakeDestinationDirectories(dst_files)\n self.Execute(args)\n if self.execute and not self.VerifyExists(dst_files):\n raise RuntimeError(\"FAILED: build did not create all required files\")", "def _check_if_cff_file_needs_rewriting(self, content):\n logger.info(\"Checking if we can re-use injection config file...\")\n if os.path.isfile(self.config_file_name) is False:\n logger.info(\"...no config file {} found.\".format(self.config_file_name))\n return True\n else:\n logger.info(\n \"...OK: config file {} already exists.\".format(self.config_file_name)\n )\n\n with open(self.config_file_name, \"r\") as f:\n file_content = f.read()\n if file_content == content:\n logger.info(\n \"...OK: file contents match, no update of {} required.\".format(\n self.config_file_name\n )\n )\n return False\n else:\n logger.info(\n \"...file contents unmatched, updating {}.\".format(\n self.config_file_name\n )\n )\n return True", "def deploy_configuration_file(filename, template_filename=None):\n if template_filename is None:\n template_filename = os.path.basename(filename)\n if not os.path.isabs(filename):\n filename = os.path.abspath(filename)\n if os.path.exists(filename):\n return True\n else:\n try:\n from pkg_resources import (Requirement, resource_filename,\n DistributionNotFound)\n import shutil\n except ImportError as err:\n raise gc3libs.exceptions.FatalError(\n \"Cannot import required Python modules: %s.\"\n \" Please check GC3Pie installation instructions at\"\n \" http://gc3pie.googlecode.com/svn/trunk/gc3pie/docs/html/install.html\" % # noqa\n str(err))\n try:\n # copy sample config file\n if not os.path.exists(dirname(filename)):\n 
os.makedirs(dirname(filename))\n sample_config = resource_filename(\n Requirement.parse(\"gc3pie\"),\n \"gc3libs/etc/\" +\n template_filename)\n shutil.copyfile(sample_config, filename)\n return False\n except IOError as err:\n gc3libs.log.critical(\"Failed copying configuration file: %s\", err)\n raise gc3libs.exceptions.NoConfigurationFile(\n \"No configuration file '%s' was found, and an attempt to\"\n \" create it failed. Aborting.\" % filename)\n except DistributionNotFound as ex:\n raise AssertionError(\n \"Cannot access resources for Python package: %s.\"\n \" Installation error?\" %\n str(ex))", "def has_source_file( self ):\n return self._source_file is not None", "def should_run(self):\n # from IPython.html.tasks.py\n\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n\n # check if any .less files are newer than the generated targets\n for dirpath, dirnames, filenames in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n\n return False", "def test_subversion_binary_file(host):\n assert host.file(PACKAGE_BINARY).is_file", "def built_file_must_not_contain(self, name, contents, **kw):\n return self.must_not_contain(self.built_file_path(name, **kw), contents)", "def assert_source_files_are_installed_and_obfuscated(install_path, source_files,\n source_files_without_obfuscate_path=None):\n assert os.path.isdir(install_path), \"%s does not exist\" % install_path\n for source_file, op, version, check_obfuscation in source_files:\n file_path = os.path.join(install_path, source_file)\n assert os.path.isfile(file_path), \"%s file not found in %s\" % (source_file, install_path)\n properties = get_file_properties(file_path)\n product_version = properties[\"StringFileInfo\"].get('ProductVersion', None) if properties[\"StringFileInfo\"] is not None else None\n assert compare_versions_str(product_version, op, version, default=True), \\\n \"%s ProductVersion %s is not %s %s\" % (file_path, product_version, op, version)\n if check_obfuscation:\n assert_not_equal_hash(file_path, source_files_without_obfuscate_path + source_file)", "def _applyIncludes(self, origfile, _file = file):\n opt = \"include_config\"\n try:\n try:\n includes = self._config.get(\"general\", opt, raw = True).strip()\n except ConfigParser.NoOptionError:\n opt = \"include-config\"\n includes = self._config.get(\"general\", opt, raw = True).strip()\n except ConfigParser.NoOptionError:\n # don't even ignore\n pass\n else:\n self._config.remove_option(\"general\", opt)\n if not len(includes):\n return\n\n origpath = os.path.dirname(os.path.abspath(origfile))\n includes = [\n util.filename.toLocale(\n config_file, self._charset_, self.runtime.path_encoding\n )\n for config_file in util.splitCommand(includes) if config_file\n ]\n\n for config_file in includes:\n try:\n config_fp = _file(os.path.join(origpath, config_file))\n self._config.readfp(config_fp, config_fp.name)\n config_fp.close()\n except IOError, exc:\n raise ConfigNotFoundError(\"%s: %s\" % (\n config_file, str(exc)\n ))", "def check_binary(self):\n if shutil.which(self.binary):\n return True\n else:\n logging.warning(R+'The supplied binary or path does not exist... 
Exiting'+W)\n exit(1)", "def allowed_file(filename: str) -> bool:\n if filename is None or filename.strip() is None:\n return False\n\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in current_app.config['FILE_ALLOWED_EXTENSIONS']", "def check_cython_includes(filename, includes):\n from os.path import exists, isfile, join\n for directory in includes:\n path = join(directory, filename) + \".pxd\"\n if exists(path) and isfile(path):\n return path\n path = join(directory, *filename.split('.')) + \".pxd\"\n if exists(path) and isfile(path):\n return path", "def test_copy_required_exclude_list(self):\n exclude_list = ['*.yar', 'skip/these/file*']\n self.assertTrue(clone_rules._copy_required('base.yara', [], exclude_list))\n self.assertTrue(clone_rules._copy_required('path/to/file.yara', [], exclude_list))\n self.assertFalse(clone_rules._copy_required('file.yar', [], exclude_list))\n self.assertFalse(clone_rules._copy_required('skip/these/file.yara', [], exclude_list))", "def verifyfile(self, path):\n return (\n super(InventoryModule, self).verify_file(path) and\n path.endswith((self.NAME + \".yaml\", self.NAME + \".yml\")))", "def preliminary_file_check(self):\n\n if self.has_error():\n return False\n\n if not self.filepath:\n self.add_error(\"A file was specified!\")\n return False\n\n if not isfile(self.filepath):\n self.add_error(\"The file was not found: %s\" % basename(self.filepath))\n return False\n\n if getsize(self.filepath) < 1:\n self.add_error(\"The file is empty (no bytes): %s\" % basename(self.filepath))\n return False\n\n if self.file_ext in ['xls', 'xlsx']:\n self.is_excel = True\n\n return True", "def needs_rebuild(source, target):\n return not os.path.isfile(target) or (\n os.path.getmtime(source) > os.path.getmtime(target))", "def check_c_includes(filename, includes):\n from os.path import exists, isfile, join\n for directory in includes:\n path = join(directory, filename)\n if exists(path) and isfile(path):\n return path", "def test_check(self):\n\n self.assertTrue(DirExclude().check(self.file_gitignore))\n self.assertTrue(DirExclude().check(self.file_perceval))\n self.assertTrue(DirExclude().check(self.file_authors))\n\n self.assertFalse(DirExclude().check(self.file_tests))\n self.assertFalse(DirExclude().check(self.file_bin))", "def checkIfAllowedToModify(self):\n\n oldBytes = b''\n testFileName = self.MAPSTUDIO + self.inputFiles[0] + '.msb'\n\n with open(testFileName, 'rb') as oldf:\n oldBytes = oldf.read()\n\n # Try writing something to the file\n\n try:\n with open(testFileName, 'wb') as outf:\n outf.write(b'TESTINGIFICANWRITEINTOTHISFILE')\n except:\n return False\n\n # Because apparently for _some_ reason it doesn't throw an error sometimes(?) 
so we confirm if the file was actually modified\n\n newBytes = b''\n with open(testFileName, 'rb') as oldf:\n newBytes = oldf.read()\n\n if (oldBytes == newBytes):\n return False\n\n # Restore the file to normal\n\n with open(testFileName, 'wb') as outf:\n outf.write(oldBytes)\n\n oldBytes = b''\n newBytes = b''\n\n return True", "def in_maya():\n return \"maya.bin\" in sys.argv[0]", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)", "def checkCopiedFiles(self):\n self.missingAiCopies = 0\n self.invalidAiCopies = 0\n self.invalidMapCopies = 0\n self.missingMapCopies = 0\n\n for iFile in self.inputFilesAll:\n if not (os.path.isfile(self.MAPCOPY + iFile + '.msb')):\n self.missingMapCopies += 1\n else:\n with open(self.MAPCOPY + iFile + '.msb', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidMapCopies += 1\n\n if not (iFile == \"m12_00_00_01\"):\n if (self.useDCX):\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd.dcx')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd.dcx', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n else:\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n\n if (self.missingAiCopies > 0 or self.invalidAiCopies > 0 or self.missingMapCopies > 0 or self.invalidMapCopies > 0 or self.missingSfxCopies > 0 or self.invalidSfxCopies > 0):\n return False\n else:\n return True", "def copy_file(name, n_name):\n\n if os.path.isfile(config_tools.full_dest+name):\n try:\n shutil.copyfile(config_tools.full_dest+name, config_tools.full_dest+n_name)\n except OSError:\n print(f\"Не возможно копировать файл {name}\")\n else:\n print(f\"Файл {config_tools.full_dest+name} скопирован как {config_tools.full_dest+n_name}\")", "def allowed_file(self,filename):\n \n return '.' in filename and filename.rsplit('.', 1)[1].lower() in self.allowed_extensions", "def allowed_upload(self, filename):\n for suffix in ['.changes', '.dsc', '.tar.gz', '.diff.gz', '.deb', '.udeb', '.tar.bz2', \".tar.xz\"]:\n if filename.endswith(suffix):\n return True\n\n return False", "async def copy_module_files(self, base_dir):\n for file, pkg in self.files:\n # Make sure the package folders are there or are created.\n src_package_path = os.path.join(base_dir, 'gocat-extensions', pkg)\n dest_package_path = os.path.join(base_dir, 'gocat', pkg)\n if not os.path.exists(dest_package_path):\n os.makedirs(dest_package_path)\n\n # Check if entire package is to be copied\n if file == '*':\n await self._copy_folder_files(src_package_path, dest_package_path)\n else:\n await self._copy_file_with_hook(file, os.path.join(src_package_path, file),\n os.path.join(dest_package_path, file))\n return True", "def check_execution_path():\n file_name = \"LICENSE\"\n if not os.path.exists(file_name):\n logging.error(\n \"Don't execute the script from a sub-directory. 
\"\n \"Switch to the root of the project folder\"\n )\n return False\n return True", "def built_file_must_match(self, name, contents, **kw):\n return self.must_match(self.built_file_path(name, **kw), contents)", "def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)", "def _is_path_inside_output_dir(self, path: str) -> bool:\n real_output_dir = os.path.realpath(self._output_dir)\n real_file_path = os.path.realpath(path)\n return os.path.commonpath([real_output_dir, real_file_path]) == real_output_dir", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)", "def matches(self, tgt_residence_dir: str) -> bool:", "def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False", "def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath", "def exclude_filter(path):\n for ignore in IGNORE:\n if fnmatch(path, osp.join(SRC, ignore)): # in ignore list\n return True\n else:\n if osp.isdir(path) or osp.splitext(path)[1] != '.md':\n return False\n with open(path) as f:\n firstline = f.readline()\n return firstline.startswith('```{include}') # duplicate file", "def file_copy_remote_exists(self, src, dest=None, file_system=None):\n self.enable()\n if file_system is None:\n file_system = self._get_file_system()\n\n file_copy = self._file_copy_instance(src, dest, file_system=file_system)\n if file_copy.check_file_exists() and file_copy.compare_md5():\n log.debug(\"Host %s: File %s already exists on remote.\", self.host, src)\n return True\n\n log.debug(\"Host %s: File %s does not already exist on remote.\", self.host, src)\n return False", "def should_prune(path, relative_path, used_pep_set, used_pip_set):\n # Match against include patterns\n for pattern in filter(relative_path.match, PRUNING_INCLUDE_PATTERNS):\n used_pip_set.add(pattern)\n return True\n\n # Match against exclude patterns\n for pattern in filter(Path(str(relative_path).lower()).match, PRUNING_EXCLUDE_PATTERNS):\n used_pep_set.add(pattern)\n return False\n\n # Do binary data detection\n with path.open('rb') as file_obj:\n if _is_binary(file_obj.read()):\n return True\n\n # Passed all filtering; do not prune\n return False", "def _is_relevant(self, event):\n relevant = False\n if isinstance(event, FileMovedEvent):\n path = event.dest_path\n if self._filter(path):\n relevant = True\n else:\n path = False\n\n # for any event\n if self._filter(event.src_path):\n relevant = True\n return relevant", "def should_format(\n filename: Path, include_patterns: Iterable[str], exclude_patterns: Iterable[str]\n) -> Tuple[bool, str]:\n from fnmatch import fnmatch\n\n if any(fnmatch(os.path.abspath(filename), pattern) for pattern in exclude_patterns):\n return False, \"Excluded file\"\n\n filename_no_ext, ext = os.path.splitext(filename)\n # ignore .py file that has a jupytext configured notebook with the same base name\n ipynb_filename = filename_no_ext + \".ipynb\"\n if ext == \".py\" and os.path.isfile(ipynb_filename):\n with open(ipynb_filename, \"rb\") as f:\n if b\"jupytext\" not in f.read():\n return True, \"\"\n with open(filename, \"rb\") as f:\n if b\"jupytext:\" not in f.read():\n return True, \"\"\n return False, \"Jupytext generated file\"\n\n if any(fnmatch(os.path.basename(filename), pattern) for pattern in 
include_patterns):\n return True, \"\"\n\n return False, \"Unknown file type\"", "def should_copy_in_egg_info(f, is_custom_egg):\n if R_EGG_INFO.search(f):\n if is_custom_egg:\n if R_EGG_INFO_BLACK_LIST.search(f):\n return False\n else:\n return True\n else:\n return True\n else:\n return False", "def check_inversion_files(file_dic):\n patern_dic = {'file_generate_arcs' : \"\", \\\n 'file_source' : \"\", \\\n 'file_make_inversion' : \"\", \\\n 'file_best_fit' : \"\", \\\n 'file_chires' : \"\"}\n\n for i in patern_dic.keys():\n os.path.isfile(file_dic[i])\n if not ( i in file_dic.keys() ):\n print 'error(check_inversion_files): check_inversion_files: item', \\\n i, 'is not defined in the input dictionary'\n return False\n if not os.path.isfile(file_dic[i]):\n print 'error(check_inversion_files): check_inversion_files: file', \\\n file_dic[i], 'does not exists'\n return False\n return True", "def test_azurecli_binary_isfile(host):\n assert host.file(PACKAGE_BINARY).is_file", "def copy_supported_files(self):\n\n try:\n for directory in self.config.DIRS_TO_COPY:\n shutil.copytree(self.dir_helper.publication_path + directory,\n self.temp_dir + directory)\n except Exception, ex:\n print '[e] exception {}'.format(str(ex))\n print '[i] looks like to folder existing that are scheduled for copying'\n\n for file_ in self.config.FILES_TO_COPY:\n index = file_.rfind('\\\\')\n dest_file = file_\n if index != -1:\n dest_file = file_[index+1:]\n\n try:\n shutil.copy2(self.dir_helper.publication_path + file_,\n self.temp_dir + dest_file)\n except Exception, ex:\n print '[e] exception {}'.format(str(ex))\n print '[i] file \"{}\" was not copied'.format(self.dir_helper.publication_path + file_)", "def has_generated_waves(self):\n dirname = self.dirname\n name = self.get_name()\n videocluster = os.path.join(dirname, name)\n try:\n listwaves = os.listdir(videocluster)\n except OSError:\n return False\n listw = [os.path.join(videocluster, fil) for fil in listwaves]\n for wav in listw:\n if os.path.isfile(wav) == True:\n continue\n else:\n return False\n return True", "def exists(self):\n basedir = os.path.dirname(self.path)\n\n for filename in self.files:\n path = os.path.join(basedir, filename)\n if not os.path.exists(path):\n return False\n\n return True", "def checkfile(filename, source=None):\n if source:\n # Let's check some sums\n if os.path.exists(filename) and os.path.exists(source):\n src_sha = calchash(source)\n dest_sha = calchash(filename)\n if DRYRUN:\n print(\"{src} hash {src_sha}. 
{dest} hash {dest_sha}\".format(src=source, dest=filename, src_sha=src_sha.hexdigest(), dest_sha=dest_sha.hexdigest()))\n return src_sha.digest() == dest_sha.digest()\n else:\n return os.path.exists(filename)", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)", "def match_path(self, file):\n # Check if path is excluded\n if self.path_exclude:\n if self.path_exclude.match(file.path):\n return False\n\n # Check if path matches\n if self.path_match:\n if not self.path_match.match(file.path):\n return False\n else:\n # File matches path, set definition for statistical purposes (even if it doesn't match the rest)\n # only if it already didn't match another filter\n if not file.definition:\n file.definition = self.name\n\n return True", "def test_copy_required_include_list(self):\n include_list = ['path/to/*', '[abc]?/*/file*']\n\n self.assertTrue(clone_rules._copy_required('path/to/rules.yara', include_list, []))\n self.assertTrue(clone_rules._copy_required(\n 'a1/some/long/path/file_apt.yara', include_list, []))\n self.assertTrue(clone_rules._copy_required('b2/malware/file ROOTKIT.YAR', include_list, []))\n\n self.assertFalse(clone_rules._copy_required('base.yara', include_list, []))\n self.assertFalse(clone_rules._copy_required('path/to/file.txt', include_list, []))\n self.assertFalse(clone_rules._copy_required('a1/file.yara', include_list, []))", "def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True", "def allowed_test_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in app.config['ALLOWED_TEST_EXTENSIONS']", "def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False", "def copy_script(self):\n copy_source = self.script_file\n copy_dest = os.path.join(self.build_dir, os.path.basename(self.script_file))\n\n logging.debug('Copying {src} to {dst}..'.format(src=copy_source, dst=copy_dest))\n\n shutil.copyfile(\n src=copy_source,\n dst=copy_dest\n )", "def has_checksum_file(self):\n return self.checksum_file_path.is_file()" ]
[ "0.5926794", "0.576537", "0.5734732", "0.563512", "0.55802953", "0.5525144", "0.5376832", "0.5344218", "0.5341193", "0.5339224", "0.5329194", "0.53072923", "0.5279975", "0.5243441", "0.5227413", "0.5205067", "0.51915103", "0.51590645", "0.5157132", "0.51411957", "0.512988", "0.5128976", "0.5109894", "0.51074076", "0.5100447", "0.50748885", "0.50723916", "0.5058974", "0.50575316", "0.5048318", "0.5002042", "0.49977744", "0.4990414", "0.4967732", "0.49650538", "0.49596852", "0.49516058", "0.49488568", "0.494385", "0.49377882", "0.49316522", "0.49238265", "0.49150965", "0.49145958", "0.49143487", "0.48979804", "0.48976845", "0.48896983", "0.48673362", "0.48610476", "0.48430365", "0.48415536", "0.48371965", "0.4835668", "0.4833367", "0.48222867", "0.4819173", "0.48167706", "0.48136422", "0.48084792", "0.47996774", "0.47989997", "0.47977546", "0.47971228", "0.4792095", "0.47920734", "0.4791775", "0.47872466", "0.4778059", "0.47751698", "0.47704387", "0.47668466", "0.4764455", "0.47626185", "0.47622582", "0.47618285", "0.475859", "0.47432584", "0.47419918", "0.47409618", "0.47405955", "0.47386506", "0.47293183", "0.47215596", "0.47211185", "0.47203678", "0.4714031", "0.47133908", "0.47112706", "0.4709107", "0.47073656", "0.46977234", "0.46932897", "0.46878624", "0.4683446", "0.4679884", "0.46772403", "0.46771184", "0.46752945", "0.4674874" ]
0.80116713
0
Create the module which consists of declaration statements for each of the values.
def Create(self, finder):
    today = datetime.datetime.today()
    sourceTimestamp = 0
    for module in finder.modules:
        if module.file is None:
            continue
        if module.inZipFile:
            continue
        if not os.path.exists(module.file):
            raise ConfigError("no file named %s (for module %s)",
                    module.file, module.name)
        timestamp = os.stat(module.file).st_mtime
        sourceTimestamp = max(sourceTimestamp, timestamp)
    sourceTimestamp = datetime.datetime.fromtimestamp(sourceTimestamp)
    self.values["BUILD_TIMESTAMP"] = today.strftime(self.timeFormat)
    self.values["BUILD_HOST"] = socket.gethostname().split(".")[0]
    self.values["SOURCE_TIMESTAMP"] = \
            sourceTimestamp.strftime(self.timeFormat)
    module = finder._AddModule(self.moduleName)
    sourceParts = []
    names = list(self.values.keys())
    names.sort()
    for name in names:
        value = self.values[name]
        sourceParts.append("%s = %r" % (name, value))
    source = "\n".join(sourceParts)
    module.code = compile(source, "%s.py" % self.moduleName, "exec")
    return module
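The method above amounts to compiling a block of NAME = value assignment statements into a module code object. A minimal standalone sketch of that final step follows; the dictionary contents, time format string, and module name are illustrative assumptions, and the finder/module bookkeeping is left out.

import datetime
import socket

# Hypothetical constant values; the real method collects these from the build.
values = {
    "BUILD_TIMESTAMP": datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
    "BUILD_HOST": socket.gethostname().split(".")[0],
    "USER_DEFINED": 42,
}

# Emit one "NAME = repr(value)" line per constant, sorted by name,
# then compile the result as if it were a tiny Python module.
source = "\n".join("%s = %r" % (name, values[name]) for name in sorted(values))
code = compile(source, "BUILD_CONSTANTS.py", "exec")

namespace = {}
exec(code, namespace)
print(namespace["USER_DEFINED"])  # 42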
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_module(self, body: list, **kwargs):\n return ast.Module(body=body)", "def genH(self,fp):\n id = 0\n for nm in GetOsekObjects('NM'):\n if(self == nm):\n break\n else:\n id += 1\n fp.write('\\n#define %s %s\\n'%(self.name,id))\n fp.write('#define %s_TYPE NM_%s\\n'%(self.name,self.getValue('TYPE')))\n fp.write('#define %s_tTyp %s\\n'%(self.name,self.getValue('TTYP')))\n fp.write('#define %s_tMax %s\\n'%(self.name,self.getValue('TMAX')))\n fp.write('#define %s_tError %s\\n'%(self.name,self.getValue('TERROR')))\n fp.write('#define %s_tTx %s\\n'%(self.name,self.getValue('TTX')))\n fp.write('#define %s_IDBASE %s\\n'%(self.name,self.getValue('IDBASE')))\n fp.write('#define %s_WINDOWMASK %s\\n'%(self.name,self.getValue('WINDOWMASK')))\n fp.write('#define %s_CONTROLLER %s\\n'%(self.name,self.getValue('CONTROLLER')))", "def create_modules(self):\n self.nmos = ptx(width=self.nmos_size,\n mults=self.nmos_mults,\n tx_type=\"nmos\")\n self.add_mod(self.nmos)\n\n self.pmos = ptx(width=self.pmos_size,\n mults=self.pmos_mults,\n tx_type=\"pmos\")\n self.add_mod(self.pmos)", "def build_dynamic_module(self):\r\n self.code_gen()\r\n\r\n mod = cmodule.DynamicModule()\r\n\r\n # The code of instantiate\r\n # the 1 is for error_storage\r\n code = self.instantiate_code(1 + len(self.args))\r\n instantiate = cmodule.ExtFunction('instantiate', code,\r\n method=cmodule.METH_VARARGS)\r\n #['error_storage'] + argnames,\r\n #local_dict = d,\r\n #global_dict = {})\r\n\r\n # Static methods that can run and destroy the struct built by\r\n # instantiate.\r\n if PY3:\r\n static = \"\"\"\r\n static int {struct_name}_executor({struct_name} *self) {{\r\n return self->run();\r\n }}\r\n\r\n static void {struct_name}_destructor(PyObject *capsule) {{\r\n {struct_name} *self = ({struct_name} *)PyCapsule_GetContext(capsule);\r\n delete self;\r\n }}\r\n \"\"\".format(struct_name=self.struct_name)\r\n else:\r\n static = \"\"\"\r\n static int %(struct_name)s_executor(%(struct_name)s* self) {\r\n return self->run();\r\n }\r\n\r\n static void %(struct_name)s_destructor(void* executor, void* self) {\r\n delete ((%(struct_name)s*)self);\r\n }\r\n \"\"\" % dict(struct_name=self.struct_name)\r\n\r\n # We add all the support code, compile args, headers and libs we need.\r\n for support_code in self.support_code() + self.c_support_code_apply:\r\n mod.add_support_code(support_code)\r\n mod.add_support_code(self.struct_code)\r\n mod.add_support_code(static)\r\n mod.add_function(instantiate)\r\n for header in self.headers():\r\n mod.add_include(header)\r\n for init_code_block in self.init_code() + self.c_init_code_apply:\r\n mod.add_init_code(init_code_block)\r\n\r\n return mod", "def _generate_type(self, n, modifiers=[], emit_declname = True):\n\t\ttyp = type(n)\n\n\t\t#~ print(n, modifiers)\n\n\t\tif typ == pycparser.c_ast.TypeDecl:\n\t\t\ts = ''\n\t\t\tif n.quals: s += ' '.join(n.quals) + ' '\n\t\t\ts += self.visit(n.type)\n\n\t\t\t# Local variables & parameter renaming.\n\t\t\t#\n\t\t\t# Variable name substitution only applies to local variables or parameters names within function prototypes\n\t\t\t# (thus, global variables and function names need to be excluded)\n\t\t\t#\n\t\t\t# case 1: level-0 function parameters (no remanimg for nested parameters)\n\t\t\t# case 2: local variable declaration (thus excluding functions, global vars, struct-enum-union fields, nested parameters)\n\t\t\t#\n\t\t\tif self.__visitingParam == 1: # case 1\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case I)\") % 
(self.__currentFunction,n.declname)\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.paramprefix + self.__currentFunction + '_'+self.inlineInfix #S:\n\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\telse: \n\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.paramprefix + self.__currentFunction + '_'+self.inlineInfix,self.__visitingCompound)]\n\t\t\t\tn.declname = (self.paramprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname) if n.declname else '' #S:\n\t\t\t\n\t\t\telif (self.__visitingParam == 0 and # case 2\n\t\t\t\t\tself.__visitFuncDef == 0 and\n\t\t\t\t\tn.declname not in self.Parser.funcName and\n\t\t\t\t\t#n.declname not in self.Parser.varNames[''] and\n\t\t\t\t\tself.__currentFunction != '' and\n\t\t\t\t\tself.__visitStructUnionEnum == 0):\n\t\t\t\tif self.__debug: print(\"SETTING NEWID for [%s,%s] (case II)\") % (self.__currentFunction,n.declname)\n\t\t\t\t#S: env.local, the followin two lines are replaced with the following if\n\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_'\n\t\t\t\t#n.declname = self.prefix + self.__currentFunction + '_' + n.declname if n.declname else ''\n\t\t\t\tif self.__init: \n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse: \n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.prefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.prefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\telse:\n\t\t\t\t\t#self.newIDs[self.__currentFunction,n.declname] = self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix #S:\n\t\t\t\t\tif (self.__currentFunction,n.declname) in self.newIDs:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname].append((self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)) #S:\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.newIDs[self.__currentFunction,n.declname] = [(self.nondetprefix + self.__currentFunction + '_' +self.inlineInfix,self.__visitingCompound)]\n\t\t\t\t\tn.declname = self.nondetprefix + self.__currentFunction + '_' + self.inlineInfix + n.declname if n.declname else '' #S:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t#print n.declname\n\t\t\t\t#print self.newIDs\n\t\n\n\t\t\tnstr = n.declname if n.declname else ''\n\n\t\t\t# Resolve modifiers.\n\t\t\t# Wrap in parens to distinguish pointer to array and pointer to\n\t\t\t# function syntax.\n\t\t\t#\n\t\t\tfor i, modifier in enumerate(modifiers):\n\t\t\t\tif isinstance(modifier, pycparser.c_ast.ArrayDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '[' + self.visit(modifier.dim) + ']'\n\t\t\t\telif isinstance(modifier, pycparser.c_ast.FuncDecl):\n\t\t\t\t\tif (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):\n\t\t\t\t\t\tnstr = '(' + nstr + ')'\n\t\t\t\t\tnstr += '(' + self.visit(modifier.args) + ')'\n\t\t\t\telif 
isinstance(modifier, pycparser.c_ast.PtrDecl):\n\t\t\t\t\tif modifier.quals:\n\t\t\t\t\t\tnstr = '* %s %s' % (' '.join(modifier.quals), nstr)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnstr = '*' + nstr\n\t\t\tif nstr: s += ' ' + nstr\n\t\t\treturn s\n\t\telif typ == pycparser.c_ast.Decl:\n\t\t\treturn self._generate_decl(n.type)\n\t\telif typ == pycparser.c_ast.Typename:\n\t\t\treturn self._generate_type(n.type)\n\t\telif typ == pycparser.c_ast.IdentifierType:\n\t\t\treturn ' '.join(n.names) + ' '\n\t\telif typ in (pycparser.c_ast.ArrayDecl, pycparser.c_ast.PtrDecl, pycparser.c_ast.FuncDecl):\n\t\t\treturn self._generate_type(n.type, modifiers + [n])\n\t\telse:\n\t\t\treturn self.visit(n)\n\n\n\n\t\tdef visit_Compound(self, n):\n\t\t\tself.__visitingCompound += 1\n\t\t\ts = super(self.__class__, self).visit_Compound(n)\n\t\t\tfor key in self.newIDs: #S: remove pairs that have been added in this compound\n\t\t\t\tstack = self.newIDs[key] \n\t\t\t\tif stack and stack[-1][1] == self.__visitingCompound: \n\t\t\t\t\tstack.pop()\n\t\t\tself.__visitingCompound -= 1\n\t\t\treturn s", "def generate_from(self, ast: ast_pb2.AST):\n for s in self._generate_headlines():\n yield s\n yield f'PYBIND11_MODULE({self._module_name}, m) {{'\n yield I+('m.doc() = \"CLIF generated pybind11-based module for '\n f'{ast.source}\";')\n for decl in ast.decls:\n if decl.decltype == ast_pb2.Decl.Type.FUNC:\n for s in function.generate_from(decl.func):\n yield s\n yield ''\n yield '}'", "def code_gen(blocks):\r\n\r\n decl = \"\"\r\n head = \"\"\r\n tail = \"\"\r\n for block in blocks:\r\n decl += block.declare\r\n head = head + (\"\\n{\\n%s\" % block.behavior)\r\n tail = (\"%s\\n}\\n\" % block.cleanup) + tail\r\n return decl + head + tail", "def new_declaration (var_names) :\r\n\r\n\ttokens = [\"::\"]\r\n\tfor n in var_names :\r\n\t\ttokens += tokenizer.tokenize(n) + [\",\"]\r\n\tdel tokens[-1]\r\n\r\n\tresult = declaration (tokens)\r\n\r\n\treturn result", "def gen_values(self):", "def visit_Declaration(self, node):\n name = self.name_gen.next()\n extend_ops = self.extend_ops\n self.push_name(name)\n base_code = compile(node.base.py_ast, self.filename, mode='eval')\n extend_ops([\n # f_globals = globals()\n (LOAD_GLOBAL, 'globals'),\n (CALL_FUNCTION, 0x0000),\n (STORE_FAST, 'f_globals'),\n\n # eval_ = eval\n (LOAD_GLOBAL, 'eval'),\n (STORE_FAST, 'eval_'),\n\n # foo_cls = eval('Window', toolkit, f_globals)\n # foo = foo_cls.__enaml_call__(identifiers, toolkit)\n (LOAD_FAST, 'eval_'),\n (LOAD_CONST, base_code),\n (LOAD_FAST, 'toolkit'),\n (LOAD_FAST, 'f_globals'),\n (CALL_FUNCTION, 0x0003),\n (LOAD_ATTR, '__enaml_call__'),\n (LOAD_FAST, 'identifiers'),\n (LOAD_FAST, 'toolkit'),\n (CALL_FUNCTION, 0x0002),\n (STORE_FAST, name),\n ])\n\n if node.identifier:\n extend_ops([\n # identifiers['foo'] = foo\n (LOAD_FAST, name),\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n \n visit = self.visit\n for item in node.body:\n visit(item)\n \n extend_ops([\n # return foo\n (LOAD_FAST, name),\n (RETURN_VALUE, None),\n ])\n\n self.pop_name()", "def type_skeleton():\n return {\"base_type\": None,\n \"values\": {\"names\": [], \"codes\": []}}", "def make_func_declarations(self):\n\n\t\tfor name in self.func_dict:\n\t\t\tbody = Lexer(self.func_dict[name]).get_tokens()\n\t\t\ti = body.index('\\\\') + 1 #Start of parameters\n\t\t\tj = body.match_paren(i)\n\t\t\tparam_tokens = body[i + 1: j] #Stuff inside parentheses\n\t\t\t#\t\t\tprint \"param list:\", param_tokens\n\n\t\t\tparams = 
self.split_params(param_tokens)\n\t\t\tparams = map(lambda n: n.split(':'), params)\n\t\t\t#params is now [[<name>,<type>],...]\n\t\t\tc_types = map(lambda n: self.convert_type(*n), params)\n\t\t\t#\t\t\tprint c_types\n\n\t\t\treturn_type = ''\n\t\t\t# +2 to skip over \")\" and \":\"\n\t\t\tif body[j+2] == '(': #Function returns another function\n\t\t\t\t# +3 for [\")\",\"->\",\"<type>\"]\n\t\t\t\tfor x in xrange(j+2, body.match_paren(j+2)+3):\n\t\t\t\t\treturn_type += body[x]\n\t\t\telse: #Function returns a concrete type\n\t\t\t\treturn_type = body[j+2] #+2 to skip over \")\" and \":\"\n\n\t\t\tfunc_type = self.convert_type(name, return_type)\n\t\t\t#\t\t\tprint \"params\", params\n\t\t\t#\t\t\tprint \"c_types\", c_types\n\t\t\t#while True:exec raw_input() in globals(), locals()\n\t\t\tself.cpp_declarations[name] = func_type + '(' + ', '.join(c_types) + ')'\n\n\t\tself.cpp_declarations['main'] = 'int main()' #actually this isn't used", "def setup(self):\n declared = []\n spec = get_specs_for_module(self.module_name)\n\n # Inputs\n for entry in spec.cpacs_inout.inputs:\n if entry.var_name in declared:\n log.info(\"Already declared\")\n elif entry.var_name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[entry.var_name]\n if entry.var_name in Rt.optim_var_dict:\n self.add_input(entry.var_name, val=var[1][0])\n declared.append(entry.var_name)\n\n if declared == []:\n self.add_input(self.module_name + \"_in\")\n declared = []\n\n for entry in spec.cpacs_inout.outputs:\n # Replace special characters from the name of the entry and checks for accronyms\n entry.var_name = change_var_name(entry.var_name)\n\n if entry.var_name in declared:\n log.info(\"Already declared\")\n elif entry.var_name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[entry.var_name]\n self.add_output(entry.var_name, val=var[1][0])\n declared.append(entry.var_name)\n elif (\n \"aeromap\" in entry.var_name and self.module_name == Rt.last_am_module\n ): # == 'PyTornado': #not skf^is_skf:\n # Condition to avoid any conflict with skinfriction\n for name in PARAMS:\n if name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[name]\n self.add_input(name, val=var[1][0])\n declared.append(entry.var_name)\n for name in COEFS:\n if name in Rt.optim_var_dict:\n var = Rt.optim_var_dict[name]\n if is_digit(var[1][0]):\n self.add_output(name, val=var[1][0])\n else:\n self.add_output(name)\n declared.append(entry.var_name)\n\n if declared == []:\n self.add_output(self.module_name + \"_out\")", "def declaration(self) -> global___Statement.Declaration:", "def __init__(self, constants, sorts):\n \n super(ConstantDecl, self).__init__()\n self.constants = constants\n self.sorts = sorts", "def buildPythonDeclaration(self, indent=\"\", parent=\"\", **kwargs):\n if len(parent) == 0:\n parent = \"list\"\n dec = self.comment.buildPythonComment(indent)\n dec += \"%s%s['%s'] = %d\"%(indent, parent, self.name, self.value)\n return dec", "def buildPythonDeclaration(self, indent=\"\", parent=\"\", **kwargs):\n if len(parent) == 0:\n parent = \"list\"\n dec = self.comment.buildPythonComment(indent)\n dec += \"%s%s['%s'] = %d\"%(indent, parent, self.name, self.value)\n return dec", "def Declarations(self):\n decs = list()\n while self.currtok[1].name in {\"INT\", \"FLOAT\", \"BOOLEAN\"}:\n dec = self.Declaration()\n decs.append(dec)\n return DeclarationsExpr(decs)", "def gen_code(self, compile_unit):\n self.builder = irutils.Builder()\n self.ir_var_map = {}\n self.logger.debug(\"Generating IR-code\")\n self.debug_db = debuginfo.DebugDb()\n ir_mod = 
ir.Module(\"main\", debug_db=self.debug_db)\n self.builder.module = ir_mod\n\n # Split declaration into functions and variables:\n functions = []\n variables = []\n for declaration in compile_unit.declarations:\n assert isinstance(declaration, declarations.CDeclaration)\n\n if isinstance(\n declaration,\n (declarations.Typedef, declarations.EnumConstantDeclaration),\n ):\n pass\n elif isinstance(declaration, declarations.FunctionDeclaration):\n functions.append(declaration)\n elif isinstance(declaration, declarations.VariableDeclaration):\n variables.append(declaration)\n else: # pragma: no cover\n raise NotImplementedError(str(declaration))\n\n # Generate code:\n for variable in variables:\n self.gen_global_variable(variable)\n\n for function in functions:\n self.create_function(function)\n\n for function in functions:\n self.gen_function(function)\n\n self.logger.info(\"Finished IR-code generation\")\n return ir_mod", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def make_typedefs(self):\n type_dict = self.python_madz_types_dict + self.mangled_namespace\n res = \"{} = {{}}\\n\".format(type_dict)\n\n for node in self.description.declarations():\n varname = self.python_madz_types + self.mangled_namespace + \"___\" + node.name\n # Hack to get self referential top level structs.\n if (node.type.node_type() == pdl.TypeStruct):\n self._is_top_level = varname\n res += self.gen_type_string(node.type)\n res += \"\\n\"\n else:\n res += \"{} = {}\\n\".format(varname, self.gen_type_string(node.type))\n res += \"{}['{}'] = {}\\n\".format(type_dict, node.name, varname)\n return res", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def generate_cpp():\n cpp_file = AUTOGEN_WARNING\n cpp_file += \"// Implements basic nuclear data functions.\\n\"\n cpp_file += \"#ifndef PYNE_IS_AMALGAMATED\\n\"\n cpp_file += '#include \"atomic_data.h\"\\n'\n cpp_file += '#include \"nucname.h\"\\n'\n cpp_file += \"#endif\\n\"\n cpp_file += \" \\n\"\n cpp_file += \"void pyne::_load_atomic_mass_map_memory() { \\n\"\n cpp_file += \" // header version of atomic weight table data \\n\"\n cpp_file += \" //see if the data 
table is already loaded\\n\"\n cpp_file += \" if(!atomic_mass_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_atomic_mass_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!natural_abund_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_abund_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // calculate the atomic_masses of the elements \\n\"\n cpp_file += \" std::map<int,double> :: iterator it;\\n\"\n cpp_file += \" \\n\"\n cpp_file += \" for (int z = 1; z <= 92 ; z++) {\\n\"\n cpp_file += \" // loop through the natural abundance map\\n\"\n cpp_file += \" double element_atomic_weight = 0.0;\\n\"\n cpp_file += \" for (it = natural_abund_map.begin(); it != natural_abund_map.end() ; ++it){\\n\"\n cpp_file += \" // if the atomic number of the abudance matches the\\n\"\n cpp_file += \" // that of index\\n\"\n cpp_file += \" if(pyne::nucname::znum(it->first) == z) {\\n\"\n cpp_file += \" // take atomic abundance and multiply by mass\\n\"\n cpp_file += (\n \" // to get the mass of that nuclide / 100 since abundance is in %\\n\"\n )\n cpp_file += \" element_atomic_weight += (it->second*atomic_mass_map[it->first]/100.0);\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // insert the abundance of the element into the list\\n\"\n cpp_file += \" atomic_mass_map[z*10000000] = element_atomic_weight;\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_atomic_mass_map() { \\n\"\n cpp_file += generate_atomic_mass()\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_abund_map() { \\n\"\n cpp_file += generate_abundances()\n cpp_file += \"}\\n\"\n return cpp_file", "def generate_source():\n \"\"\"their dependencies\"\"\"\n global dictionary_names, dictionary_slices\n src = \"\"\n for s in dictionary_slices:\n src += deconstruct(s)\n src += \" '\" + pointer_to_name(s)\n src += \"' define\\n\"\n return src + \"\\n\"", "def fortran_type_definition(self) -> str:\n result = ''\n public = ''\n if self.public:\n public = ', public'\n for val_name, val_value in self.values:\n result += 'integer, parameter{} :: {}_{}_{} = {}\\n'.format(\n public, self.f_prefix, self.name, val_name, val_value)\n result += ('integer, parameter{0} :: {1}_{2} = selected_int_kind(9)\\n\\n'\n ).format(public, self.f_prefix, self.name)\n return indent(result, 4*' ')", "def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)", "def _gen_def(self):\n attributes = self.attributes()\n self._def = proto_util.make_operator_def_cpp(\n name=attributes.get('name', 'Op'),\n cache_key=self._cache_key,\n op_type=attributes['op_type'],\n device_option=proto_util.get_device_option(\n self._device.type,\n self._device.index,\n self._seed,\n ),\n **attributes['arguments']\n )", "def visit_Declaration(self, node):\n # This 
creates a function from the generated code ops then\n # wraps that function in an EnamlDeclaration.\n func_code = DeclarationCompiler.compile(node, self.filename)\n name = node.name\n self.code_ops.extend([\n (LOAD_CONST, func_code),\n (MAKE_FUNCTION, 0),\n (STORE_NAME, name),\n (LOAD_NAME, 'EnamlDeclaration'),\n (LOAD_NAME, name),\n (CALL_FUNCTION, 0x0001),\n (STORE_NAME, name),\n ])", "def define(self):\n self.E1.v_str = f'{self._E1.name} + (1 - {self.name}_zE1)'\n self.E2.v_str = f'{self._E2.name} + 2*(1 - {self.name}_zE2)'\n\n self.SE1.v_str = f'{self._SE1.name} + (1 - {self.name}_zSE1)'\n self.SE2.v_str = f'{self._SE2.name} + 2*(1 - {self.name}_zSE2)'\n\n self.A.v_str = f'{self.name}_zE1*{self.name}_zE2 * ' \\\n f'{self.name}_E1*{self.name}_SE1*' \\\n f'exp({self.name}_E1*log({self.name}_E2*{self.name}_SE2/' \\\n f'({self.name}_E1*{self.name}_SE1))/({self.name}_E1-{self.name}_E2))'\n\n self.B.v_str = f'-log({self.name}_E2*{self.name}_SE2/({self.name}_E1*{self.name}_SE1))/' \\\n f'({self.name}_E1 - {self.name}_E2)'", "def __init__(self):\n self.name = ''\n self.variables = []\n self.assumptions = []\n self.guarantees = []", "def parameters(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement.Declaration]:", "def _createModuleObj(self):\n ModuleOutputSolnDomain.__init__(self)", "def __init__(self, parser, **kwargs):\n self.quadruples = parser.quadruples\n self.func_directory = parser.func_directory\n self.type_directory = parser.type_directory\n constants = parser.constants\n\n for type_, variables in constants.items():\n for addr, value in variables.items():\n if type_ == 'Int':\n value = int(value) if value != '[]' else value\n elif type_ == 'Float':\n value = float(value)\n self.set_value(addr, value, type_, True)", "def generate_symbol_definitions(mode, symbols, prefix, definition):\n direct = []\n tabled = []\n for ii in symbols:\n direct += [ii.generate_rename_direct(prefix)]\n tabled += [ii.generate_rename_tabled(prefix)]\n if \"vanilla\" == mode:\n tabled = direct\n return template_symbol_definitions % (definition, \"\\n\".join(direct), \"\\n\".join(tabled))", "def visit_Declaration(self, node):\n code_ops = self.code_ops\n name = node.name\n description = DeclarationCompiler.compile(node, self.filename)\n code_ops.extend([\n (SetLineno, node.lineno),\n (LOAD_NAME, '_make_enamldef_helper_'), # Foo = _make_enamldef_helper_(name, base, description, globals)\n (LOAD_CONST, name),\n (LOAD_NAME, node.base),\n (LOAD_CONST, description), # description is a marshalable dict\n (LOAD_NAME, 'globals'),\n (CALL_FUNCTION, 0x0000),\n (CALL_FUNCTION, 0x0004),\n (STORE_NAME, name),\n ])\n\n # We now have a new Declarative subclass stored at 'name' to\n # which we need to add any user defined attributes and events.\n code_ops.extend([\n (LOAD_NAME, name),\n (LOAD_ATTR, '_add_user_attribute'),\n ])\n\n # Dispatch to add any class-level info contained within the\n # declaration body. Visit nonstrict since not all child nodes\n # are valid at the class-level. 
The '_add_user_attribute'\n # class method is left on the top of the stack and popped\n # at the end of the visitors.\n for child_node in node.body:\n self.visit_nonstrict(child_node)\n\n code_ops.append((POP_TOP, None))", "def buildPythonDeclaration(self, indent=\"\", parent=\"\", **kwargs):\n return \"\"", "def buildPythonDeclaration(self, indent=\"\", parent=\"\", **kwargs):\n name = self.name\n if kwargs.has_key(\"typedefName\"):\n name = kwargs[\"typedefName\"]\n # do we have a parent\n if len(parent) != 0:\n name = parent+\".\"+name\n name = CAMEL_CASE_SEARCH.sub(CAMEL_CASE_REPLACE_STRING,name)\n\n dec = indent+\"# %s\\n\" % (name)\n dec += self.comment.buildPythonComment(indent)\n dec += indent+\"%s = biDirDict()\\n\" % (name)\n off = 0\n for entry in self.entries:\n dec += entry.buildPythonDeclaration(indent, name, offset=off)+\"\\n\"\n # does this reset the value, either way increment after\n if entry.getValue() != None:\n off = entry.getValue()\n off += 1\n return dec", "def buildPythonDeclaration(self, indent=\"\", parent=\"\", **kwargs):\n name = self.name\n if len(parent) != 0:\n name = parent+\".\"+self.name\n dec = indent+\"# %s\\n\" % (name)\n dec += self.comment.buildPythonComment(indent)\n dec += indent+\"%s = biDirDict()\\n\" % (name)\n off = 0\n for entry in self.entries:\n dec += entry.buildPythonDeclaration(indent, name, offset=off)+\"\\n\"\n # does this reset the value, either way increment after\n if entry.getValue() != None:\n off = entry.getValue()\n off += 1\n return dec", "def make(self, code, descriptions={}):\n # Removes line continuation symbols from declarations\n # to make parsing easier.\n lines = code_parser.remove_continuations_symbols(code).split('\\n')\n\n for ln in lines:\n if 'Attribute VB_Name = \"' in ln:\n mod_name = self.__get_mod_name(ln)\n if (mod_name in descriptions):\n doc = module_doc.ModuleDoc(\n mod_name, descriptions[mod_name])\n else:\n doc = module_doc.ModuleDoc(mod_name)\n\n elif 'Public Sub' in ln or 'Public Function' in ln:\n meth_name = self.__get_method_name(ln)\n\n args = self.__get_args(ln)\n formatted = self.__format_args(list(args.values()))\n key = mod_name + '.' 
+ meth_name + f' ({formatted})'\n if (key in descriptions):\n doc.addMethod(meth_name, args,\n descriptions[key]['short-description'])\n else:\n doc.addMethod(meth_name, args)\n\n return doc", "def __init__(self, name, value):\n super(VariableDeclarationNode, self).__init__()\n self._name = name\n self._value = value\n self._type = None", "def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file", "def to_declaration(self):\n declaration = \"\\'\" + self.name + \"\\'\"\n declaration += \" \" + self.type\n if self.notnull:\n declaration += \" NOT NULL\"\n if self.primary:\n if self.autoincrement:\n declaration += \" PRIMARY KEY AUTOINCREMENT\"\n else:\n declaration += \" PRIMARY KEY\"\n if self.unique:\n declaration += \" UNIQUE\"\n if self.default:\n declaration += \" DEFAULT \\'%s\\'\" % self.default\n return declaration", "def gen_python_addr_module(module_name,root,creg_base,sreg_base):\n fo = open(module_name+\".py\",\"w\")\n fo.write(\"\"\n \"\\\"\\\"\\\"This file is automatically generated by the \"+sys.argv[0]+\" script\\n\"\n \"All modifications should be done in that file\\n\\\"\\\"\\\"\\n\"+\n root.name+\"_dict=\")\n (res,creg_base,sreg_base)=root.gen_python_addr(creg_base,sreg_base)\n fo.write(res+\"\\n\")\n fo.write(\"\"\n \"#Convert the dictionary to object, as described in https://stackoverflow.com/a/6993694/1735409\\n\"\n \"class Struct(object):\\n\"\n \" def __init__(self, data):\\n\"\n \" for name, value in data.items():\\n\"\n \" setattr(self, name, self._wrap(value))\\n\"\n \" def _wrap(self, value):\\n\"\n \" if isinstance(value, (tuple, list, set, frozenset)):\\n\"\n \" return type(value)([self._wrap(v) for v in value])\\n\"\n \" else:\\n\"\n \" return Struct(value) if isinstance(value, dict) else value\\n\"+\n root.name+\"=Struct(\"+root.name+\"_dict)\\n\")\n fo.close()", "def 
make_module(module_name, module_type, parameters):\n\n module = {module_name: {}}\n \n for parameter in parameters:\n required = input(\"Is \" + parameter + \" required (y/n)?\")\n \n if required == 'n':\n module[module_name][parameter] = None \n\n elif required == 'y':\n module[module_name][parameter] = 'r'\n \n with open('all_modules.yml', 'a') as file_object:\n yaml.dump(module, file_object, default_flow_style=False, sort_keys=False)", "def generate_ast(\n source_code: str, source_id: int, contract_name: str\n) -> tuple[Settings, vy_ast.Module]:\n return vy_ast.parse_to_ast_with_settings(source_code, source_id, contract_name)", "def buildPythonDeclaration(self, indent=\"\", parent=\"\", **kwargs):\n return \"\"", "def create_modules(*names):\n source = 'attr = {0!r}'\n created_paths = []\n mapping = {}\n state_manager = None\n uncache_manager = None\n try:\n temp_dir = tempfile.mkdtemp()\n mapping['.root'] = temp_dir\n import_names = set()\n for name in names:\n if not name.endswith('__init__'):\n import_name = name\n else:\n import_name = name[:-len('.__init__')]\n import_names.add(import_name)\n if import_name in sys.modules:\n del sys.modules[import_name]\n name_parts = name.split('.')\n file_path = temp_dir\n for directory in name_parts[:-1]:\n file_path = os.path.join(file_path, directory)\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n created_paths.append(file_path)\n file_path = os.path.join(file_path, name_parts[-1] + '.py')\n with open(file_path, 'w') as file:\n file.write(source.format(name))\n created_paths.append(file_path)\n mapping[name] = file_path\n uncache_manager = util.uncache(*import_names)\n uncache_manager.__enter__()\n state_manager = util.import_state(path=[temp_dir])\n state_manager.__enter__()\n yield mapping\n finally:\n if state_manager is not None:\n state_manager.__exit__(None, None, None)\n if uncache_manager is not None:\n uncache_manager.__exit__(None, None, None)\n support.rmtree(temp_dir)", "def __init__(self):\n self.variables = [] # List of all variables in certain scope.\n self.field_id = 0 # Id of next field varibale.\n self.argumen_id = 0 # Id of next argument variable.\n self.local_id = 0 # Id of next local variable.\n self.static_id = 0 # Id of next static variable.", "def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")", "def define(self, scope=None):\n declaration = self._declaration.define_with_args(self._name, typedef='complex_and_params', scope=scope)\n lines = [\n '/* EMG_WRAPPER {} */\\n'.format(self._name),\n declaration + \" {\\n\"\n ]\n lines.extend(['\\t{}\\n'.format(stm) for stm in self.body])\n lines.append(\"}\\n\")\n return lines", "def make_modules(self, config):\n pass", "def _declaration_variable(self, node: ET.Element):\n # variable names\n variables_and_values = self.transform_all_subnodes(\n self.get_one(node, './variables'), skip_empty=True,\n ignored={'entity-decl-list__begin', 'entity-decl-list','attr-spec' })\n if not variables_and_values:\n _LOG.error('%s', ET.tostring(node).decode().rstrip())\n raise SyntaxError('at least one variable expected in variables list')\n variables = [var for var, _ in variables_and_values]\n # base type of variables\n base_type = self.transform_one(self.get_one(node, './type'))\n\n # dimensionality information (only for array types)\n dimensions_node = node.find('./dimensions')\n variable_dimensions = [getattr(var, 'fortran_metadata', {}).get('dimensions', None)\n for var in variables]\n has_variable_dimensions = any([_ is not None for _ in 
variable_dimensions])\n if has_variable_dimensions and not self._split_declarations:\n raise NotImplementedError('inline dimensions not implemented yet')\n if dimensions_node is not None and has_variable_dimensions:\n raise SyntaxError(\n 'declaration dimension data as well as per-variable dimension data present')\n if dimensions_node is not None:\n dimensions = self.transform_one(dimensions_node)\n assert len(dimensions) >= 1\n self.ensure_import('static_typing', 'st')\n annotation = make_st_ndarray(base_type, dimensions)\n annotations = [annotation for _ in variables]\n elif has_variable_dimensions:\n self.ensure_import('static_typing', 'st')\n annotations = [base_type if _ is None else make_st_ndarray(base_type, _)\n for _ in variable_dimensions]\n else:\n annotations = [base_type for _ in variables]\n\n # initial values\n if dimensions_node is not None:\n values = [None if val is None else make_numpy_constructor('array', val, base_type)\n for _, val in variables_and_values]\n elif has_variable_dimensions:\n assert len(variables_and_values) == len(variable_dimensions)\n values = [None if val is None\n else (val if dim is None else make_numpy_constructor('array', val, base_type))\n for (_, val), dim in zip(variables_and_values, variable_dimensions)]\n else:\n values = [val for _, val in variables_and_values]\n\n metadata = {'is_declaration': True}\n intent_node = node.find('./intent')\n if intent_node is not None:\n metadata['intent'] = intent_node.attrib['type']\n\n attributes = ('allocatable', 'asynchronous', 'external', 'intrinsic', 'optional',\n 'parameter', 'pointer', 'protected', 'save', 'target', 'value', 'volatile')\n for attribute in attributes:\n if node.find('./attribute-{}'.format(attribute)) is not None:\n metadata['is_{}'.format(attribute)] = True\n\n if metadata:\n metadata_node = horast_nodes.Comment(\n value=ast.Str(' Fortran metadata: {}'.format(repr(metadata))), eol=False)\n\n _handled = {'variables', 'type', 'dimensions', 'intent'}\n extra_results = self.transform_all_subnodes(node, ignored={\n 'type-declaration-stmt'} | _handled | {'attribute-{}'.format(_) for _ in attributes})\n if extra_results:\n _LOG.warning('ignoring additional information in the declaration:\\n%s', extra_results)\n\n if not self._split_declarations:\n raise NotImplementedError()\n assignments = [{\"name\":var, \"type\":ann, \"value\":val}\n for var, ann, val in zip(variables, annotations, values)]\n if metadata:\n new_assignments = []\n for assignment in assignments:\n assignment.update({\"metadata\":metadata})\n new_assignments.append(assignment)\n new_assignments.append(metadata_node)\n assignments = new_assignments\n\n return assignments", "def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)", "def GenerateCode(self):\n print \"Generating code...\"\n for type in self.getObjectTypes():\n generator = __import__(\"codegen.Cpp\" + type, globals(), locals(), [''])\n print \"Generating code for objects of type: %s\" % type\n generator.GenerateCode(self)", "def __init__(self, node, declare):\n preproc.__init__(self, node, declare, \"define\", \"#define\")\n if len(self.value) == 0:\n self.string = \" %s\" % (self.name)\n else:\n self.string = \" %s %s\" % (self.name, self.value)", "def _generate_headlines(self):\n includes = set()\n for decl in self._ast.decls:\n includes.add(decl.cpp_file)\n for include in includes:\n yield f'#include \"{include}\"'\n yield '#include \"third_party/pybind11/include/pybind11/pybind11.h\"'\n yield ''\n yield 'namespace py = pybind11;'\n yield ''", 
"def __init__ ( self , syms , defn ):\n\n self.logic = cognitiveDefiner.convertDefinition(syms,defn)", "def buildPythonDeclaration(self):\n raise Exception(\"Unimplemented function in symbol: \"+self.name)", "def _createModuleObj(self):\n # Create the SWIG module object to provide access to the C++ object.\n ModuleUniformVelModel.__init__(self)\n return", "def gen_declaration_statement(self, statement):\n declaration = statement.declaration\n if isinstance(declaration, declarations.VariableDeclaration):\n if declaration.storage_class == \"static\":\n self.gen_local_static_variable(declaration)\n else:\n self.gen_local_variable(declaration)\n elif isinstance(declaration, declarations.FunctionDeclaration):\n # Ehm, okay, we have declared a function, no worries.\n # Just ensure that it does not have a body.\n assert not declaration.body\n else:\n raise NotImplementedError(str(declaration))", "def _create_def_list(self, parent):\n\n definition_list = nodes.definition_list()\n parent.append(definition_list)\n\n return definition_list", "def GenPy(mod,fname):\n f = open(fname, 'w')\n title = \"\"\"#\n# This file is generated automatically\n# Author:IAN\n# http://www.iknot.org\n\"\"\"\n f.write(title)\n for i in mod.__dict__.keys():\n s = \"def \" + i + \"():\" + \"\\n\"\n f.write(s)\n s = \" return\"\n f.write(s + \"\\n\")\n f.close()\n kcs_ui.message_noconfirm('py file saved to:%s'%(fname))", "def gen_module(root_path, walls_height=3, floor_thickness=.3):\n levels = [gen_level(root_path + lv.rstrip() + '/')(\n floor_thickness = floor_thickness,\n walls_height = walls_height)\n for lv in os.popen('ls ' + root_path)]\n \n walls_hpc = []\n windows_hpc = []\n doors_hpc = []\n handrails_hpc = []\n floors_hpc = []\n stairs_foots = []\n lv = 0\n for walls, windows, doors, handrails, floor, stair_foot in levels:\n level_height = walls_height * lv\n \n walls_hpc.append(T(3)(level_height)(walls))\n windows_hpc.append(T(3)(level_height)(windows))\n doors_hpc.append(T(3)(level_height + floor_thickness)(doors))\n handrails_hpc.append(T(3)(level_height)(handrails))\n floors_hpc.append(T(3)(level_height)(floor))\n \n stairs_foots.append(stair_foot+[level_height])\n \n lv += 1\n \n walls_hpc = UNION(walls_hpc)\n windows_hpc = UNION(windows_hpc)\n doors_hpc = STRUCT(doors_hpc)\n handrails_hpc = UNION(handrails_hpc)\n floors_hpc = UNION(floors_hpc)\n \n cubes_hpc = []\n stairs_hpc = []\n for i in range(0, len(stairs_foots), 2):\n stair, cube = gen_stairs(stairs_foots[i], stairs_foots[i+1])\n cubes_hpc.append(cube)\n stairs_hpc.append(T(3)(floor_thickness)(stair))\n \n stairs_hpc = STRUCT(stairs_hpc)\n \n cubes_hpc = T(3)(floor_thickness)(STRUCT(cubes_hpc))\n floors_hpc = DIFFERENCE([floors_hpc, cubes_hpc])\n \n return STRUCT([\n SKEL_1(walls_hpc),\n windows_hpc,\n doors_hpc,\n handrails_hpc,\n floors_hpc,\n stairs_hpc])", "def map(self) -> global___Statement.Declaration:", "def __init__(self, type_name, fields_names, fields_values):\n super(RecordLiteralExpressionNode, self).__init__()\n self._type_name = type_name\n self._fields_names = fields_names\n self._fields_values = fields_values", "def _declare_parameters(self):\n def to_modelica(arg):\n \"\"\" Convert to Modelica array.\n \"\"\"\n # Check for strings and booleans\n if isinstance(arg, str):\n return '\\\\\"' + arg + '\\\\\"'\n elif isinstance(arg, bool):\n if arg is True:\n return 'true'\n else:\n return 'false'\n try:\n return '{' + \", \".join(to_modelica(x) for x in arg) + '}'\n except TypeError:\n return repr(arg)\n dec = list()\n\n for k, v in 
list(self._parameters_.items()):\n # Dymola requires vectors of parameters to be set in the format\n # p = {1, 2, 3} rather than in the format of python arrays, which\n # is p = [1, 2, 3].\n # Hence, we convert the value of the parameter if required.\n s = to_modelica(v)\n dec.append('{param}={value}'.format(param=k, value=s))\n\n return dec", "def module_constructor(loader, node):\n new_module = Module.__new__(Module)\n yield new_module\n values = loader.construct_mapping(node, deep=True)\n values[\"constraint\"] = ec2rlcore.constraint.Constraint(values[\"constraint\"])\n values[\"path\"] = Module.temp_path\n # Strip trailing newlines from string values where yaml added them (e.g. title, helptext)\n for key in values.keys():\n if isinstance(values[key], str):\n values[key] = values[key].rstrip()\n new_module.__init__(**values)", "def create_module(module_dict: Dict[str, Any], nets: List[Net]) -> Module:\n m_data = module_dict['module']\n footprint = m_data[0].replace('\"', \"\")\n layer = convert_to_layers(get_dict_by_key(m_data, 'layer')['layer'])[0]\n coords = get_dict_by_key(m_data, 'at')['at']\n if len(coords) == 3 and \"B.\" in layer.name:\n coords[2] = (float(coords[2]) + 180) % 360\n coords[1] = str(-1*float(coords[1]))\n attr = get_dict_by_key(m_data, 'attr')\n smd: bool = True if (attr and attr['attr'] == 'smd') else False\n module_texts: List[FpText] = get_texts(m_data, 'fp_text')\n figures: List[Union[FpPoly, FpCircle, FpArc, FpLine]] = get_lines(m_data, 'fp_line')\n figures.extend(get_circles(m_data, 'fp_circle'))\n pads = get_pads(m_data, nets)\n ref = [text.text for text in module_texts if text.text_type ==TextType.reference][0]\n update_nets_with_pads(pads, nets, ref)\n figures.extend(get_polys(m_data, 'fp_poly'))\n figures.extend(get_arcs(m_data, 'fp_arc'))\n return Module(footprint=footprint, layer=layer, coords=coords, smd=smd,\n texts=module_texts, pads=pads, figures=figures, extrapads=list())", "def generate_mutant_module(self, mutated_ast, module_shortname=\"\"):\n mutant_module_shortname = module_shortname\n mutant_code = compile(mutated_ast, mutant_module_shortname, \"exec\")\n mutant_module = imp.new_module(mutant_module_shortname)\n try:\n exec mutant_code in mutant_module.__dict__\n except TypeError:\n print 'checkpoint'\n return mutant_module", "def declare(module_name, *func_name):\n for func in func_name:\n func = SqlFunction.normalize_name(func)\n if func not in SqlFunction._definitions:\n SqlFunction._definitions[func] = module_name", "def test_types_python(self):\n self.single_file_generator('py', PythonGenerator, filtr=metadata_filter)\n\n # Make sure the python is valid\n with open(os.path.join(self.source_path, 'types.py')) as f:\n pydata = f.read()\n spec = compile(pydata, 'test', 'exec')\n module = ModuleType('test')\n exec(spec, module.__dict__)", "def __init__(self, total, function_name, param_sorts, return_sort):\r\n super(FunctionDecl, self).__init__()\r\n global functions\r\n self.total = total\r\n self.function_name = function_name\r\n self.param_sorts = param_sorts\r\n self.return_sort = return_sort\r\n self.basic = basic\r\n self.static = static\r\n\r\n function_info = []\r\n function_info.append(static)\r\n function_info.append(param_sorts)\r\n function_info.append(return_sort)\r\n functions[function_name] = function_info", "def _create_tables(self):\n self._c.execute(\"CREATE TABLE IF NOT EXISTS token ( \"\n \"id INTEGER PRIMARY KEY, \"\n \"name CHAR(50) NOT NULL)\")\n \n self._c.execute(\"CREATE TABLE IF NOT EXISTS val ( \"\n \"id INTEGER 
PRIMARY KEY, \"\n \"usd_raised REAL NOT NULL, \"\n \"month CHAR NOT NULL, \"\n \"token_sale_price REAL NOT NULL, \"\n \"current_token_price REAL NOT NULL, \"\n \"token_return CHAR NOT NULL, \"\n \"eth_return CHAR NOT NULL, \"\n \"btc_return CHAR NOT NULL, \"\n \"token_eth_return CHAR NOT NULL, \"\n \"token_btc_return CHAR NOT NULL, \"\n \"datetime DATETIME NOT NULL, \"\n \"token_name CHAR(50) NOT NULL, \"\n \"FOREIGN KEY(token_name) REFERENCES token(name) \"\n \"ON DELETE CASCADE ON UPDATE CASCADE)\")\n\n self._conn.commit()", "def test_typedef00401m_type_def00401m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def buildPythonDeclaration(self, indent=\"\", parent=\"\", **kwargs):\n dec = self.comment.buildPythonComment(indent)\n if self.sym != None:\n return dec+self.sym.buildPythonDeclaration(indent, \"\", typedefName=self.name)\n else:\n return dec+self.define.buildPythonDeclaration(indent, \"\", typedefName=self.name)", "def process_declaration(self, decl,datatype):\n # Looping character by character in the string which contains only the part where variables are mentioned(without the datatype keyword).\n filename = \"sym.csv\"\n name = \"\"\n i=0\n pos=-1\n flag=0 #used to track identifiers with having digits\n with open(filename, 'a') as csvfile:\n csvwriter = csv.writer(csvfile)\n for c in decl:\n \n pos += 1\n i=0\n # Checking if the variable mentioned is an array declaration. This will be executed only if '[' character has been encountered earlier.\n if self.flag_array:\n # Checks if the '[' has a matching ']'\n if (c == \"]\"):\n self.structure += c # Saving the current status of the array structure.\n self.flag_array = False # Setting flag to say that the array declaration has been processed completely.\n self.array_structure[self.identifiers[-1]] = self.structure # Saving the array structure(or dimension) into a dictionary.\n continue # Jumping into the next iteration.\n # Checks if next variable is going to be declared.\n elif (c == \",\"):\n continue # Jumping to the next iteration.\n # Checks if declaration statement is ending.\n elif (c == \";\"):\n print(self.structure)\n break # Breaking out of the loop. 
It can also be return.\n # If none of the conditions match then it is the dimension being mentioned.\n else:\n self.structure += c # Saving the dimension into the variable that is used for saving the overall structure of the array.\n continue # Jumping to the next iteration.\n\n # Checking if the variable is being assigned with a value during the declaration.\n \n if c == \"=\":\n flag=1 #this means the encountered digits are values\n self.identifiers += self.temp # Saving the characters parsed so far as an identifier, since '=' operator marks the end of the identifier and beginning of the value.\n\n if(len(name.strip())>0 and re.search(r\"[a-zA-Z_][a-zA-Z_0-9]*\",name.strip()) and not(self.check(name.strip()))):\n if(re.search(r\"[0-9]\", decl).start()):\n indexValue = re.search(r\"[0-9\\.]+\", decl[pos:]).start()\n lastValue = re.search(r\"[0-9\\.]+\", decl[pos:]).end()\n value = decl[pos+indexValue:pos+lastValue]\n csvwriter.writerow(['Identifier',datatype,name.strip(),value,''])\n name=\"\"\n \n continue\n csvwriter.writerow(['Identifier',datatype,name.strip(),'',''])\n name = \"\"\n \n \n\n # Checking if an array is being declared.\n elif c == \"[\":\n self.identifiers += self.temp # String read so far is saved as an identifier, since '[' marks the end of identifier name and begin of the dimension.\n self.temp = \"\" # Emptying the string which contained the name of the identifier.\n if len(name.strip())>0 and re.search(r\"[a-zA-Z_][a-zA-Z_0-9]*\",name.strip()) and not(self.check(name.strip())):\n csvwriter.writerow(['Identifer',datatype+\"Array\",name.strip(),'',''])\n name = \"\"\n self.structure += c # Adding '[' into the structure variable.\n self.flag_array = True # Marking the beginning of dimension.\n\n # Checking if the declaration is for a function.\n elif c == \"(\":\n self.temp = \"\" # Emptying the string which holds the identifier name since it is a function name and not that of any variable.\n break\n\n # Checking if the declaration is marking end of a variable name.\n elif c == \",\":\n flag=0 #after \",\" it's possible that identifier has digit in it. So, make flag 0 again\n self.identifiers += self.temp # Adding the name parsed so far into the list of all variables.\n self.temp = \"\" # Emptying the string which holds the name of the identifier.\n if len(name.strip())>0 and re.search(r\"[a-zA-Z_][a-zA-Z_0-9]*\",name.strip()) and not(self.check(name.strip())):\n # if(re.search())\n \n csvwriter.writerow(['Identifer',datatype,name.strip(),'',''])\n name = \"\"\n continue\n\n # Checking if the statement has ended.\n elif c == \";\":\n self.identifiers += self.temp # Adding the string parsed so far into the list of identifiers.\n self.temp = \"\" # Emptying the string that holds the name of the identifier.\n if len(name.strip())>0 and re.search(r\"[a-zA-Z_][a-zA-Z_0-9]*\",name.strip()) and not(self.check(name.strip())):\n csvwriter.writerow(['Identifer',datatype,name.strip(),'',''])\n name = \"\"\n break\n\n # Checking for whitespaces within the line.\n elif c == \" \":\n continue\n #Doesn't take values as identifier names\n elif(re.search(r'[0-9]',c)):\n if(flag == 1): #check if it's value or identifier\n continue\n else:\n self.temp += c\n name += c\n\n # If none of the conditions satisfy, then the character is part of the identifier name. 
Hence adding it to the name string.\n else:\n self.temp += c\n name += c", "def create(name):\n if not SchModule._ready:\n raise ValueError(\"not mounted\")\n\n schdir = SchModule.DIR.hpath(name)\n\n if path.exists(schdir):\n raise Exception(\"Already exists\")\n\n # create this scheme directory\n os.makedirs(schdir)\n\n with codecs.open(path.join(schdir, SchModule.DESCR), \"w\", \"utf8\") as f:\n timestamp = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n user = os.getenv(\"USER\", os.getenv(\"USERNAME\", \"Unknown\"))\n f.write(SchModule.DESCRTEMPLATE % locals())\n\n with codecs.open(path.join(schdir, SchModule.PYMODULE), \"w\", \"utf8\") as f:\n f.write(SchModule.PYMODULETEMPLATE)", "def build_mapping() -> str:\n templates = make_module_docstring(\"Template classes for GBD entities\", __file__)\n templates += make_import(\"typing\", [\"Union\", \"Tuple\"])\n templates += (\n make_import(\n \".id\",\n [\n \"c_id\",\n \"s_id\",\n \"hs_id\",\n \"me_id\",\n \"cov_id\",\n \"rei_id\",\n \"scalar\",\n ],\n )\n + SPACING\n )\n templates += make_gbd_record()\n\n for entity, info in get_base_types().items():\n templates += SPACING\n templates += make_record(entity, **info)\n\n return templates", "def get_declarations(self):\n return \"extern const unsigned int %s;\\n\" % self.name", "def make_globals(py_c_api):\n for fn in py_c_api:\n gv = ir.GlobalValue(fn.name, fn.signature, external=True)\n if gv.badval: gv.add_metadata(badval=ir.Const(gv.badval))\n if gv.maybe: gv.add_metadata(cpy_occurred=True)\n yield fn.name, gv", "def _make_source(name, init, body):\n code = \"\"\"\n #include <Python.h>\n\n %(body)s\n\n PyMODINIT_FUNC\n PyInit_%(name)s(void) {\n %(init)s\n }\n \"\"\" % dict(\n name=name, init=init, body=body,\n )\n return code", "def visit_Declaration(self, node):\n self.block = node.name\n obj = {\n 'enamldef': True,\n 'type': node.name,\n 'base': node.base,\n 'doc': node.doc,\n 'lineno': node.lineno,\n 'identifier': node.identifier,\n 'filename': self.filename,\n 'block': self.block,\n 'children': [],\n 'bindings': [],\n }\n self.stack.append(obj)\n for item in node.body:\n self.visit(item)", "def generate(self):\n\n if not self.validated:\n self.validate()\n\n # output all the functions\n for func in self.functions:\n for line in func.generate():\n yield line\n\n # then the footer with the constants\n if self.consts:\n yield CONSTS_HEADER\n for value, idx in self.consts.iteritems():\n yield CONST.format(idx=idx, value=value)", "def add_IO(self, inputs, outputs):\n self.input_seqs = []\n self.input_structs = []\n for (seq_name, wc), struct_name in inputs:\n self.assertTrue( seq_name in self.seqs, \"Declare statement references undefined sequence '%s'\" % seq_name )\n if wc:\n self.input_seqs.append( self.seqs[seq_name].wc )\n else:\n self.input_seqs.append( self.seqs[seq_name] )\n \n if struct_name:\n self.assertTrue( struct_name in self.structs, \"Declare statement references undefined structure '%s'\" % struct_name )\n self.input_structs.append( self.structs[struct_name] )\n else:\n self.input_structs.append(None)\n \n self.output_seqs = []\n self.output_structs = []\n for (seq_name, wc), struct_name in outputs:\n self.assertTrue( seq_name in self.seqs, \"Declare statement references undefined sequence '%s'\" % seq_name )\n if wc:\n self.output_seqs.append( self.seqs[seq_name].wc )\n else:\n self.output_seqs.append( self.seqs[seq_name] )\n \n if struct_name:\n self.assertTrue( struct_name in self.structs, \"Declare statement references undefined structure '%s'\" % struct_name )\n 
self.output_structs.append( self.structs[struct_name] )\n else:\n self.output_structs.append(None)", "def set_values(self,module):\n if type(module) == dict:\n self.set_value(\"name\",module[\"name\"])\n self.set_value(\"hrname\",module[\"hrname\"])\n self.set_value(\"version_major\",module[\"version_major\"])\n self.set_value(\"version_minor\",module[\"version_minor\"])\n self.set_value(\"revision\",module[\"revision\"])\n if module.has_key(\"signature\"):\n self.set_value(\"signature\",module[\"signature\"])\n elif module.__class__.__name__ == \"Module\":\n pass #TODO IMPLEMENT / DISCUSS AFTER IMPLEMENTING MODULE-SUBSYSTEM", "def output_data_definitions(self):\n return {}", "def _define(self):\n definition = []\n q = QuantumRegister(1, \"q\")\n rule = [\n (U2Gate(0, pi), [q[0]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition", "def generate(module_name, code):\n try:\n ast_tree = ast.parse(code)\n except Exception:\n raise RuntimeError('Bad Python code')\n\n visitor = SearchSpaceGenerator(module_name)\n try:\n visitor.visit(ast_tree)\n except AssertionError as exc:\n raise RuntimeError('%d: %s' % (visitor.last_line, exc.args[0]))\n return visitor.search_space, astor.to_source(ast_tree)", "def _create_module(self, rootdir):\n name = 'module_' + rootdir.get_name()\n moduleobj = Module(name, rootdir)\n rootdir.set_module(moduleobj)\n self._modules[name] = moduleobj", "def _define_generators(self):\n\t\treturn {\n\t\t \"transaction_id\" : Mgcp._generate_uint32,\n\t\t \"connection_id\" : Mgcp._generate_uint32,\n\t\t \"request_id\" : Mgcp._generate_uint32,\n\t\t \"timestamp\" : Mgcp._generate_timestamp\n\t\t}", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. 
#\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for 
Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = 
re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. \r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def __init__(self, total, function_name, param_sorts, return_sort):\n super(FunctionDecl, self).__init__()\n global functions\n self.total = total\n self.function_name = function_name\n self.param_sorts = param_sorts\n self.return_sort = return_sort\n self.basic = basic\n self.static = static\n\n function_info = []\n function_info.append(static)\n function_info.append(param_sorts)\n function_info.append(return_sort)\n functions[function_name] = function_info", "def install_def(self, name, dstmt, interleave=False):\n delem = SchemaNode.define(name, interleave=interleave)\n delem.attr[\"name\"] = name\n self.defs[name] = delem\n self.handle_substmts(dstmt, delem)", "def fortran_type_definition(self) -> str:\n result = ''\n if self.public:\n result += f'public :: {self.f_prefix}_{self.name}\\n'\n\n result += f'type {self.f_prefix}_{self.name}\\n'\n for value in self.values:\n result += f' logical :: {value} = .false.\\n'\n result += '\\n'\n result += 'contains\\n'\n result += f' procedure :: to_int => {self.f_prefix}_{self.name}_to_int_\\n'\n result += 'end type\\n'\n return indent(result, 4*' ')", "def make_values(sv, lines):\r\n for num, lig in enumerate(lines): # browse program lines\r\n\r\n # process conditions and values\r\n if lig.startswith(When): # conditions (unprocessed) \r\n clau=no_brackets(lig[len(When):].strip(Space)) # no surrounding spaces or brackets\r\n elif lig.startswith(Col): # values (unprocessed. 
Normally, clau has been defined)\r\n if clau is not None:\r\n vlu=no_brackets(lig[len(Col):].strip(Space)) # no surrounding spaces or brackets\r\n nod.clauses+=[((clau, None, None), (vlu, None, None))] # store into list of doublets (condition, value)\r\n clau=None # only one value per clause\r\n else:\r\n print(\"\\n\", Err_empty_name)\r\n print(lig)\r\n raise ReferenceError\r\n\r\n # process object names\r\n else: # neither condition nor value -> defined name\r\n nod=sv.Object[lig]\r\n if nod.equivalent: \r\n nod.equivalent=lines[num+2][len(Col):] # equivalent value is two lines down \r", "def test_typedef00201m_type_def00201m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef00202m_type_def00202m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00202m/typeDef00202m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00202m/typeDef00202m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def __init__(self):\n # just the list of class/construct types\n self.lut = {}\n self.lut[\"struct\"] = structure\n self.lut[\"typedef\"] = typedef\n self.lut[\"define\"] = define\n self.lut[\"enum\"] = enum\n self.lut[\"enumEntry\"] = enumEntry\n self.lut[\"ifdef\"] = ifdef\n self.lut[\"ifndef\"] = ifndef\n self.lut[\"hashIf\"] = hashIf\n self.lut[\"hashElse\"] = hashElse\n self.lut[\"hashElif\"] = hashElif\n self.lut[\"endif\"] = endif\n self.lut[\"banner\"] = banner\n self.lut[\"general\"] = general\n self.lut[\"listDefine\"] = listDefine\n self.lut[\"listEntry\"] = listEntry\n self.lut[\"listNumEls\"] = listNumEls\n self.lut[\"union\"] = union\n\n # and the dictionary of all symbols we declare\n self.symbols = {}", "def writeCode(doc):\n\n comp_template = \"model.addCompartment(vol=%s, comp_id='%s');\"\n species_template = \"model.addSpecies(species_id='%s', amt=%s, comp='%s');\"\n param_template = \"model.addParameter(param_id='%s', val=%s, units='%s');\"\n rxn_template = (\n \"model.addReaction(reactants=%s, products=%s, \"\n \"expression='%s', local_params=%s, rxn_id='%s');\"\n )\n event_template = (\n \"model.addEvent(trigger='%s', assignments=%s, persistent=%s, \"\n \"initial_value=%s, priority=%s, delay=%s, event_id='%s');\"\n )\n event_defaults = [True, False, \"0\", 0]\n assignrule_template = \"model.addAssignmentRule(var='%s', math='%s');\"\n raterule_template = \"model.addRateRule(var='%s', math='%s', rr_id='%s');\"\n initassign_template = \"model.addInitialAssignment(symbol='%s', math='%s')\"\n init_template = (\n \"import simplesbml\\nmodel = simplesbml.sbmlModel(time_units='%s', \"\n \"extent_units='%s', sub_units='%s', level=%s, version=%s);\"\n )\n init_defaults = [\"min\", \"Molar\", \"Molar\", 3, 1]\n command_list = []\n\n if doc.getLevel() == 1:\n warnings.warn(\"Warning: SimpleSBML does not support SBML Level 1.\")\n\n props = libsbml.ConversionProperties()\n props.addOption(\"flatten comp\", True)\n result = doc.convert(props)\n if result != libsbml.LIBSBML_OPERATION_SUCCESS:\n raise SystemExit(\"Conversion failed: (\" + str(result) + \")\")\n\n mod = doc.getModel()\n comps = mod.getListOfCompartments()\n 
species = mod.getListOfSpecies()\n params = mod.getListOfParameters()\n rxns = mod.getListOfReactions()\n events = mod.getListOfEvents()\n rules = mod.getListOfRules()\n print(\"rules\", rules)\n inits = []\n if doc.getLevel() == 3 or (doc.getLevel() == 2 and doc.getVersion() > 1):\n inits = mod.getListOfInitialAssignments()\n\n timeUnits = \"min\" # second\n substanceUnits = \"Molar\" # mole\n extentUnits = \"Molar\" # mole\n if doc.getLevel() == 3:\n timeUnits = mod.getTimeUnits()\n extentUnits = mod.getExtentUnits()\n substanceUnits = mod.getSubstanceUnits()\n level = mod.getLevel()\n version = mod.getVersion()\n init_list = [timeUnits, extentUnits, substanceUnits, level, version]\n for i in range(0, 5):\n if init_list[i] == init_defaults[i]:\n init_list[i] = \"del\"\n\n command_list.append(\n init_template\n % (init_list[0], init_list[1], init_list[2], init_list[3], init_list[4])\n )\n\n for comp in comps:\n if comp.getId() != \"c1\":\n if comp.getId()[0] == \"c\" and comp.getId()[1 : len(comp.getId())].isdigit():\n if comp.getSize() == 1e-15:\n command_list.append(comp_template % (\"del\", \"del\"))\n else:\n command_list.append(comp_template % (comp.getSize(), \"del\"))\n else:\n if comp.getSize() == 1e-15:\n command_list.append(comp_template % (\"del\", comp.getId()))\n else:\n command_list.append(comp_template % (comp.getSize(), comp.getId()))\n\n for s in species:\n conc = s.getInitialConcentration()\n amt = s.getInitialAmount()\n sid = s.getId()\n if s.getCompartment() == \"c1\":\n comp = \"del\"\n else:\n comp = s.getCompartment()\n bc = s.getBoundaryCondition()\n if bc:\n sid = \"$\" + sid\n if isnan(conc) or amt > conc:\n command_list.append(species_template % (sid, str(amt), comp))\n else:\n command_list.append(species_template % (\"[\" + sid + \"]\", str(conc), comp))\n\n for p in params:\n val = p.getValue()\n pid = p.getId()\n if p.getUnits() == \"per_second\":\n units = \"del\"\n else:\n units = p.getUnits()\n isDelay = pid.find(\"Delay\")\n if isDelay == -1:\n command_list.append(param_template % (pid, str(val), str(units)))\n\n for v in rxns:\n vid = v.getId()\n if vid[0] == \"v\" and vid[1 : len(vid)].isdigit():\n vid = \"del\"\n reactants = []\n for r in v.getListOfReactants():\n reactants.append(\n (str(r.getStoichiometry()) + \" \" + r.getSpecies()).replace(\"1.0 \", \"\")\n )\n products = []\n for p in v.getListOfProducts():\n products.append(\n (str(p.getStoichiometry()) + \" \" + p.getSpecies()).replace(\"1.0 \", \"\")\n )\n expr = libsbml.formulaToString(v.getKineticLaw().getMath())\n local_params = {}\n local_ids = []\n local_values = []\n for k in v.getKineticLaw().getListOfParameters():\n local_ids.append(k.getId())\n local_values.append(k.getValue())\n local_params = dict(zip(local_ids, local_values))\n if len(local_params) == 0:\n local_params = \"del\"\n command_list.append(\n rxn_template % (str(reactants), str(products), expr, str(local_params), vid)\n )\n\n for e in events:\n persistent = True\n initialValue = False\n priority = \"0\"\n eid = e.getId()\n if len(eid) == 0 or (eid[0] == \"e\" and eid[1 : len(eid)].isdigit()):\n eid = \"del\"\n if doc.getLevel() == 3:\n persistent = e.getTrigger().getPersistent()\n initialValue = e.getTrigger().getInitialValue()\n priority = e.getPriority()\n if isinstance(priority, libsbml.Priority):\n priority = libsbml.formulaToL3String(priority.getMath())\n else:\n priority = \"0\"\n tri = libsbml.formulaToL3String(e.getTrigger().getMath())\n did = e.getDelay()\n if isinstance(did, libsbml.Delay):\n delay = 
libsbml.formulaToL3String(did.getMath())\n else:\n delay = \"0\"\n assigns = e.getListOfEventAssignments()\n var = []\n values = []\n for assign in assigns:\n var.append(assign.getVariable())\n values.append(libsbml.formulaToL3String(assign.getMath()))\n assigns = dict(zip(var, values))\n\n event_list = [persistent, initialValue, priority, delay]\n for i in range(0, 4):\n if event_list[i] == event_defaults[i]:\n event_list[i] = \"del\"\n\n command_list.append(\n event_template\n % (\n tri,\n str(assigns),\n event_list[0],\n event_list[1],\n event_list[2],\n event_list[3],\n eid,\n )\n )\n\n for r in rules:\n rid = r.getId()\n print(\"rid\")\n # if rid[0] == 'Rate' and rid[1:len(rid)].isdigit():\n # rid = 'del'\n sym = r.getVariable()\n math = libsbml.formulaToL3String(r.getMath())\n if r.getTypeCode() == libsbml.SBML_ASSIGNMENT_RULE:\n command_list.append(assignrule_template % (sym, math))\n elif r.getTypeCode() == libsbml.SBML_RATE_RULE:\n command_list.append(raterule_template % (sym, math, rid))\n else:\n pass\n\n for i in inits:\n sym = i.getSymbol()\n math = libsbml.formulaToL3String(i.getMath())\n command_list.append(initassign_template % (sym, math))\n\n commands = \"\\n\".join(command_list)\n commands = sub(r\"\\w+='?del'?(?=[,)])\", \"\", commands)\n commands = sub(r\"\\((, )+\", \"(\", commands)\n commands = sub(r\"(, )+\\)\", \")\", commands)\n commands = sub(\"(, )+\", \", \", commands)\n return commands" ]
[ "0.5886545", "0.58752614", "0.585057", "0.57920617", "0.5724532", "0.5722173", "0.56763613", "0.5671061", "0.5638687", "0.55701363", "0.556386", "0.55578065", "0.5548401", "0.553911", "0.5494058", "0.54860514", "0.54860514", "0.54849756", "0.5464117", "0.5413439", "0.54076755", "0.54031336", "0.54031336", "0.5374271", "0.5348807", "0.5322103", "0.5316241", "0.52586365", "0.52364296", "0.5235198", "0.5228953", "0.5226991", "0.52051866", "0.52049327", "0.5195163", "0.51752794", "0.5174328", "0.5171252", "0.5169882", "0.51542693", "0.5150888", "0.51396495", "0.51391166", "0.5134635", "0.5104629", "0.5099643", "0.50930864", "0.5082493", "0.5079803", "0.5061977", "0.50619465", "0.5059345", "0.50521684", "0.50426644", "0.5041504", "0.5037689", "0.5028599", "0.50269675", "0.5025793", "0.5025505", "0.50250864", "0.5023167", "0.50111926", "0.5010429", "0.5008809", "0.500294", "0.49954662", "0.4989152", "0.49837393", "0.4982253", "0.4981953", "0.4981641", "0.49775293", "0.49611625", "0.49569213", "0.49565485", "0.4956348", "0.49557656", "0.49494773", "0.4944693", "0.49444658", "0.4939192", "0.49388948", "0.49386895", "0.49371225", "0.49336946", "0.49301895", "0.4929361", "0.49282673", "0.49266082", "0.49122372", "0.490941", "0.4908356", "0.48995113", "0.48971736", "0.4885159", "0.48843586", "0.4883337", "0.48813885", "0.48780304" ]
0.48852018
95
Test that a dotted path is properly converted to a file address.
def test_get_file_path(self):
    path = self.corpus.get_file_path('chatterbot.corpus.english')

    self.assertIn(
        os.path.join('chatterbot_corpus', 'data', 'english'),
        path
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_pathname(self):\n self.assertTrue(Util.is_pathname_valid('./myrandomvalidfilename.dat'))\n self.assertTrue(Util.is_pathname_valid('myrandomvalidfilename.dat'))", "def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)", "def test_io_path_string(args, string):\n assert deepr.io.Path(*args) == string", "def test_get_absolute_path():\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"../foo\"), \"/bar/foo\")\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"/foo\"), \"/foo\")", "def test_path(self):\n self.assertEqual(self.ftp_case.path, '/rfc/rfc1808.txt')\n self.assertEqual(self.ldap_case.path, '/c=GB')\n self.assertEqual(self.news_case.path, \n 'comp.infosystems.www.servers.unix')\n self.assertEqual(self.telnet_case.path, '/')\n self.assertEqual(self.urn_case.path, \n 'oasis:names:specification:docbook:dtd:xml:4.1.2')", "def test_sanitized_filename(self):\n value = \"/absolute/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"\n\n value = \"../relative/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"", "def test_realpath(self):\n print real_upath(\"ref with space\")\n self.assertTrue(real_upath(\"ref with space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_upath(\"ref\\ with\\ space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_ppath(\"ref with space\").endswith(\"ref with space\"))\n self.assertTrue(real_ppath(\"ref\\ with\\ space\").endswith(\"ref with space\"))", "def test_invalid_pathname(self):\n self.assertFalse(Util.is_pathname_valid(''))", "def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)", "def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def try_as_file(inp):\n file = pathlib.Path(inp)\n\n if not file.is_absolute():\n file = pathlib.Path.cwd() / file\n\n if not file.exists():\n return None\n\n try:\n # this will throw if it is a symlink that has a loop in it so that it\n # never points to a base file.\n if file.is_file():\n return file\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, ex.strerror.lower() ) )\n return None", "def _get_fullpath(self, address):\n address = os.path.abspath(address)\n if len(address) < 4 or address[-4:] != \".dta\":\n address = address + \".dta\"\n return address", "def abspath(path: str) -> str:\n pass", "def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")", "def test_fpath():\n\n assert fpath(None, 
'data.json') == 'data.json'\n assert fpath('/path/', 'data.json') == '/path/data.json'\n assert fpath(Path('/path/'), 'data.json') == '/path/data.json'", "def test_remove_dot_segments():\n assert (normalize_url(\"http://www.example.com/../a/b/../c/./d.html\") ==\n \"http://www.example.com/a/c/d.html\")", "def test_normalize_path(self):\n self.assertEqual(normalize_path(\"//////\"), \"/\")\n self.assertEqual(normalize_path(\"//\"), \"/\")\n self.assertEqual(normalize_path(\"//foo/bar//baz\"), \"/foo/bar/baz\")\n self.assertEqual(normalize_path(\"//foo/bar//baz/\"), \"/foo/bar/baz/\")\n self.assertEqual(normalize_path(\"//f%20oo/bar\"), \"/f oo/bar\")", "def test_relativise_src_under():\n src = pathlib.Path(\"/tmp/foo/bar/baz/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"../../dst.txt\")", "def test_parse_url_path() -> None:\n assert indieauth._parse_url(\"http://ex.com\").path == \"/\"", "def stringyfy(path):\n try:\n # Pathlib support\n path = path.__fspath__()\n except AttributeError:\n pass\n if hasattr(path, 'name'): # passed in a file\n path = path.name\n if isinstance(path, str):\n return path\n raise ValueError(f'Cannot convert {path} to a path')", "def test_local_path():\n URL_PATH = \"http://www.google.com\"\n URL_PATH1 = \"www.google.com\"\n LOCAL_PATH = \"tests/index.html\"\n\n assert URL_PATH == is_local(URL_PATH)\n assert \"file\" in is_local(os.path.abspath(LOCAL_PATH))\n assert URL_PATH1 == is_local(URL_PATH1)", "def test_get_contracts_addresses_bad_path():\n addresses = ContractHandler.get_contracts_addresses(\n _NETWORK, address_file=\"/bin/foo/bar/tralala\"\n )\n assert addresses is None", "def test_path(self, fs_path, fs):\n assert fs.path == fs_path", "def test_relativise_dst_under():\n src = pathlib.Path(\"/tmp/foo/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/bar/baz/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"bar/baz/dst.txt\")", "def test_verify_path_1(self):\n result = basic.verify_path(self.test_filepath1, \"file\")\n self.assertTrue(result)", "def path_validate(path):\n # functionality to be added later\n return path", "def format_path (in_path):\n return os.path.realpath(os.path.expanduser(in_path))", "def validate_safe_path(value):\n base = \"/input/\"\n\n try:\n new_path = safe_join(base, value)\n except SuspiciousFileOperation:\n raise ValidationError(\"Relative paths are not allowed.\")\n\n valid_path = new_path[len(base) :]\n\n if value != valid_path:\n raise ValidationError(f\"Invalid file path, should be {valid_path}.\")", "def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)", "def test_accepts_filenames_with_spaces(tmp_path):\n os.chdir(tmp_path)\n Path(\"foo bar\").write_text(\"foo bar stuff\")\n Path(\"baz\").write_text(\"baz stuff\")\n assert _ls_visiblefile_paths() == [\n str(Path(\"baz\").resolve()),\n str(Path(\"foo bar\").resolve()),\n ]\n assert Path.cwd() == Path(tmp_path)", "def isabs(path):\n # If detected as storage path, it is an absolute path.\n return True", "def parse_file_uri(path):\n p = urlparse.urlparse(path)\n\n if p.scheme in [\"https\", \"http\"]:\n return True, path\n elif p.scheme == \"file\":\n # url to path name, i.e: convert %20 to space\n path = urllib.url2pathname(p.path)\n return False, os.path.abspath(os.path.join(p.netloc, path))\n else:\n # treat as a local file\n return False, urllib.unquote(path)", "def is_path(self, s):\n return True", "def test_verify_path_2(self):\n result = 
basic.verify_path(str(self.test_filepath1) + \"abcxyz\", \"file\")\n self.assertFalse(result)", "def test_get_file_with_dots_extension(self) -> None:\n path = \"/home/user/file.name.ext2\"\n result = get_file_extension(path)\n self.assertEqual(result, \"ext2\")", "def test_client_id_path() -> None:\n assert indieauth._parse_client_id(\"http://ex.com\").path == \"/\"\n assert indieauth._parse_client_id(\"http://ex.com/hello\").path == \"/hello\"\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello/.world\").path == \"/hello/.world\"\n )\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello./.world\").path\n == \"/hello./.world\"\n )\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/.\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/./yo\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/../yo\")", "def resolved_path(path):\n path = os.path.abspath(path)\n elements = path_elements(path)\n result = \"\"\n for element in elements:\n segment = element\n segment_path = os.path.join(result, segment)\n if os.path.islink(segment_path):\n segment = os.readlink(segment_path)\n result = os.path.join(result, segment)\n result = os.path.normpath(result)\n return result", "def testIsFile(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingFilePath=P(self.existingFilePathStr)\r\n nonExistingFilePath=P(self.nonExistingFilePathStr)\r\n existingValidFileSymlinkPath=P(self.existingValidSymlinkFilePathStr)\r\n existingInvalidFileSymlinkPath=\\\r\n P(self.existingInvalidSymlinkFilePathStr)\r\n existingDirPath=P(self.existingDirPathStr)\r\n\r\n\r\n # 1\r\n self.assertEquals(existingFilePath.isFile(),True,\r\n '%r is a file'%str(existingFilePath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingFilePath.isFile(),False,\r\n 'File %r does not exist'%str(nonExistingFilePath))\r\n\r\n # 3\r\n self.assertEquals(existingValidFileSymlinkPath.isFile(),True,\r\n '%r is a file'%str(existingValidFileSymlinkPath))\r\n\r\n # 4\r\n self.assertEquals(existingInvalidFileSymlinkPath.isFile(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidFileSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingDirPath.isFile(),False,\r\n '%r is a dir'%str(existingDirPath))", "def test_pathlib_obj(self):\n \"\"\"\n We do this because pygame functions internally use pg_EncodeString\n to decode the filenames passed to them. 
So if we test that here, we\n can safely assume that all those functions do not have any issues\n with pathlib objects\n \"\"\"\n encoded = encode_string(pathlib.PurePath(\"foo\"), \"utf-8\")\n self.assertEqual(encoded, b\"foo\")\n\n encoded = encode_string(pathlib.Path(\"baz\"))\n self.assertEqual(encoded, b\"baz\")", "def test_verify_path2_8(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_local_filepath_helper():\n expected_local_filepath = TEST_LOCAL_CONFIG_PATH.replace('.cfg', '_local.cfg')\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH) == TEST_LOCAL_CONFIG_PATH\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH, True) == expected_local_filepath", "def containing_path(path: Union[str, os.PathLike]) -> str:\n if not path:\n return str(path)\n url = urlparse(str(path))\n if url.scheme:\n if url.path:\n return os.path.dirname(path)\n return url.scheme + \"://\"\n return os.path.dirname(os.path.realpath(path))", "def is_posix_path3(my_path):\n return \"/\" in str(my_path)", "def test_verify_path2_10(self):\n result, msg = basic.verify_path2(self.file, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def is_absolute_path(path: str) -> bool:\n # This is a rather weak test, may be enhanced if desired\n return \"//\" in path \\\n or \":\" in path \\\n or path.startswith(\"/\")", "def test_import_string_invalid_path(self):\n invalid_path = 'some invalid module path'\n with pytest.raises(ImportError) as error:\n utils.import_string(invalid_path)\n assert '{} doesn\\'t look like a module path'.format(\n invalid_path) == str(error.value)", "def is_posix_path2(my_path):\n return \"/\" in str(my_path)", "def check_address(self):\n if RE_PATH.search(self.address) is None:\n raise ValueError(\"{} isn't an acceptable path\".format(repr(self.address)))", "def test_set_path_1(self):\n self.file.touch()\n # Since using tempfile, there is an added quirk.\n # the tempfile path may be a symlink, so passing it through set path\n # will resolve the symlink, changing the path, and breaking the test.\n self.file = self.file.resolve()\n output = basic.set_path(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertIsInstance(output, Path)\n with self.subTest():\n self.assertEqual(str(self.file), str(output))", "def realpath(path: str) -> str:\n pass", "def test_verify_path2_4(self):\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_links_have_path(self):\n link = DownloadLink(path=\"/home/not/real\")\n self.assertEqual(link.path, \"/home/not/real\")", "def test_fspath(self, env: yaenv.Env):\n from os import fspath\n from filecmp import cmp\n assert fspath(env) == 'tests/.env'\n assert cmp(env, 'tests/.env')", "def check_path(p, cwd):\n if not path.isabs(p):\n p = path.normpath(path.join(cwd,p))\n return p", "def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n path = path + os.sep\n if path[0:len(root)] == root:\n return True\n return False", "def test_append_slash():\n assert normalize_url(\"http://example.com\") == \"http://example.com/\"", "def 
test_verify_path2_9(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_verify_path2_7(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def is_absolute_url(path):\n return path.startswith(\"http\")", "def test_find_path_bi():\n assert True", "def test_get_contracts_addresses_good_path_custom_network(tmp_path):\n # tmp_path:pathlib.Path is special pytest feature\n\n # create & fill test file\n d = tmp_path / \"subdir\"\n d.mkdir()\n address_file = d / \"address.json\"\n address_file.write_text('{\"my_custom_network\" : \"myvals\"}')\n\n # the main test\n addresses = ContractHandler.get_contracts_addresses(\n network=\"my_custom_network\", address_file=address_file\n )\n assert addresses == \"myvals\"", "def ensure_file_abs_path_valid(file_abs_path: Text) -> Text:\n project_meta = load_project_meta(file_abs_path)\n raw_abs_file_name, file_suffix = os.path.splitext(file_abs_path)\n file_suffix = file_suffix.lower()\n\n raw_file_relative_name = convert_relative_project_root_dir(raw_abs_file_name)\n if raw_file_relative_name == \"\":\n return file_abs_path\n\n path_names = []\n for name in raw_file_relative_name.rstrip(os.sep).split(os.sep):\n\n if name[0] in string.digits:\n # ensure file name not startswith digit\n # 19 => T19, 2C => T2C\n name = f\"T{name}\"\n\n if name.startswith(\".\"):\n # avoid \".csv\" been converted to \"_csv\"\n pass\n else:\n # handle cases when directory name includes dot/hyphen/space\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_\")\n\n path_names.append(name)\n\n new_file_path = os.path.join(\n project_meta.RootDir, f\"{os.sep.join(path_names)}{file_suffix}\"\n )\n return new_file_path", "def verify_path(path):\n if path is None:\n sys.exit('Program terminated. 
You must specify a correct path.')\n path = Path(path)\n assert path.exists(), f'The specified path was not found: {path}.'\n return path", "def test_get_native_path(self) -> None:\n import tempfile\n f, filename = tempfile.mkstemp(text=True)\n os.close(f)\n data = '1234567890 ' + filename\n try:\n with open(filename, 'w') as f:\n f.write(data)\n with open(get_native_path(filename), 'r') as f:\n assert f.read() == data\n finally:\n try:\n os.unlink(filename)\n except OSError:\n pass", "def check_relpath(path1, path2, exception=True):\r\n p1 = op.normpath(path1)\r\n p2 = op.normpath(op.join(path1, path2))\r\n if op.relpath(p1, p2).endswith(op.basename(p1)):\r\n if exception:\r\n raise ValueError(\"Invalid path '%s'\" % path2)\r\n return False\r\n return p2", "def validate_short_path(short_path):", "def test_verify_path_5(self):\n result = basic.verify_path(str(self.test_directory1))\n self.assertTrue(result)", "def test_github_path_purepath():\n p = github_api.GithubPath('/tensorflow/datasets/tree/master/')\n sub_p = p / 'some_folder'\n assert isinstance(sub_p, github_api.GithubPath)\n assert str(p) == '/tensorflow/datasets/tree/master'\n assert p == github_api.GithubPath.from_repo('tensorflow/datasets')", "def validated_path(basepath, env = None, *path):\n if basepath is not None:\n result = os.path.realpath(os.path.join(os.path.expanduser(basepath), *path))\n\n if env is not None and not os.path.isdir(result):\n env.warn(result + ' not found.')\n\n return result\n else:\n raise ValueError", "def test_split_fullpath_with_route_domain():\n\n # Expected input must have route specified, otherwise reject\n tests = [\n [\"/Partition/1.2.3.4%0:80\", \"/Partition\", \"1.2.3.4\", 0, 80],\n [\"/Part/Folder/1.2.3.4%1:443\", \"/Part/Folder\", \"1.2.3.4\", 1, 443],\n [\"/Part/::ffff:0:0%2.8080\", \"/Part\", \"::ffff:0:0\", 2, 8080],\n [\"/Part/1.2.3.4:8080\", None, None, None, None],\n [\"/Part/::ffff:0:0.8080\", None, None, None, None]\n ]\n\n for test in tests:\n results = split_fullpath_with_route_domain(test[0])\n assert results[0] == test[1]\n assert results[1] == test[2]\n assert results[2] == test[3]\n assert results[3] == test[4]", "def is_valid_path(input_path):\n if not os.path.exists(input_path):\n print('\\'{}\\' is not a valid path.'.format(input_path))\n exit(1)\n return input_path", "def test_get_contracts_addresses_good_path_use_network_alias(tmp_path):\n assert ContractHandler.network_alias == {\"ganache\": \"development\"}\n\n # create & fill test file\n d = tmp_path / \"subdir\"\n d.mkdir()\n address_file = d / \"address.json\"\n address_file.write_text('{\"development\" : \"myvals\"}') # not \"ganache\"\n\n # the main test\n addresses = ContractHandler.get_contracts_addresses(\n network=\"ganache\", address_file=address_file\n )\n assert addresses == \"myvals\"", "def testIsAbsolute(self):\r\n data={\r\n # 1\r\n 'relative':['dir/file',False],\r\n # 2\r\n 'absoluteRoot':['/dir/file',True],\r\n # 3\r\n # FIX:'absoluteHome':['~/file',True]\r\n # 4\r\n # FIX:'absoluteUser':['~ufsiTest/file',True]\r\n }\r\n\r\n for k in data.iterkeys():\r\n r1=ufsi.NativeUnixPath(data[k][0]).isAbsolute()\r\n r2=data[k][1]\r\n self.assertEquals(r1,r2,\r\n '%s: isAbsolute result was %r but should be %r'\r\n %(k,r1,r2))", "def test_host_path(self):\n url = create_url(\n host=\"www.example.com\", path=\"path/to/resource\", scheme_no_ssl=\"http\"\n )\n self.assertEqual(url, \"http://www.example.com/path/to/resource\")", "def nettest_to_path(path, allow_arbitrary_paths=False):\n if allow_arbitrary_paths and 
os.path.exists(path):\n return path\n\n fp = FilePath(config.nettest_directory).preauthChild(path + '.py')\n if fp.exists():\n return fp.path\n else:\n raise e.NetTestNotFound(path)", "def is_filename_safe(value):\n return value == str_to_filename(value)", "def test_path_percent_encoding():\n assert (normalize_url(\"http://example.com/hello world{}\") ==\n \"http://example.com/hello%20world%7B%7D\")", "def addresses_in_spec_path(self, spec_path):", "def is_posix_path(my_path: str) -> bool:\n return \"/\" in str(my_path)", "def test_invalid_path(self):\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, 'foo')", "def test_path(self):\n options = ControlOptions()\n options.parseOptions([b\"--data-path\", b\"/var/xxx\"])\n self.assertEqual(options[\"data-path\"], FilePath(b\"/var/xxx\"))", "def win_path_check(path):\n if IS_WIN:\n return path.replace(\"\\\\\", \"/\").replace(\":\", \"\\\\:\")\n return path", "def getFulldirAddress(x):\n x_first10 = x[:10]\n if x_first10.find(\":\\\\\") >=0 or x_first10.startswith(\"/\") or x_first10.find(\":/\") >=0:\n return x\n else:\n return os.path.join(os.getcwd(),x)", "def test_s3uri_is_valid(path) -> bool:\n expected = path.startswith(\"s3://\")\n assert S3URI(path).is_valid == expected\n assert not expected or type(AutoURI(path)) == S3URI", "def test_verify_path2_3(self):\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_set_path_4(self, verify_path2_mock):\n test_file = Path(\"/dir1/dir2/../file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(\"/dir1/file.txt\")\n self.assertEqual(output, exp)", "def test_escape_argument_simple_path():\n encoded = win_functions.escape_argument(\"C:\\\\some\\\\path\")\n assert encoded == \"C:\\\\some\\\\path\"", "def is_file_o(value):\n if not (type(value) is str and os.path.split(value)[0]):\n return False\n else:\n return True", "def test_unicode_path():\n assert (normalize_url(\"http://example.com/résumé\") ==\n \"http://example.com/r%C3%A9sum%C3%A9\")", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def testNoForwardSlashOrAt(self, illegal_char):\n self.assertRaises(ValueError, dicom_path.Path, 'project%cid' % illegal_char,\n 'l', 'd', 's')\n self.assertRaises(ValueError, dicom_path.Path, 'p',\n 'locat%cion' % illegal_char, 'd', 's')\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l',\n 'data%cset' % illegal_char, 's')\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd',\n 'st%core' % illegal_char)\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's',\n '1.2%c3' % illegal_char)\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', '1.2.3',\n '4.5%c6' % illegal_char)\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', '1.2.3',\n '4.5.6', '7.8%c9' % illegal_char)", "def handle_dotted_path(\n value: str, author: str\n) -> Tuple[List[str], Path, ConfigLoader, Optional[ComponentId]]:\n parts = value.split(\".\")\n\n root = parts[0]\n if root not in ALLOWED_PATH_ROOTS:\n raise AEAException(\n \"The root of the dotted path must be one of: {}\".format(ALLOWED_PATH_ROOTS)\n )\n\n if (\n len(parts) < 2\n or parts[0] == AGENT\n and len(parts) < 2\n or parts[0] == VENDOR\n and len(parts) < 5\n or parts[0] != AGENT\n and len(parts) < 3\n ):\n raise 
AEAException(\n \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n # if the root is 'agent', stop.\n if root == AGENT:\n resource_type_plural = AGENTS\n path_to_resource_configuration = Path(DEFAULT_AEA_CONFIG_FILE)\n json_path = parts[1:]\n component_id = None\n elif root == VENDOR:\n # parse json path\n resource_author = parts[1]\n resource_type_plural = parts[2]\n resource_name = parts[3]\n\n # extract component id\n resource_type_singular = resource_type_plural[:-1]\n try:\n component_type = ComponentType(resource_type_singular)\n except ValueError as e:\n raise AEAException(\n f\"'{resource_type_plural}' is not a valid component type. Please use one of {ComponentType.plurals()}.\"\n ) from e\n component_id = ComponentId(\n component_type, PublicId(resource_author, resource_name)\n )\n\n # find path to the resource directory\n path_to_resource_directory = (\n Path(\".\") / VENDOR / resource_author / resource_type_plural / resource_name\n )\n path_to_resource_configuration = (\n path_to_resource_directory\n / RESOURCE_TYPE_TO_CONFIG_FILE[resource_type_plural]\n )\n json_path = parts[4:]\n if not path_to_resource_directory.exists():\n raise AEAException( # pragma: nocover\n \"Resource vendor/{}/{}/{} does not exist.\".format(\n resource_author, resource_type_plural, resource_name\n )\n )\n else:\n # navigate the resources of the agent to reach the target configuration file.\n resource_type_plural = root\n resource_name = parts[1]\n\n # extract component id\n resource_type_singular = resource_type_plural[:-1]\n component_type = ComponentType(resource_type_singular)\n resource_author = author\n component_id = ComponentId(\n component_type, PublicId(resource_author, resource_name)\n )\n\n # find path to the resource directory\n path_to_resource_directory = Path(\".\") / resource_type_plural / resource_name\n path_to_resource_configuration = (\n path_to_resource_directory\n / RESOURCE_TYPE_TO_CONFIG_FILE[resource_type_plural]\n )\n json_path = parts[2:]\n if not path_to_resource_directory.exists():\n raise AEAException(\n \"Resource {}/{} does not exist.\".format(\n resource_type_plural, resource_name\n )\n )\n\n config_loader = ConfigLoader.from_configuration_type(resource_type_plural[:-1])\n return json_path, path_to_resource_configuration, config_loader, component_id", "def test_set_path_3(self, verify_path2_mock):\n home = Path(\"~\")\n home = home.expanduser()\n test_file = Path(\"~/path/to/file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(home, \"path/to/file.txt\")\n self.assertEqual(output, exp)", "def nt_path_to_posix_path(path):\r\n path = path.replace(\"\\\\\", \"/\")\r\n parts = path.split(\":\")\r\n if len(parts) > 1:\r\n return \"/\" + parts[0].lower() + parts[1]\r\n return path", "def any_to_uri(uri_or_path):\n if os.path.splitdrive(uri_or_path)[0]:\n return path_to_file_uri(uri_or_path)\n u = urlparse(uri_or_path)\n return uri_or_path if u.scheme else path_to_file_uri(uri_or_path)", "def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))", "def test_BenchmarkSuite_invalid_path_access(benchmark_suite: typing.Callable):\n bs = benchmark_suite()\n with test.Raises(TypeError):\n _ = bs.path", "def testFromStringInvalid(self):\n self.assertRaises(ValueError, dicom_path.FromString, 'invalid_path')", "def test_uris(self):\r\n invariant = [ \r\n u\"ftp://ftp.is.co.za/rfc/rfc1808.txt\",\r\n u\"http://www.ietf.org/rfc/rfc2396.txt\",\r\n 
u\"ldap://[2001:db8::7]/c=GB?objectClass?one\",\r\n u\"mailto:[email protected]\",\r\n u\"news:comp.infosystems.www.servers.unix\",\r\n u\"tel:+1-816-555-1212\",\r\n u\"telnet://192.0.2.16:80/\",\r\n u\"urn:oasis:names:specification:docbook:dtd:xml:4.1.2\" ]\r\n for uri in invariant:\r\n self.assertEqual(uri, iri2uri(uri))", "def tests_with_prefix(self):\n\n for domain in self.domains:\n expected = domain\n\n data = f\"0.0.0.0 {domain}\"\n actual = File(data).get_converted()\n\n self.assertEqual(expected, actual)\n\n for domain in self.domains:\n expected = domain\n\n data = f\"127.0.0.1 {domain}\"\n actual = File(data).get_converted()\n\n self.assertEqual(expected, actual)" ]
[ "0.64686894", "0.63700265", "0.63254786", "0.6183657", "0.6174094", "0.6157679", "0.6133528", "0.60470575", "0.6045977", "0.6009044", "0.5981257", "0.5977913", "0.5938566", "0.5925104", "0.5923361", "0.5874661", "0.5874289", "0.5859754", "0.58453584", "0.5831364", "0.58010995", "0.57604563", "0.5756382", "0.5739845", "0.5729875", "0.5720218", "0.5709161", "0.5707278", "0.5702234", "0.5689992", "0.56563306", "0.5651466", "0.5634203", "0.5631974", "0.56315684", "0.5615166", "0.5600633", "0.55977035", "0.557639", "0.55654734", "0.5559866", "0.5543142", "0.5541135", "0.5535919", "0.55347466", "0.5533372", "0.552693", "0.55252314", "0.5523294", "0.5521299", "0.5516545", "0.5512689", "0.5501062", "0.5500022", "0.54899985", "0.54892945", "0.548658", "0.54794836", "0.5461091", "0.54507995", "0.54419655", "0.54419225", "0.54399", "0.5432791", "0.542934", "0.5429287", "0.54222023", "0.54082924", "0.54052466", "0.53963107", "0.5396005", "0.5393856", "0.5392303", "0.5390234", "0.53848875", "0.5384373", "0.538391", "0.5383767", "0.5379618", "0.5378148", "0.5377083", "0.53770113", "0.5375081", "0.5374692", "0.5374463", "0.53743804", "0.53696907", "0.5366632", "0.5365229", "0.53650635", "0.53552634", "0.5354639", "0.53531414", "0.5348111", "0.5344765", "0.53417695", "0.5341409", "0.53412205", "0.5336989", "0.5334766", "0.5329826" ]
0.0
-1
Read and convert to example, returns None if no data is available.
def read_and_convert(self, result_file):
    if self._example_pointer == self._num_examples:
        return None
    path_to_image_file = self._path_to_image_files[self._example_pointer]

    # Get image index
    index = int(path_to_image_file.split('/')[-1].split('.')[0])
    self._example_pointer += 1

    label_of_digits = result_file[index].strip().split(' ')
    # for digits: 10 represents no digit, for letters: 0 represents no letter
    digits = [10, 10, 10, 10]
    letters = [0, 0, 0, 0, 0]
    idd = 0
    idl = 0
    for i in range(len(label_of_digits)):
        if i in [0, 4, 5, 6]:
            digits[idd] = int(label_of_digits[i])  # label 10 is essentially digit zero
            idd += 1
        if i in [1, 2, 3, 7, 8]:
            letters[idl] = int(label_of_digits[i])
            idl += 1

    image = Image.open(path_to_image_file)
    image = image.resize([96, 24])
    image = np.array(image).tobytes()

    example = tf.train.Example(features=tf.train.Features(feature={
        'image': ExampleReader._bytes_feature(image),
        'digits': tf.train.Feature(int64_list=tf.train.Int64List(value=digits)),
        'letters': tf.train.Feature(int64_list=tf.train.Int64List(value=letters))
    }))
    return example
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def example_to_data(self, example):\n raise NotImplementedError", "def _read_one_example(\n top_example_dir_name, full_storm_id_string, storm_time_unix_sec,\n source_name, radar_field_name, include_sounding):\n\n if source_name == radar_utils.GRIDRAD_SOURCE_ID:\n num_radar_rows = NUM_GRIDRAD_ROWS\n num_radar_columns = NUM_GRIDRAD_COLUMNS\n else:\n num_radar_rows = NUM_MYRORSS_ROWS\n num_radar_columns = NUM_MYRORSS_COLUMNS\n\n training_option_dict = dict()\n training_option_dict[trainval_io.RADAR_FIELDS_KEY] = [radar_field_name]\n training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL\n training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = (\n SOUNDING_FIELD_NAMES if include_sounding else None\n )\n training_option_dict[trainval_io.SOUNDING_HEIGHTS_KEY] = (\n SOUNDING_HEIGHTS_M_AGL\n )\n\n training_option_dict[trainval_io.NUM_ROWS_KEY] = num_radar_rows\n training_option_dict[trainval_io.NUM_COLUMNS_KEY] = num_radar_columns\n training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None\n training_option_dict[trainval_io.TARGET_NAME_KEY] = DUMMY_TARGET_NAME\n training_option_dict[trainval_io.BINARIZE_TARGET_KEY] = False\n training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None\n training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None\n training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False\n\n model_metadata_dict = {\n cnn.TRAINING_OPTION_DICT_KEY: training_option_dict,\n cnn.LAYER_OPERATIONS_KEY: None,\n }\n\n print(MINOR_SEPARATOR_STRING)\n\n example_dict = testing_io.read_predictors_specific_examples(\n top_example_dir_name=top_example_dir_name,\n desired_full_id_strings=[full_storm_id_string],\n desired_times_unix_sec=numpy.array([storm_time_unix_sec], dtype=int),\n option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],\n layer_operation_dicts=None\n )\n\n predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]\n sounding_pressure_matrix_pa = example_dict[\n testing_io.SOUNDING_PRESSURES_KEY]\n\n if sounding_pressure_matrix_pa is None:\n sounding_pressures_pa = None\n else:\n sounding_pressures_pa = sounding_pressure_matrix_pa[0, ...]\n\n return predictor_matrices, model_metadata_dict, sounding_pressures_pa", "def read(self, source):\n raise NotImplementedError( 'Needs implementation' )", "def read_example(self, index):\n if (index < 0 or index >= len(self._data)):\n raise ValueError(\"Index must be from 0 (inclusive) to number of examples (exclusive).\")\n\n t = self._data[index][1]\n (X, header) = self._read_timeseries(self._data[index][0], t)\n y = self._data[index][2]\n\n return (X, t, y, header)", "def read_example(self, index):\n if (index < 0 or index >= len(self._data)):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n \n t = self._data[index][1]\n (X, header) = self._read_timeseries(self._data[index][0], t)\n y = self._data[index][2]\n\n return (X, t, y, header)", "def _read_data(self):", "def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': 
tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)", "def loadExample(self, exID):\r\n return \"\"", "def _get_next_example(self):\n\t\tif self._offset >= len(self._metadata):\n\t\t\tself._offset = 0\n\t\t\tnp.random.shuffle(self._metadata)\n\n\t\tmeta = self._metadata[self._offset]\n\t\tself._offset += 1\n\n\t\ttext = meta[1]\n\n\t\tinput_data = np.asarray(text, dtype=np.int32)\n\t\ttarget = meta[0].astype(np.float32)\n\t\treturn (input_data, target)", "def read_example(self, index):\n if (index < 0 or index >= len(self._data)):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n\n (X, header) = self._read_timeseries(self._data[index][0])\n y = self._data[index][2]\n\n return (X, self._data[index][1], y, header)", "def read_data(self):\n raise NotImplementedError", "def example(self):\n result = getattr(self, '_example', None)\n if result is None:\n # No example batch was found, so get one from the `.train` dataset\n result = next(iter(self.train))\n # And cache it for next time\n self._example = result\n return result", "def parse_example(example):\n metadata, data = example.strip().split('\\n\\n')\n metadata = pytoml.loads(metadata)\n metadata['success'] = metadata['result'] == 'success'\n metadata['name'] = re.sub(r'[ -]', '_', metadata['name'].lower())\n del metadata['result']\n return Example(data=data.strip(), **metadata)", "def read_one(example_object_id):\n # Get the example_object requested\n example_object = ExampleObject.query.filter(ExampleObject.example_object_id == example_object_id).one_or_none()\n\n # Did we find a example_object?\n if example_object is not None:\n\n # Serialize the data for the response\n example_object_schema = ExampleObjectSchema()\n data = example_object_schema.dump(example_object)\n return data\n\n # Otherwise, nope, didn't find that example_object\n else:\n abort(\n 404,\n \"ExampleObject not found for Id: {example_object_id}\".format(example_object_id=example_object_id),\n )", "def import_sample(infile):\n deserialized = None\n with open(infile, 'r') as file_handle:\n deserialized = json.load(file_handle, object_hook=decode_sample)\n return deserialized", "def _read_data(self) -> MMD:\n\t\tif self.config.source_type == SourceType.LOCAL_FILE:\n\t\t\treturn self._read_files()\n\t\telif self.config.source_type == SourceType.HDFS:\n\t\t\treturn self._read_hdfs()\n\t\telif self.config.source_type == SourceType.NEO4J:\n\t\t\treturn self._read_neo4j(self.config.graph_db)\n\n\t\telse:\n\t\t\traise NotImplementedError(\"The source type {} has not been implemented yet.\".format(loader_config.source_type))", "def get_input_data(sample):\n with checkpoints.query_portal.get(sample=sample).output[0].open() as f:\n data = json.read(f)\n return data", "def readOneData(self):\n\t\tpass", "def test_default_read():\n # If new data formats are added to preprocess, they need to be tested\n tested_data_formats = [\"ASCII\", \"SU\", \"SAC\"]\n\n preprocess = Default()\n assert(set(tested_data_formats) ==\n set(preprocess._obs_acceptable_data_formats))\n\n st1 = preprocess.read(os.path.join(TEST_DATA, \"AA.S0001.BXY.semd\"),\n data_format=\"ascii\")\n\n st2 = preprocess.read(os.path.join(TEST_DATA, \"Uy_file_single_d.su\"),\n data_format=\"su\")\n\n st3 = preprocess.read(os.path.join(TEST_DATA, \"AA.S0001.BXY.sac\"),\n data_format=\"sac\")\n\n assert(st1[0].stats.npts == st2[0].stats.npts)\n assert(st3[0].stats.npts == 
st2[0].stats.npts)", "def read_example(self, index):\n if index < 0 or index >= len(self._data):\n raise ValueError(\"Index must be from 0 (inclusive) to number of examples (exclusive).\")\n\n name = self._data[index][0]\n t = self._data[index][1]\n y = self._data[index][2]\n (X, header) = self._read_timeseries(name, t)\n\n return {\"X\": X,\n \"t\": t,\n \"y\": y,\n \"header\": header,\n \"name\": name}", "def read_example(self, index):\n if index < 0 or index >= len(self._data):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n\n name = self._data[index][0]\n (X, header) = self._read_timeseries(name)\n\n return {\"X\": X,\n \"t\": self._data[index][1],\n \"ihm\": self._data[index][2],\n \"los\": self._data[index][3],\n \"pheno\": self._data[index][4],\n \"decomp\": self._data[index][5],\n \"header\": header,\n \"name\": name}", "def read(self):\n raise NotImplementedError", "def _convert_to_example(filename, subset_idx, left_image, right_image, disparity=None, mask=None):\n left_image_raw = left_image.tostring()\n right_image_raw = right_image.tostring()\n if disparity is not None:\n mask_raw = mask.tostring()\n disparity_raw = disparity.tostring()\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(left_image.shape[0]),\n 'width': _int64_feature(left_image.shape[1]),\n 'left_image_raw': _bytes_feature(left_image_raw),\n 'right_image_raw': _bytes_feature(right_image_raw),\n 'mask_raw': _bytes_feature(mask_raw),\n 'disparity_raw': _bytes_feature(disparity_raw),\n 'filename': _bytes_feature(tf.compat.as_bytes(filename)),\n 'subset_idx': _int64_feature(subset_idx)\n }))\n else:\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(left_image.shape[0]),\n 'width': _int64_feature(left_image.shape[1]),\n 'left_image_raw': _bytes_feature(left_image_raw),\n 'right_image_raw': _bytes_feature(right_image_raw),\n 'filename': _bytes_feature(tf.compat.as_bytes(filename)),\n 'subset_idx': _int64_feature(subset_idx)\n }))\n return example", "def read_example(self, index):\n if index < 0 or index >= len(self._data):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n\n name = self._data[index][0]\n t = self._data[index][1]\n y = self._data[index][2]\n (X, header) = self._read_timeseries(name, t)\n\n return {\"X\": X,\n \"t\": t,\n \"y\": y,\n \"header\": header,\n \"name\": name}", "def read_example(self, index):\n if (index < 0 or index >= len(self._data)):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n\n (X, header) = self._read_timeseries(self._data[index][0])\n y = self._data[index][1]\n\n return (X, self._period_length, y, header)", "def read_example(self, index):\n if index < 0 or index >= len(self._data):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n\n name = self._data[index][0]\n t = self._data[index][1]\n y = self._data[index][2]\n (X, header) = self._read_timeseries(name)\n\n return {\"X\": X,\n \"t\": t,\n \"y\": y,\n \"header\": header,\n \"name\": name}", "def load_data():\n # Load in data\n sample_frame = energy_connection.sample_series('energy_readings')\n # TODO: Rooms/QL Extract\n sample_frame = energy_connection.sample_series('external_readings', append_frame=sample_frame)\n\n # To object\n sample = TimeSeriesSample(sample_frame, 'time')\n\n return sample", "def _decode_record(self, record, name_to_features):\n example = 
tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n # tf.logging.info(t)\n # t = tf.sparse.to_dense(t)\n # tf.logging.info(t.get_shape().as_list())\n # assert t.get_shape().as_list()[0] is not None\n example[name] = t\n \n del example[\"source_sos_ids\"]\n del example[\"source_sos_mask\"]\n\n return example", "def _read_data(self, path: str) -> T:\n raise NotImplementedError", "def convert_txt_to_data():\n pass", "def parse_single_example(serialized, features, name=None, example_names=None):\n return parse_single_example_v2(serialized, features, example_names, name)", "def read():\n # TODO", "def extract_info_from_sequence_example(path_to_tfrecord, from_scratch=False):\n assert(os.path.isfile(path_to_tfrecord))\n\n # The csv file containing extraction result\n output_dir = os.path.dirname(path_to_tfrecord)\n yaml_name = '.do_not_modify.dataset_info.yaml'\n csv_name = '.do_not_modify.example_info.csv'\n yaml_filepath = os.path.join(output_dir, yaml_name)\n csv_filepath = os.path.join(output_dir, csv_name)\n\n if not from_scratch \\\n and os.path.isfile(yaml_filepath) \\\n and os.path.isfile(csv_filepath):\n with open(yaml_filepath, 'r') as f:\n dataset_info = yaml.load(f)\n examples_info = pd.read_csv(csv_filepath)\n if verbose:\n print(\"Successfully loaded existing dataset info and examples info.\")\n return dataset_info, examples_info\n else: # from scratch\n if verbose:\n print(\"Extracting dataset info and examples info from scratch\",\n \"(by iterating the sequence examples)...\")\n\n # Some basic information on the dataset\n matrix_bundle_fields = []\n classes = set()\n # For now we only have dataset having 1 single bundle (e.g. 
no video+audio)\n num_bundles = 1\n num_classes = 0\n num_examples = 0\n sequence_size_max = 0\n sequence_size_min = 0\n sequence_size_median = 0\n is_sparse = None # True or False\n # Domain in ['audio_text_or_time_series', 'image_or_vector', 'video']\n # inferred_dataset_domain = None\n\n # Some basic information on each example\n num_timestamps = []\n num_features = []\n num_labels = []\n\n # Begin extracting\n counter = 0\n for se in tf.python_io.tf_record_iterator(path_to_tfrecord):\n sequence_example = tf.train.SequenceExample.FromString(se)\n\n context_feature = sequence_example.context.feature\n feature_lists_container = sequence_example.feature_lists.feature_list\n # Update num_labels\n labels = list(context_feature['label_index'].int64_list.value)\n num_labels.append(len(labels))\n\n if not matrix_bundle_fields:\n matrix_bundle_fields += list(feature_lists_container)\n else: # Make sure that fields name are consistent (coherent)\n assert(all([x in matrix_bundle_fields for x in feature_lists_container]))\n\n # Update classes\n classes = classes.union(set(labels))\n\n dense_key = '0_dense_input'\n sparse_value = '0_sparse_value'\n if dense_key in feature_lists_container:\n if is_sparse:\n raise ValueError(\"Inconsistent sparsity at index {}!\".format(counter))\n elif is_sparse is None:\n is_sparse = False\n key = dense_key\n elif sparse_value in feature_lists_container:\n if is_sparse is not None:\n if not is_sparse:\n raise ValueError(\"Inconsistent sparsity at index {}!\"\\\n .format(counter))\n else:\n is_sparse = True\n key = sparse_value\n\n # Update num_timestamps\n feature_list = feature_lists_container[key]\n num_timestamps.append(_len_feature_list(feature_list))\n # Update num_features\n feature_vec = _get_first_feature(feature_list)\n num_features.append(_len_feature(feature_vec))\n\n counter += 1\n\n examples_info = pd.DataFrame({'num_timestamps': num_timestamps,\n 'num_features': num_features,\n 'num_labels': num_labels})\n\n sequence_sizes = examples_info['num_timestamps']\n sequence_size_max = int(sequence_sizes.max())\n sequence_size_min = int(sequence_sizes.min())\n sequence_size_median = sequence_sizes.median()\n\n dataset_info = {'matrix_bundle_fields': matrix_bundle_fields,\n 'classes': list(classes),\n 'num_bundles': num_bundles,\n 'num_classes': len(classes),\n 'num_examples': examples_info.shape[0],\n 'sequence_size_max': sequence_size_max,\n 'sequence_size_min': sequence_size_min,\n 'sequence_size_median': sequence_size_median,\n 'is_sparse': is_sparse\n }\n examples_info.to_csv(csv_filepath, index=False)\n with open(yaml_filepath, 'w') as f:\n yaml.dump(dataset_info, f)\n return dataset_info, examples_info", "def read_example(self, index):\n if index < 0 or index >= len(self._data):\n raise ValueError(\n \"Index must be from 0 (inclusive) to number of lines (exclusive).\"\n )\n\n name = self._data[index][0]\n t = self._period_length\n y = self._data[index][1]\n (X, header) = self._read_timeseries(name)\n\n return {\"X\": X, \"t\": t, \"y\": y, \"header\": header, \"name\": name}", "def readData(self, rawstring, datatype):\n data = rawstring[:-1] #remove last NULL byte\n\n if datatype == ERROR:\n if is_python3():\n data2 = data.tobytes()\n data = data2.decode('utf-8')\n return data\n elif datatype == STRING or datatype == DOUBLE:\n # try to convert data to a more appropriate type\n if is_python3():\n data2 = data.tobytes()\n data = data2.decode('utf-8')\n\n try:\n data = int(data)\n except:\n try:\n data = float(data)\n except:\n pass\n\n return data\n elif 
datatype == ASSOC:\n return rawtodictonary(rawstring)\n elif SpecArray.isArrayType(datatype):\n #Here we read cols and rows... which are *supposed* to be received in the header!!!\n #better approach: data contains this information (since it is particular to that data type)\n return SpecArray.SpecArray(rawstring, datatype, self.rows, self.cols)\n else:\n raise TypeError", "def read(self):", "def _decode_record(record, name_to_features):\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "def read_example(self, index):\n if index < 0 or index >= len(self._data):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n\n name = self._data[index][0]\n t = self._period_length\n y = self._data[index][1]\n (X, header) = self._read_timeseries(name)\n\n return {\"X\": X,\n \"t\": t,\n \"y\": y,\n \"header\": header,\n \"name\": name}", "def read_raw_data(self):\n # Must be set by the user\n raise Exception(\"not implemented\")", "def get_example(example_id=None):\n # This is all local, requires no external GPT3 calls\n # Return all examples\n if not example_id:\n return json.dumps(gpt.get_all_examples())\n\n example = gpt.get_example(example_id)\n if not example:\n return error(\"id not found\", HTTPStatus.NOT_FOUND)\n return json.dumps(example.as_dict())", "def read(self, src):\n self.read_mesh(src)\n self.read_data(src)", "def _read_and_decode(example_proto,data_shape,dtypes):\n features = {}\n for name in data_shape:\n features[name] = tf.FixedLenFeature([], tf.string)\n parsed_features = tf.parse_single_example(example_proto, features)\n count = 0\n res = {}\n for name in data_shape:\n res[name] = parsed_features[name]\n if dtypes[count]!=str:\n res[name]=tf.decode_raw(res[name],dtypes[count])\n if dtypes[count]==tf.float32 or dtypes[count]==tf.float64:\n res[name]=tf.convert_to_tensor(res[name],dtype=dtypes[count])\n if data_shape[name]:\n res[name]=tf.reshape(res[name],shape=data_shape[name])\n count += 1\n return res", "def load(cls, filename_or_stream: str | PathLike | TextIO) -> \"Experiment\":\n if isinstance(filename_or_stream, (str, PathLike)):\n p = Path(filename_or_stream)\n if not p.suffix:\n p = p.with_suffix(\".json\")\n s: TextIO = open(p, \"r\")\n close = True\n else:\n s = filename_or_stream\n close = False\n\n exp = cls._structure(json.load(s))\n if close:\n s.close()\n return exp", "def test_read_raw_supported(fname):\n read_raw(fname)\n read_raw(fname, verbose=False)\n raw = read_raw(fname, preload=True)\n assert \"data loaded\" in str(raw)", "def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n input_ids, segment_ids, input_mask = \\\n tokenizer.encode_text(text_a=example.text_a,\n text_b=example.text_b,\n max_seq_length=max_seq_length)\n\n label_id = label_map[example.label]\n\n # here we disable the verbose printing of the data\n if ex_index < 0:\n logging.info(\"*** Example ***\")\n logging.info(\"guid: %s\", example.guid)\n logging.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n logging.info(\"input_ids length: %d\", len(input_ids))\n logging.info(\"input_mask: %s\", \" \".join([str(x) for x in 
input_mask]))\n logging.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\n logging.info(\"label: %s (id = %d)\", example.label, label_id)\n\n feature = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature", "def read_input_file(\n self, input_filename: str, infer: bool = False\n ) -> Union[List['BertExample'], Tuple[List['BertExample'], Tuple[str, str]]]:\n\n if not path.exists(input_filename):\n raise ValueError(\"Cannot find file: \" + input_filename)\n examples = [] # output list of BertExample\n hyps_refs = [] # output list of tuples (ASR-hypothesis, candidate_str)\n with open(input_filename, 'r') as f:\n for line in f:\n if len(examples) % 1000 == 0:\n logging.info(\"{} examples processed.\".format(len(examples)))\n if infer:\n parts = line.rstrip('\\n').split('\\t')\n hyp, ref, target, span_info = parts[0], parts[1], None, None\n if len(parts) == 4:\n target, span_info = parts[2], parts[3]\n try:\n example = self.build_bert_example(hyp, ref, target=target, span_info=span_info, infer=infer)\n except Exception as e:\n logging.warning(str(e))\n logging.warning(line)\n continue\n if example is None:\n logging.info(\"cannot create example: \")\n logging.info(line)\n continue\n hyps_refs.append((hyp, ref))\n examples.append(example)\n else:\n hyp, ref, target, semiotic_info = line.rstrip('\\n').split('\\t')\n try:\n example = self.build_bert_example(\n hyp, ref, target=target, span_info=semiotic_info, infer=infer\n )\n except Exception as e:\n logging.warning(str(e))\n logging.warning(line)\n continue\n if example is None:\n logging.info(\"cannot create example: \")\n logging.info(line)\n continue\n examples.append(example)\n logging.info(f\"Done. {len(examples)} examples converted.\")\n if infer:\n return examples, hyps_refs\n return examples", "def read_raw(self, name, source, test_data=''):\n self.m.path.assert_absolute(source)\n step_test_data = lambda: self.test_api.read_raw(test_data)\n result = self._run(name, ['copy', source, self.m.raw_io.output()],\n step_test_data=step_test_data)\n return result.raw_io.output", "def fixture_example_data():\n import_example_data()", "def _extract_sample(self, particle_class, sequence_number, file_time, regex,\n raw_data, timestamp):\n particle = None\n particle_dict = {}\n\n try:\n if regex is None or regex.match(raw_data):\n particle = particle_class(raw_data, sequence_number, file_time,\n internal_timestamp=timestamp,\n preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)\n\n # need to actually parse the particle fields to find out if there are errors\n particle_dict = particle.generate_dict()\n log.trace('Parsed particle: %s\\n\\n' % particle_dict)\n encoding_errors = particle.get_encoding_errors()\n if encoding_errors:\n log.warn(\"Failed to encode: %s\", encoding_errors)\n raise SampleEncodingException(\"Failed to encode: %s\" % encoding_errors)\n\n # Also catch any possible exceptions thrown from unpacking data\n except (RecoverableSampleException, SampleEncodingException, struct.error) as e:\n log.error(\"Sample exception detected: %s raw data: %r\", e, raw_data)\n if self._exception_callback:\n self._exception_callback(e)\n else:\n raise e\n\n # Do not return a particle if there are no values within\n if not particle_dict or not particle_dict.get(DataParticleKey.VALUES):\n return None\n\n return particle", "def single_example_parser(serialized_example):\n # Dimensions of the images in the CIFAR-10 dataset.\n # See 
http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(features['label'], tf.int32)\n \n image = train_preprocess_fn(image)\n label = tf.one_hot(label, NUM_CLASSES)\n \n return image, label", "async def get(self, kind=None):\n if kind is None:\n kind = self.data_type\n\n data = await self.read()\n if kind == DataType.TEXT:\n return data.decode()\n elif kind == DataType.BINARY:\n return data", "def _get_to_actual_data(raw):\n raise NotImplemented", "def _create_example(self):\n source = np.random.randn(self.batch_size, self.max_decode_length,\n self.input_depth)\n source_len = np.random.randint(0, self.max_decode_length, [self.batch_size])\n target_len = np.random.randint(0, self.max_decode_length * 2,\n [self.batch_size])\n target = np.random.randn(self.batch_size,\n np.max(target_len), self.input_depth)\n labels = np.random.randint(0, self.vocab_size,\n [self.batch_size, np.max(target_len) - 1])\n\n example_ = namedtuple(\n \"Example\", [\"source\", \"source_len\", \"target\", \"target_len\", \"labels\"])\n return example_(source, source_len, target, target_len, labels)", "def read_sample(myfilestr=\"MAY06001.SA3_CM_D545\"):\n detdata,metadata=data.readNCNRData(myfilestr) #note that it should be None for the default\n return SansData(detdata, metadata)", "def read(self):\n pass", "def __read_test_case(test_case):\n # type: (str) -> Optional[dict]\n with open('data/calculator.json') as json_file:\n data = json.load(json_file)\n return data[test_case] if data[test_case] else None", "def get_example_file(file_type: ExampleFileType) -> util.ExampleFile:\n with open(TEST_ROOT / file_type.value, \"rb\") as f:\n contents = f.read()\n return util.ExampleFile(contents, file_type.value)", "def test_predict_probe_data_2(self):\n reader = StringIO('1380:\\n804004\\n2369086\\n')\n writer = StringIO()\n predict_probe_data(reader, writer)\n self.assertEqual(writer.getvalue(), '1380:\\n3.5\\n3.4\\n')", "def read(cls, proto):\n pass", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n return example", "def test(self, example):\n #just get input; don't care about class label\n x = self._transformEx(example)[0] \n Y = self._propagateInput(x)[0]\n \n return Y", "def test_artemis_reader():\n _test_raw_reader(\n read_raw_artemis123,\n input_fname=short_hpi_1kz_fname,\n pos_fname=dig_fname,\n verbose=\"error\",\n )", "def test_read_data_augmented():\n data = read_data(\"src/tests/dataclassificationmodel/ferPlus_augment.pbz2\", True)\n assert len(data) == 7 and type(data) is tuple", "def parse_example(self, serialized_example):\n # Because of RaggedTensor specs, feature_specs can be a 2-level nested dict,\n # so have to wrap `tf.io.parse_single_example` between\n # `flatten_nest_dict`/`pack_as_nest_dict`.\n # {\n # 'video/image': tf.io.FixedLenSequenceFeature(...),\n # 'video/object/bbox': {\n # 'ragged_flat_values': tf.io.FixedLenSequenceFeature(...),\n # 'ragged_row_lengths_0', tf.io.FixedLenSequenceFeature(...),\n # },\n # }\n example = tf.io.parse_single_example(\n 
serialized=serialized_example,\n features=self.flat_feature_specs,\n )\n example = utils.pack_as_nest_dict(example, self._nested_feature_specs)\n\n example = { # pylint:disable=g-complex-comprehension\n k: _deserialize_single_field(example_data, tensor_info)\n for k, (example_data, tensor_info) in utils.zip_dict(\n example, self._flat_example_specs\n )\n }\n # Reconstruct all nesting\n example = utils.pack_as_nest_dict(example, self._example_specs)\n return example", "def getData(self, data_source):\r\n if isinstance(data_source, str):\r\n try:\r\n return eval(data_source)\r\n except (NameError, SyntaxError):\r\n try:\r\n data_f = open(data_source, 'U')\r\n data = data_f.read()\r\n data_f.close()\r\n try:\r\n return eval(data)\r\n except (NameError, SyntaxError, TypeError):\r\n pass\r\n return data\r\n except (IOError, NameError, TypeError):\r\n pass\r\n # if we got here, either we didn't get a string or we couldn't read\r\n # the data source into any other kind of object\r\n return data_source", "def parse_single_example(serialized_example):\n feature_description = {\n \"immrf/data\": tf.io.FixedLenFeature([], tf.string),\n \"immrf/shape\": tf.io.VarLenFeature(tf.int64),\n \"immrf/path\": tf.io.FixedLenFeature([], tf.string),\n \"tmap/data\": tf.io.FixedLenFeature([], tf.string),\n \"tmap/shape\": tf.io.VarLenFeature(tf.int64),\n \"tmap/path\": tf.io.FixedLenFeature([], tf.string),\n \"mask/data\": tf.io.FixedLenFeature([], tf.string),\n \"mask/shape\": tf.io.VarLenFeature(tf.int64),\n \"mask/path\": tf.io.FixedLenFeature([], tf.string),\n }\n slice = tf.io.parse_single_example(serialized_example, feature_description)\n for key in [\"immrf\", \"tmap\", \"mask\"]:\n slice[key + \"/data\"] = tf.io.decode_raw(slice[key + \"/data\"], out_type=tf.float32)\n slice[key + \"/data\"] = utils.reshape_back(slice, key)\n return slice", "def test_predict_probe_data_1(self):\n reader = StringIO('138:\\n1735266\\n1270280\\n')\n writer = StringIO()\n predict_probe_data(reader, writer)\n self.assertEqual(writer.getvalue(), '138:\\n3.3\\n3.2\\n')", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example", "def _parse_single_sequence_example_raw(serialized,\n context,\n feature_list,\n debug_name,\n name=None):\n with ops.name_scope(name, \"ParseSingleExample\", [serialized, debug_name]):\n serialized = ops.convert_to_tensor(serialized, name=\"serialized\")\n serialized = _assert_scalar(serialized, \"serialized\")\n return _parse_sequence_example_raw(serialized, debug_name, context,\n feature_list, name)[:2]", "def _load(examples, f):\n\n for l in f:\n json_example = json.loads(l)\n if FLAGS.mode == 'long_answers' and not has_long_answer(json_example):\n continue\n\n elif FLAGS.mode == 'short_answers' and not has_short_answer(json_example):\n continue\n\n example = Example(json_example)\n 
examples[example.example_id] = example\n\n if len(examples) == FLAGS.max_examples:\n break", "def sample_input(self, loader, is_test=False):\n pass", "def as_example(self, dataset_item):\n return dataset_item", "def convert_to_tf_example(\n patient_data: Tuple[str, Dict[str, object]]\n) -> tf.train.Example:\n try:\n data = patient_data[1]\n patient = data[\"patient\"][0]\n studies = data[\"studies\"][0]\n \n features = convert_patient_to_feature(patient)\n for study_id, study in studies:\n study_data = convert_study_to_feature(study)\n for feature in study_data:\n features.update(feature)\n return tf.train.Example(features=tf.train.Features(feature=features),)\n except Exception as e:\n _logger.error(\n f\"Error occurred when creating a TFRecord. patient_data: {data.get('patient', data)}. Error: {e}.\"\n )\n return tf.train.Example(features=tf.train.Features(feature={}),)", "def _simple_read(filename, converter):\n with open(filename) as file:\n return converter(file.read())", "def test_read(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)f8')\n dset = f.create_dataset('x', (10,), dtype=dt)\n # TODO implement this\n # assert dset.shape == (10,)\n # assert dset.dtype == dt\n\n # Full read\n out = dset[...]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (10, 3)\n\n # Single element\n out = dset[0]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (3,)\n\n # Range\n out = dset[2:8:2]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (3, 3)", "def load_data(self) -> None:", "def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example", "def parse_from_example_in_example(serialized,\n list_size=None,\n context_feature_spec=None,\n example_feature_spec=None,\n size_feature_name=None,\n mask_feature_name=None,\n shuffle_examples=False,\n seed=None):\n parser = _ExampleInExampleParser(\n list_size=list_size,\n context_feature_spec=context_feature_spec,\n example_feature_spec=example_feature_spec,\n size_feature_name=size_feature_name,\n mask_feature_name=mask_feature_name,\n shuffle_examples=shuffle_examples,\n seed=seed)\n return parser.parse(serialized)", "def read_example(self, index):\n if (index < 0 or index >= len(self._data)):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n (X, header) = self._read_timeseries(self._data[index][0])\n return [X] + list(self._data[index][1:]) + [header]", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = 
t\n\n return example", "def deepconsensus_input_to_example(\n deepconsensus_input: deepconsensus_pb2.DeepConsensusInput,\n example_height: int,\n inference: bool,\n counters: Optional[Dict[str, metrics.Metrics.counter]] = None,\n) -> Optional[tf.train.Example]:\n if not deepconsensus_input.subreads:\n if counters and counters['examples_no_subreads_counter']:\n counters['examples_no_subreads_counter'].inc()\n return\n\n # Get the example_width from the first subreads.\n example_width = len(deepconsensus_input.subreads[0].bases)\n\n # The full example will include 4 rows for the signal to noise ratio (sn)\n # values. The remaining rows will contain three sets of per-base values:\n # the base, pulse width (pw), and interpulse distance (ip). Some models\n # may use only a subset of this information downstream.\n per_base_rows = get_per_base_rows(example_height)\n if per_base_rows < 0 or per_base_rows % 4 != 0:\n raise ValueError('example_height - 5 must be non-negative, and divisible '\n 'by four.')\n max_passes = get_max_passes(example_height)\n\n if len(deepconsensus_input.subreads) > max_passes:\n # Increment a counter if the number of subreads from the\n # deepconsensus_input is more than the `max_passes` derived from the\n # input `example_height`.\n # But still continue.\n if counters and counters['examples_with_discarded_subreads']:\n counters['examples_with_discarded_subreads'].inc()\n\n example = tf.train.Example()\n features = example.features\n data = np.zeros(\n shape=(example_height, example_width, 1), dtype=dc_constants.NP_DATA_TYPE)\n data += dc_constants.GAP_OR_PAD_INT\n\n # Number of subreads is capped at num_subreads. In the cases of fewer\n # subreads, rows are left empty.\n kept_subreads = 0\n # Add extra dimension so that shape is (example_width, 1).\n base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices = get_indices(\n max_passes)\n for i in range(min(len(deepconsensus_input.subreads), max_passes)):\n subread = deepconsensus_input.subreads[i]\n # Each tuple should already be padded to the appropriate length.\n assert len(subread.bases) == example_width\n\n encoded_bases = encode_dna_as_floats(subread.bases) # pytype: disable=wrong-arg-types\n assert encoded_bases is not None\n data[base_indices[0] + i] += np.expand_dims(np.array(encoded_bases), -1)\n data[pw_indices[0] + i] += np.expand_dims(np.array(subread.pw), -1)\n data[ip_indices[0] + i] += np.expand_dims(np.array(subread.ip), -1)\n data[strand_indices[0] + i] += np.expand_dims(\n np.expand_dims(np.array(subread.subread_strand), -1), -1)\n kept_subreads += 1\n\n if kept_subreads == 0:\n if counters and counters['examples_no_subreads_counter']:\n counters['examples_no_subreads_counter'].inc()\n return\n\n if deepconsensus_input.ccs_sequence:\n encoded_ccs_bases = encode_dna_as_floats(deepconsensus_input.ccs_sequence) # pytype: disable=wrong-arg-types\n data[slice(*ccs_indices)] += np.expand_dims(np.array(encoded_ccs_bases), -1)\n\n data[slice(*sn_indices)] += np.expand_dims(\n np.expand_dims(np.array(deepconsensus_input.sn), -1), -1)\n\n features.feature['subreads/encoded'].bytes_list.value.append(data.tostring())\n features.feature['subreads/shape'].int64_list.value.extend(data.shape)\n features.feature['subreads/num_passes'].int64_list.value.append(kept_subreads)\n\n if not inference:\n label_bases_list = encode_dna_as_floats(deepconsensus_input.label.bases) # pytype: disable=wrong-arg-types\n assert label_bases_list is not None\n # Final shape of label should be (example_width, ).\n 
label_matrix = np.array(label_bases_list).astype(dc_constants.NP_DATA_TYPE)\n features.feature['label/encoded'].bytes_list.value.append(\n label_matrix.tostring())\n features.feature['label/shape'].int64_list.value.extend(label_matrix.shape)\n features.feature['deepconsensus_input/encoded'].bytes_list.value.append(\n deepconsensus_input.SerializeToString())\n return example", "def create_example(filename, sample_rate, load_audio_with_librosa):\n wav_data = tf.gfile.Open(filename, 'rb').read()\n example_list = list(\n audio_label_data_utils.process_record(\n wav_data=wav_data,\n sample_rate=sample_rate,\n ns=music_pb2.NoteSequence(),\n # decode to handle filenames with extended characters.\n example_id=six.ensure_text(filename, 'utf-8'),\n min_length=0,\n max_length=-1,\n allow_empty_notesequence=True,\n load_audio_with_librosa=load_audio_with_librosa))\n assert len(example_list) == 1\n return example_list[0].SerializeToString()", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def demonstration_examples(kind):\n\n DIR = './data/demos/'\n RNN_DEMO = \"-demo_rnn_examples\"\n EXT = '.pkl'\n\n if kind == 'rnn':\n return pd.read_pickle(DIR+RNN_DEMO+EXT)", "def example_loader(\n data_path: str,\n index_path: typing.Union[str, None],\n description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[typing.Dict[str, np.ndarray]]:\n\n typename_mapping = {\n \"byte\": \"bytes_list\",\n \"float\": \"float_list\",\n \"int\": \"int64_list\"\n }\n\n record_iterator = tfrecord_iterator(\n data_path=data_path,\n index_path=index_path,\n shard=shard,\n compression_type=compression_type,\n )\n\n for record in record_iterator:\n example = example_pb2.Example()\n example.ParseFromString(record)\n\n yield extract_feature_dict(example.features, description, typename_mapping)", "def test_predict_probe_data_3(self):\n reader = StringIO('13800:\\n2232104\\n802351\\n')\n writer = StringIO()\n predict_probe_data(reader, writer)\n self.assertEqual(writer.getvalue(), '13800:\\n3.5\\n3.9\\n')", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return 
fact_extracted_data", "def read(self, *args, **kwargs):\n pass", "def load_data():\n\n if global_deserializer is None:\n raise SystemExit('global de-serializer was not set')\n\n return global_deserializer(input())", "def load(self, input):", "def load(self, input_artifact: BundleInputArtifact) -> None:\n self.logger.info(\n f\"Loading from {input_artifact.path}\"\n )\n\n with open(input_artifact.path / \"example.txt\") as input_file:\n self.example_trained_param = float(input_file.read())", "def _read(self, question_id):\n question_id = question_id.squeeze(-1)\n correlation_weight = self._compute_correlation_weight(question_id)\n read_content = torch.matmul(self._value_memory, correlation_weight.unsqueeze(-1)).squeeze(-1)\n return read_content.to(ARGS.device)", "def to_tydi_example(entry, is_training):\n\n if is_training:\n answer = make_tydi_answer(entry[\"contexts\"], entry[\"answer\"])\n start_byte_offset = answer.offset\n end_byte_offset = answer.offset + byte_len(answer.text)\n else:\n answer = None\n start_byte_offset = None\n end_byte_offset = None\n\n return TyDiExample(\n example_id=int(entry[\"id\"]),\n language_id=get_language_id(entry[\"language\"]),\n question=entry[\"question\"],\n contexts=entry[\"contexts\"],\n plaintext=entry[\"plaintext\"],\n context_to_plaintext_offset=entry[\"context_to_plaintext_offset\"],\n answer=answer,\n start_byte_offset=start_byte_offset,\n end_byte_offset=end_byte_offset)", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()", "def read_raw(self, offset, size, return_raw = False):\n raw_data = self.reader(offset, size)\n if raw_data is None:\n return None\n if return_raw:\n return raw_data\n else:\n if size == 1:\n data = struct.unpack(\"%dB\" %size, raw_data)[0]\n else:\n data = struct.unpack(\"%dB\" %size, raw_data)\n return data" ]
[ "0.7003179", "0.6128065", "0.59333533", "0.562596", "0.560513", "0.559558", "0.5577824", "0.5567824", "0.55473655", "0.55461675", "0.5537794", "0.55172545", "0.55114156", "0.55060756", "0.5497584", "0.5480115", "0.5461704", "0.54357123", "0.5398757", "0.53854465", "0.5373124", "0.53603077", "0.53507996", "0.53480977", "0.5343207", "0.5334902", "0.532483", "0.53199035", "0.528183", "0.528043", "0.527774", "0.52656204", "0.52541286", "0.52501434", "0.5226759", "0.52203953", "0.5217762", "0.52177185", "0.5193534", "0.5181308", "0.51764846", "0.5173398", "0.51631635", "0.5161739", "0.5158467", "0.51577914", "0.51488024", "0.51479465", "0.51448286", "0.514263", "0.5128258", "0.5126673", "0.5124268", "0.51142263", "0.51139045", "0.51098233", "0.50978404", "0.50889194", "0.5086388", "0.50805587", "0.50766075", "0.5066771", "0.50643164", "0.50634265", "0.50621897", "0.5046318", "0.504458", "0.5031635", "0.5031518", "0.50283957", "0.5027362", "0.50256616", "0.5025604", "0.50187624", "0.5017236", "0.50154066", "0.501438", "0.5014233", "0.5013682", "0.5012568", "0.50115275", "0.50115275", "0.50115275", "0.50079596", "0.50078475", "0.5006434", "0.5004144", "0.4997877", "0.49940023", "0.4977962", "0.4967104", "0.49578133", "0.49514976", "0.49493685", "0.49479795", "0.49444214", "0.49399027", "0.49367625", "0.49348992", "0.49315184" ]
0.6061454
2
Print the list of toppings that have been requested.
def make_pizza(*toppings):
    print(toppings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toppings(request, pizza_id):\r\n pizza = Pizza.objects.get(id=pizza_id)\r\n toppings = pizza.topping_set.order_by('name')\r\n context = {'pizza': pizza, 'toppings': toppings}\r\n return render(request, 'pizzas/toppings.html', context)", "def printResults(self):\n for tweet in self.tweets:\n print(tweet)\n print(\"---------------------\\n\")", "def printall():\n print listAll()", "def print_list(self):\r\n pass", "def print_tasks(self):\n unformatted_rows = self.db_link.get_tasks()\n formatted_rows = self.display.format_row(unformatted_rows)\n self.display.print_task_list_formatted(formatted_rows)", "def dump_list_of_rts():\n rtlist = get_list_of_rts()\n if rtlist:\n for tweet in rtlist:\n print(' # Extracted from https://twitter.com/%s/status/%s' %\n (tweet['retweeted_status']['user']['screen_name'],\n tweet['retweeted_status']['id_str']))\n print(' (ur\"\"\"%s\"\"\", False),' %\n tweet['retweeted_status']['text'])", "def print_results(self):\n pass", "def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". \", task.name, \"(\", task.priority, \")\")\n i += 1", "def print_list(things_to_print, prefix=\"\\t\", stream=sys.stdout):\n for item in things_to_print:\n print(f\"{prefix}{item}\", file=stream)", "def make_pizza(*toppings):\n print(\"\\nMaking a pizza with the following toppings:\")\n for topping in toppings:\n print(f\"- {topping}\")", "def print_results(flight_info_list, departure_airport, return_airport):\n flight_info_list = sorted(flight_info_list, key=lambda k: k[\"price($)\"])\n\n print \"*********************************************************\"\n print \"Top 10 Cheapest Flights From {} to {}\".format(\n departure_airport,\n return_airport\n )\n print \"*********************************************************\"\n\n for i in flight_info_list[:10]:\n print \"----------------------------------\"\n print json.dumps(i, indent=4)\n\n flight_info_list = sorted(flight_info_list, key=lambda k: k[\"flight_duration(hours)\"])\n\n print \"*********************************************************\"\n print \"Top 10 Shortest Flights From {} to {}\".format(\n departure_airport,\n return_airport\n )\n print \"*********************************************************\"\n \n for i in flight_info_list[:10]:\n print \"----------------------------------\"\n print json.dumps(i, indent=4)\n\n print \"--------------------------------------\"\n print \"Total of %s flights were found\" % len(flight_info_list)\n print \"--------------------------------------\"", "def make_pizza(*toppings):\n print(\"\\nMaking a pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)", "def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())", "def _print_findings(self) -> None:\n for ip_address in self._ip_addresses:\n print(f\"{ip_address}\")", "def printOrders(self, event):\n \n pass", "def prnt(self):\n print \"%s %s %s %s\" % (time.ctime(), self.time, self.who, self.region)\n print \"%s %s %s\" % (time.ctime(), ' ' * len(self.time), self.text)\n for r in self.recipients:\n print \"%s %s %s\" % (time.ctime(), ' ' * len(self.time), r)", "def print_results(results):\n print(\"\\033[4m\\033[1m%-75s%s\\033[0m\" % (\"NAME\", \"ADDRESS\"))\n\n for selections in data:\n print(\"%-75s%s\" % (selections['applicant'], selections['location']))\n \n print(\"\\n\\033[1m--- PAGE \", page_num, \"---\\033[0m\\n\")", "def print_readings(data):\n output = [str(data['timestamp'])]\n output.append(getvalue(data, 't_in', 
'%0.2f'))\n output.append(getvalue(data, 'h_in', '%d'))\n for i in range(1, 6):\n output.append(getvalue(data, 't_%d' % i, '%0.2f'))\n output.append(getvalue(data, 'h_%d' % i, '%d'))\n output.append(getvalue(data, 'slp', '%0.1f'))\n output.append(getvalue(data, 'uv', '%0.1f'))\n output.append(getvalue(data, 'forecast', '%d'))\n output.append(getvalue(data, 'storm', '%d'))\n output.append(getvalue(data, 'winddir', '%d'))\n output.append(getvalue(data, 'windspeed', '%0.1f'))\n output.append(getvalue(data, 'windgust', '%0.1f'))\n output.append(getvalue(data, 'windchill', '%0.1f'))\n output.append(getvalue(data, 'rain', '%d'))\n print ':'.join(output)", "def get_pizza_toppings(): # noqa: E501\n data = pizza_toppings_service.get_all_pizza_toppings_from_db()\n pizza_toppings = list()\n for item in data:\n current = PizzaTopping(item.pizza_toppings_id, \n item.name,\n item.topping_price,\n item.gluten_free)\n pizza_toppings.append(current)\n return pizza_toppings, 200", "def print_entries(self):\n self.print_selected_entries(self.entries)", "def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)", "def report(self):\n print \"Got {} places from Wikipedia.\".format(len(self.places))\n print \"Got {} existing places.\".format(len(self.existing))\n print \"Found {} missing places:\".format(len(self.missing))\n print '\\n'.join(sorted(self.missing, key=lambda miss: miss[0]))\n print \"Found {} missing articles:\".format(len(self.missing_articles))\n print '\\n'.join(sorted(self.missing_articles, key=lambda miss: miss[0]))", "def get_result_printer(self):\n if isinstance(self.get_output, list):\n for value, key in self.get_output:\n print(value, key)\n else:\n for key, value in self.get_output.items():\n print(value, key)", "def print_ticket_list(json_data):\n print(\n \"ID | Subject | Submitted By | Date Created | Last Updated | Status \"\n )\n for ticket in json_data[\"tickets\"]:\n print(ticket['id'], \" | \", ticket['subject'], \" | \",\n ticket['submitter_id'], \" | \", ticket['created_at'], \" | \",\n ticket['updated_at'], \" | \", ticket['status'])", "def show_current(self):\n for packet in self.station.genLoopPackets():\n print(packet)\n break", "def print_out():\n pass", "def printWaiting(self):\n\t\tfor wait in self.w:\n\t\t\tw_print=\"\"\n\t\t\tfor c in wait:\n\t\t\t\tif c:\n\t\t\t\t\tw_print += str(c[1])\n\t\t\t\telse:\n\t\t\t\t\tw_print += 'NO'\n\t\t\t\tw_print += \" \"\n\t\t\tprint w_print", "def print_queue(self):\n for i in self.Obs:\n print(i)", "def printPassbook(self) :\n for expense in self.__passbook:\n print(expense.toString())", "def print_drinks(self):\n for beverage in self.drinks:\n print(beverage.get_name())", "def make_pizza(size,*toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following topppings:\")\n for topping in toppings:\n print(\"-\" + topping)", "def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"", "def print_results(results):\n print()\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(\"%% RESULTS %%\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print()\n print(\"Route \\t Cells \\t\")\n print(\"Length\\tChecked\\t Time\")\n print(\"--------------------------------\")\n print(\"{0}\\t{1}\\t{2}\".format(*results))\n print()", "def print_routes() -> None:\n mbta = MBTA(config.CT_MBTA_API_KEY)\n routes = mbta.get_routes()\n title_text = f\"List of Routes on MBTA\"\n 
print(f\"{title_text:=^80}\")\n for route in routes:\n print(\n f\"ID: {route['id']}, NAME: {route['attributes']['long_name']}\"\n )\n return", "def print_list(self):\n\n current = self.head\n\n while current is not None:\n print current.data\n current = current.next", "def make(size, *toppings):\r\n print(\"The size of the pizza is \"+ str(size))\r\n for topping in toppings:\r\n print('- '+ topping)", "def print_results(results):\n print(f\"Intial Entries: {results[0]}\")\n print(f\"Added Entries: {results[1]}\")\n print(f\"Final Entries: {results[2]}\")\n print(f\"Total Run Time: {results[3]}\")\n print(\"\\n\")", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def printsessions(update, context, sessions_file, sessions_passed_file):\n\n\tsessions_list = get_sessions_list(sessions_file)\n\tsessions_passed_list = get_sessions_list(sessions_passed_file)\n\tsessions_all_list = sessions_list + sessions_passed_list\n\n\tif (len(sessions_all_list) == 0):\n\t\tupdate.effective_message.reply_text('Session list is empty.\\n')\n\t\treturn\n\n\telse:\n\t\theaders = ('Date/Time', 'URL', 'Passed')\n\t\trows = [session.values() for session in sessions_all_list]\n\t\ttab_all_sessions_list = \"```\" + tabulate.tabulate(rows, headers, tablefmt=\"simple\", showindex=\"always\") + \"```\"\n\t\tupdate.effective_message.reply_text(tab_all_sessions_list, parse_mode=\"Markdown\")\n\n\treturn", "def print_data_list(self):\n print('\\n{0}'.format(self.webDataFrame))", "def make_pizza(size,*toppings):\r\n print(\"\\nMaking a \"+str(size)+\r\n \"-inch pizza with the following toppings:\")\r\n for topping in toppings:\r\n print(\"- \"+topping)\r\n print('----')", "def human_friendly_print_running_tasks(one_off, scheduled):\n all_vals = []\n name_pad = 5\n if one_off:\n for name in one_off:\n if len(name) > name_pad:\n name_pad = len(name)\n all_vals += one_off.values()\n\n if scheduled:\n for name in scheduled:\n if len(name) > name_pad:\n name_pad = len(name)\n all_vals += scheduled.values()\n\n name_pad += 1\n\n header = f'{\"Name\":<{name_pad}}| Task type | Status | Start'\n print(header)\n print('-' * (len(header) + 5))\n for task in all_vals:\n print(f'{task[\"name\"]:<{name_pad}}| {task[\"type\"].title():<10}| {task[\"status\"]:<8} | {task[\"start\"]}')", "def make_pizza(size, *toppings):\n print(f\"\\nMaking {size}-inch pizza with the following toppings:\")\n for topping in toppings:\n print(f\"- {topping}\")", "def make_pizza(size, *toppings):\n print(f\"\\nMaking a {size}-inch pizza with the following toppings:\")\n for topping in toppings:\n print(f\"- {topping}\")", "def list(self):\n self.background_scheduler.print_jobs()", "def make_pizza(\n size, *toppings):\n print(\"\\n Making a {size}-inch pizza with the following toppings: \")\n for topping in toppings:\n print(f\"- {topping}\")", "def showtrafficitemnames():\n trafficItems = middleware.trafficObj.getAllTrafficItemNames()\n print('\\nAll Traffic Items:\\n')\n for index, eachTrafficItem in enumerate(trafficItems):\n print('\\t{0}: {1}'.format(int(index)+1, eachTrafficItem))\n print()", "def print_jobs():\n for job in job_records():\n command = job['command']\n timestamp = job['timestamp'].isoformat().replace('T', ' ')[:-3]\n print('\\t'.join((job['id'], timestamp, command)))", "def make_pizza(size, *toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)", "def showrt(self):\n for 
name in self.neighbors:\n if not self.neighbors[name].is_killed:\n break\n else:\n print('All neighbors offline, all distances are set to infinity. ')\n return\n print str(datetime.datetime.now().replace(microsecond=0)) + ' Distance vector list is:'\n for name in self.distance_vector:\n print('Destination={}, Cost={}, link=({})').format(name, self.distance_vector[name].cost, self.distance_vector[name].link)", "def show_checked_in_passengers(checked_in):\n print(\"\\nThe following passengers have been checked in: \")\n for passengers in checked_in:\n print(passengers)", "def process_list(self):\n for p in self._queue:\n print \"%-5d %-10s %-10s %2d %10s %10s\" % (p.id, p.name,\n p.status['type'], p.priority, \n self.print_cr_tree(p.creation_tree['parent']), \n self.print_cr_tree(p.creation_tree['child']))", "def show_completed_models(completed_models):\r\n print(\"\\nThe following models have been printed:\")\r\n for completed_model in completed_models:\r\n print(completed_model)", "def show_all_tasks(self):\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n else:\n print('Nothing to do!')\n print()", "def print_parking(type):\r\n if (type=='Regular' or type=='Priority' or type=='VIP'):\r\n tempList=[]\r\n for i in carsSeq:\r\n if i[1]==type:\r\n tempList.append(i)\r\n for i in range(len(tempList)):\r\n if i==len(tempList)-1:\r\n print(\"car: {0}, parking time: {1}\".format(tempList[i][0],tempList[i][2])) \r\n else:\r\n print(\"Unknown parking lot type\")", "def PrintFeed(feed):\n for entry in feed.entry:\n PrintResource(entry)", "def travel_print(self):\n if self.is_empty():\n print(\"Linked list's length is 0\")\n else:\n node = self.head\n print(\"head -->\", node.data, end=' ')\n while node.next:\n node = node.next\n print(\"-->\", node.data, end=' ')\n print(\" \")", "def printShipsToSink(self):\r\n sb = []\r\n for sinkingShip in self.shipsToSink:\r\n shot = self.mapToShot(sinkingShip.bullseye)\r\n sb.append(str(shot))\r\n sb.append(\":\")\r\n sb.append(str(sinkingShip.size))\r\n sb.append(\" \")\r\n logging.debug(\"\".join(sb))", "def print_queue(self):\n for value in self.data:\n element = f'| {value} |'\n print(element)", "def print_list(self):\n p = self.head\n i = 0\n\n while i < self.size():\n print(p.data)\n i += 1\n p = p.next_node", "def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for completed_model in completed_models:\n print(completed_model)", "def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for completed_model in completed_models:\n print(completed_model)", "def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for completed_model in completed_models:\n print(completed_model)", "def print_list(self) -> None:\n cur_node = self.head\n while cur_node:\n print(cur_node.data)\n cur_node = cur_node.next", "def make_sandwich(*toppings):\r\n print(\"Making a sandwich with the following toppings:\")\r\n for topping in toppings:\r\n print(\"- \" + topping)", "def printStories(self):\n\t\tself.printHeader()\n\t\tfor i in range(self.firstStoryToShow, self.lastStoryToShow):\n\t\t\tself.outputStory(self.stories[i], self.showDomains, self.showFullTitles, self.collapseOldStories)\n\t\t\n\t\tif self.karmaChange:\n\t\t\tprint self.hnUserName + \"'s karma has changed 
since the last refresh.\"", "def displayTicker(self):\n for pair in self.config.pairs:\n if self.config.pairs[pair]:\n self.printTicker(pair, self.trader.tickerData)", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def print_requests(requests):\n\n if not _debug: return\n keys = get_sorted_keys(requests)\n\n print \"\\nIn Memory Structure:\"\n print \"{\"\n for key in keys:\n\tprint \" %s:[\" % (key)\n for request in requests[key]:\n\t\tprint \" (%s, %s),\" % (key, request.url)\n\tprint \" ]\"\n print \"}\\n\"", "def show_completed_models (completed_models):\n print (\"\\nThe followin models have been printed: \")\n for completed_model in completed_models:\n print (completed_model)", "def print_json_stdout(results):\n for json in results:\n print(\"\\n########## Result for IP {} ##########\".format(json['ip']))\n pprint.pprint(json)\n print('######################################')\n print()", "def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()", "def print_pairing_info(all_melon_types):\n for melon in all_melon_types:\n # print melon type\n print(f'Parings for {melon.name} are:')\n #for loop for pairings\n for pairing in melon.pairings:\n print (f'{pairing}')", "def print_routes(self):\n\n for route in self.app.router.routes():\n route_info = route.get_info()\n if \"formatter\" in route_info:\n url = route_info[\"formatter\"]\n elif \"path\" in route_info:\n url = route_info[\"path\"]\n elif \"prefix\" in route_info:\n url = route_info[\"prefix\"]\n else:\n url = \"Unknown type of route %s\" % route_info\n\n self.logger.info(\"Route has been setup %s at %s\", route.method, url)", "def cmd_pagetplaylists(self, data, client, cmd):\n for n, p in sorted(self._playlists.iteritems()):\n cmd.sayLoudOrPM(client, '%s - %s' % (n, p))\n time.sleep(1)", "def listPrinters(self):\n raise NotImplementedError(\"listPrinters not implemented\")", "def show_readings(self, logged_only):\n for data,ptr,_ in self.station.live_data(logged_only):\n print('%04x' % ptr, end=' ')\n print(data['idx'].strftime('%H:%M:%S'), end=' ')\n del data['idx']\n print(data)", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Test Case ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-JIRA URL:\", self.JIRA_URL, sep='')", "def print_results():\n now_time = time.time()\n diff_time_in_sec = now_time - start_time\n generated_per_second = total / diff_time_in_sec\n generated_per_hour = 3600 * generated_per_second\n saved_per_second = success / diff_time_in_sec\n saved_per_hour = 3600 * saved_per_second\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(f\"{'Generated:' : <16}{total : <12}\")\n print(f\"{'New graphs:' : <16}{success : <12}\")\n print(f\"{'Success rate:' : <16}{round((success / total) * 100, 3) : <7} %\")\n print(f\"{'Speed:' : <16}{round(generated_per_hour) : <7} graphs/h\")\n print(f\"{'Save speed:' : <16}{round(saved_per_hour) : <7} graphs/h\")", "def print_list(self):\n 
self.print_avec_separateur(\" \")", "def generate_plant_report(self):\n print('PLANTS IN ' + self.name)\n for species, count in self.plants.items():\n print(f'{species}: {count}')", "def _printTruckRec(self, tNode):\n count = self.countTrucks(tNode)\n print(f'Total number of vehicles entered in the warehouse: {count}')\n self.inorder(tNode)\n print('------------------------------------')", "def list_requesters():\n from mephisto.core.local_database import LocalMephistoDB\n from tabulate import tabulate\n\n db = LocalMephistoDB()\n requesters = db.find_requesters()\n dict_requesters = [r.to_dict() for r in requesters]\n click.echo(tabulate(dict_requesters, headers=\"keys\"))", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def Print(self, f=sys.stdout):\n print>>f, \"\"\n print>>f, \"Item number:\", self.n.encode('utf8')\n print>>f, \"Auction:\", self.auction.encode('utf8')\n print>>f, \"Lot number:\", self.nlot.encode('utf8')\n print>>f, \"Lot:\", self.lot.encode('utf8')\n print>>f, \"Start price:\", self.startPrice.encode('utf8')\n print>>f, \"Organizer:\", self.organizer.encode('utf8')\n print>>f, \"Application end date:\", self.applicationEndDate.encode('utf8')\n print>>f, \"Auction date:\", self.date.encode('utf8')\n print>>f, \"State:\", self.state.encode('utf8')\n print>>f, \"Winner:\", \n if self.winner: print>>f, self.winner.encode('utf8')\n else: print>>f, \"\"", "def print_all(self, lim=100):\n for item in islice(weekdays.return_all(), 0, 100):\n print(item)", "def tprint(*args, **kwargs):\r\n tprint_worker(*args, **kwargs)", "def _printEventList(bot, event):\n conv_event = bot.memory.get_by_path(['_event', event.conv_id])\n html = []\n for num, key in enumerate(sorted(conv_event, key=str)):\n segment = key.split(':')\n if segment[0] == \"event\":\n html.append(\"{}. <b>{}</b> [{} people]\".format(str(num), conv_event[\n key]['title'], len(conv_event[key]['participants'])))\n\n if len(html) == 0:\n yield from bot.coro_send_message(event.conv_id, '<i>No events available yet. Use <b>/event <eventname></b> to create your own event</i>')\n return\n # Generate the output list. 
Title first followed by the participants\n html.insert(0, _(\"<b>Current available events:</b>\"))\n message = _(\"<br />\".join(html))\n\n yield from bot.coro_send_message(event.conv_id, message)\n return", "def get(self, request, *args, **kwargs):\n items = self.get_items()\n return self.print(request, items)", "def printall():\n all_tasks = {\n 'Name': [],\n 'Deadline':[],\n 'Priority':[],\n 'Autodelete':[]\n }\n with open(todofile, 'r') as todo:\n try: #list compre for loading dict objs in to list, sorting by deadline\n tasks = sorted([json.loads(task) for task in todo.readlines()], \n key= lambda task: task['deadline'])\n except json.decoder.JSONDecodeError:\n return 1\n if not tasks:\n return None\n for task in tasks:\n all_tasks['Name'].append(task['name'])\n all_tasks['Deadline'].append(task['deadline'])\n all_tasks['Priority'].append(task['priority'])\n all_tasks['Autodelete'].append(\n 'No' if task['no_del'] else 'Yes')\n return all_tasks", "def print_event(self):\n\n list_of_names = [str(c) for c in self.__list_of_contacts]\n joined_names = ', '.join(list_of_names)\n table = [[str(self._title)],[\"Date: \"+str(self._date)],[\"Time: \"+str(self._start)+\" - \"+str(self._end)],[\"Participants: \"+str(joined_names)]]\n print(tabulate(table, tablefmt='grid'))", "def print(self):\n for fiction in self.fictions:\n print(fiction.__dict__)", "def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))", "def print_paging_menu():\n print(\"\\n~ Enter 'n' to view next page of tickets\")\n print(\"~ Enter 'p' to view previous page of tickets\")\n print(\"~ Enter 'q' to quit viewing list of tickets\")", "def print_action(player):\n [print(str(i)+': '+player.available_actions[i]) for i in range(len(player.available_actions))]", "def print_all(jobs):\n\n if len(jobs) == 0:\n print('print_all() recieved empty input')\n return\n\n for job in jobs:\n if job.is_relevant:\n print(job)\n else:\n continue", "def print_items(items): \n print(items)", "def print_songs(self):\n\t\tfor i,s in enumerate(self._songs):\n\t\t\tprint('{0}. {1}'.format(i, s.print_info()))", "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)" ]
[ "0.6459545", "0.6434612", "0.64327085", "0.6416531", "0.61851096", "0.61739594", "0.60970765", "0.6081574", "0.60734904", "0.60210615", "0.60160345", "0.5975071", "0.59174913", "0.5913153", "0.5871116", "0.58542", "0.58345544", "0.58035886", "0.57819474", "0.5701928", "0.5669071", "0.5668998", "0.5653469", "0.5622433", "0.5598034", "0.5583216", "0.5576635", "0.5556882", "0.5539096", "0.55320835", "0.5516702", "0.55127937", "0.5507951", "0.55032605", "0.5494063", "0.54916036", "0.548892", "0.5484742", "0.5477531", "0.5476664", "0.54717547", "0.5469741", "0.546561", "0.5458551", "0.54518574", "0.5444868", "0.54260886", "0.54231846", "0.54122937", "0.5411473", "0.5408606", "0.54067445", "0.5401566", "0.54007876", "0.5393052", "0.5391151", "0.5389203", "0.5387615", "0.53865886", "0.5384513", "0.53840345", "0.53840345", "0.53840345", "0.53808284", "0.53796035", "0.5372206", "0.53657573", "0.536529", "0.53620464", "0.5360325", "0.53568727", "0.5350119", "0.5346792", "0.5334935", "0.5332475", "0.53319573", "0.532649", "0.531988", "0.53134125", "0.53112674", "0.53112495", "0.53090566", "0.53083897", "0.5306364", "0.53042215", "0.5304168", "0.5299655", "0.5298032", "0.52968335", "0.5296012", "0.5294016", "0.52905923", "0.52851486", "0.52843934", "0.5281172", "0.5280319", "0.5276482", "0.5271507", "0.5266078" ]
0.5833372
18
Summarize the pizza we are about to make.
def make_pizza(*toppings):
    print("\nMaking a pizza with the following toppings:")
    for topping in toppings:
        print(f"- {topping}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_summarize_recipe(self):\n pass", "def summarize_food_data(unprocessed_food_list: List[str]) -> List[Dict[str, str]]:\n summary: List[Dict[str, str]] = []\n item_count_data: Dict[str, int] = {}\n\n for item in unprocessed_food_list:\n if item not in item_count_data:\n item_count_data[item] = 1\n else:\n item_count_data[item] += 1\n \n for product in item_count_data:\n item_information: Dict[str, str] = {}\n item_information[\"name\"] = product\n item_information[\"quantity\"] = str(item_count_data[product])\n item_information[\"units\"] = \"-\"\n summary.append(item_information)\n \n return summary", "def make_pizza(size, *toppings):\n print(f\"\\nMaking {size}-inch pizza with the following toppings:\")\n for topping in toppings:\n print(f\"- {topping}\")", "def make_pizza(size, *toppings):\n print(f\"\\nMaking a {size}-inch pizza with the following toppings:\")\n for topping in toppings:\n print(f\"- {topping}\")", "def make_pizza(\n size, *toppings):\n print(\"\\n Making a {size}-inch pizza with the following toppings: \")\n for topping in toppings:\n print(f\"- {topping}\")", "def make_pizza(topping='bacon'):\r\n print(\"Have a \" + topping + \" pizza!\")", "def make_pizza(*toppings):\n print(\"\\nMaking a pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)", "def make_pizza(size,*toppings):\r\n print(\"\\nMaking a \"+str(size)+\r\n \"-inch pizza with the following toppings:\")\r\n for topping in toppings:\r\n print(\"- \"+topping)\r\n print('----')", "def make_pizza(topping='bacon'):\n print(\"Have a \" + topping + \" pizza!\")", "def make_pizza(topping='bacon'):\n print(\"Have a \" + topping + \" pizza!\")", "def make_pizza(size, *toppings):\n\tprint(\"\\nMaking a \" + str(size) +\n\t\"-inch pizza with the following toppings:\")\n\tfor topping in toppings:\n\t\tprint(\"- \" + topping)", "def describeRestaurant(self):\n print (f\"{self.name} has the best {self.cuisineType}\")", "def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:\n avg_calories = total_calories / len(_calories)\n print(\"\\nFood Items:\", sorted(food_item_names))\n print(\"Total Calories:\", total_calories,\n \"Average Calories: %0.1f\\n\" % avg_calories)", "def make_pizza(size,*args):\n print(f\"\\nMaking a {size}-inch pizza with the following toppings:\")\n for arg in args:\n print(f\"+ {arg}\")", "def inventory_report(self):\n mean_price = sum(Product.price for Product in sample) / len(sample)\n mean_weight = sum(Product.weight for Product in sample) / len(sample)\n mean_flam = sum(Product.flammability for Product in sample) / len(sample)\n return 'Unique Product Names: ', sample.unique, '/n Average Price: ', mean_price, \n '/n Average Weight: ', mean_weight, '/n Average Flammability: ', mean_flam", "def make_pizza(*toppings):\n print(toppings)", "def make_pizza(*toppings):\n print(toppings)", "def main():\n splitted_file = convert_input_to_list()\n encyclopedia_of_pizza = parse_pizza_info(splitted_file)\n pizza_winner = choose_pizza(encyclopedia_of_pizza)\n print_winner(pizza_winner)", "def make_pizza(size, *toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)", "def create_pizza(pizza_type):\n pass", "def make_pizza(size, *toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following toppings:\")\n for topping in toppings:\n if size==12:\n print(\"The price is 10 with: \" + topping)\n elif size==16:\n print(\"The price 
is 20 with: \" + topping)", "def make_pizza(size,*toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following topppings:\")\n for topping in toppings:\n print(\"-\" + topping)", "def make(size, *toppings):\r\n print(\"The size of the pizza is \"+ str(size))\r\n for topping in toppings:\r\n print('- '+ topping)", "def summary(self):\n name = 'name : ' + self.get_name()\n description = 'description : ' + self.get_description()\n agility = 'agility : ' + str(self.get_agility())\n strength = 'strength : ' + str(self.get_strength())\n health_points = 'health_points : ' + str(self.get_health_points())\n summary = '\\n'.join([name, description, agility, strength, health_points])\n if self.take_weapon():\n summary += self.take_weapon().summary()\n return summary", "def computeMealFrecuency(self):\n self.getOrdersData()\n self.getOrderValues()\n meals = set(self.meals)\n for meal in meals:\n self.labels.append(meal)\n self.quantity.append(self.meals.count(meal))", "def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total", "def summarise(self):\n self.summary = az.summary(self.trace, var_names=[\"~chol\"], round_to=2)\n print(self.summary)\n return self.summary", "async def pizza(ctx):\r\n author = ctx.message.author\r\n await ctx.send(author.mention + \" has eaten \" + str(randint(2, 120)) + \" slices of pizza today.\")\r\n ctx.counter(n)", "def printSummary(self):\n pass", "def inventory_report(prod_list):\n prod_list = list(set(prod_list))\n x = 0\n price = 0\n weight = 0\n flammability = 0\n stealability = 0\n for item in prod_list:\n x += 1\n price += item.price\n weight += item.weight\n flammability += item.flammability\n if stealability != 'Not so stealable...':\n stealability += 1\n\n avg_price = price / x\n avg_weight = weight / x\n avg_flammability = flammability / x\n print(f'There are {x} unique products in this list. 
The average price is {avg_price}, '\n f'average weight is {avg_weight},'\n f'and the average flammability is {avg_flammability}.')\n if stealability >= len(prod_list) / 2:\n print('Many of these items are highly stealable!')\n return avg_price, avg_weight, avg_flammability", "def total(proportions):\n final = {}\n for i in proportions:\n if i in running_total:\n final[i] = proportions[i] * running_total[i]\n print(final)\n else:\n final[i] = 0\n print(final)\n\n total_sum = sum(final.values())\n return total_sum", "def print_winner(chosen_pizza):\n print(\"Tonight's pizza is {0}\".format(chosen_pizza))", "def print_food(self):\n for dish in self.food:\n print(dish.get_name())", "def inventory_report(products):\n unique_names = []\n total_price = 0\n total_weight = 0\n total_flammability = 0\n num_products = len(products)\n for i in range(num_products):\n if products[i].name not in unique_names:\n unique_names.append(products[i].name) \n total_price += products[i].price\n total_weight += products[i].weight\n total_flammability += products[i].flammability\n mean_price = total_price / num_products\n mean_weight = total_weight / num_products\n mean_flammability = total_flammability / num_products\n print('ACME CORPORATION OFFICIAL INVENTORY REPORT')\n print(f'Unique product names: {len(unique_names)}')\n print(f'Average price: {mean_price}')\n print(f'Average weight {mean_weight}')\n print(f'Average flammabilitiy {mean_flammability}')\n return unique_names, mean_price, mean_weight, mean_flammability", "def totals(lines, sport):\n value = []\n for game in lines:\n combos = it.product(game['unders'].items(), game['overs'].items())\n value.extend([f'{sport} {game[\"game\"]} {combo[0][0]}: {combo[0][1][0]} {combo[0][1][1]} and '\n f'{combo[1][0]}: {combo[1][1][0]} {combo[1][1][1]}\\n\\n' for combo in combos\n if combo[0][1][0] - combo[1][1][0] >= 0 and combo[0][1][1] + combo[1][1][1] >= 0])\n\n return value", "def describe_restaurant(self):\n msg = f\"{self.name} serves wonderful {self.cuisine_type}.\"\n print(f\"\\n{msg}\")", "def PrintSummary(self, dollarsPerKiloWattHour = 0.1149, dollarsPerDTH = 6.53535):\n\t\tprint()\n\t\tprint(\" RESULTS \")\n\t\tprint()\n\t\tprint(\"The Number of times the furnace turns on: \" + str(self.building_hvac.NumberOfTimesHeatingTurnedOn))\n\t\tprint(\"The Number of times the AC turns on: \" + str(self.building_hvac.NumberOfTimesCoolingTurnedOn))\n\t\tprint(\"The Current Temperature: \" + str(self.current_temperature) + \"C\")\n\t\tprint(\"The total Electrical power used: \" + str(self.building_hvac.GetElectricKilowattHours()) + \"KWH\")\n\t\tprint(\"The total Time: \" + str(self.building_hvac.TotalTimeInSeconds))\n\t\tprint(\"The total Time Heating was on: \" + str(self.building_hvac.TotalDurationHeatingOn))\n\t\tprint(\"The total Time Cooling was on: \" + str(self.building_hvac.TotalDurationCoolingOn))\n\t\tprint(\"The Total Gas Energy Used: \" + str(self.building_hvac.GetGasDTH()) + \" DTH\")\n\t\tprint(\"Electrical Cost: $\" + str(self.CalculateElectricEneregyCost()))\n\t\tprint(\"Gas Cost: $\" + str(self.CalculateGasEneregyCost()))", "def __puntuacion_total(self):\n disparos = []\n for disparo in self.__disparos:\n total = 0\n for puntaje in disparo['disparos']:\n total += puntaje\n disparo['puntaje_total'] = total\n disparos.append(disparo)\n return disparos", "def describe_restaurant(self):\n print(\"The Restaurant is called {} and offers {} cuisine.\".format(self.restaurant_name, self.cuisine_type))\n print(\"It has served {} 
clients.\".format(self.number_served))", "def total_points(self):\n total_points = 0.0\n for ingredient in self.ingredients:\n if (ingredient.has_property('ppg')):\n # Use given value if specified\n total_points += ingredient.property('ppg').to('ppg') * ingredient.quantity.to('lb')\n else:\n total_points += EXTRACTS[ingredient.type] * ingredient.quantity.to('lb')\n return(Quantity(total_points, 'points'))", "def ExamineAllEvents(self, do_print):\n total = 0.0\n for purno in self.data:\n event = self.data[purno]\n randomcountry = event.keys()[0]\n randomrow = event[randomcountry]\n total += self.GetTotal(randomrow)\n if do_print:\n print purno, randomrow[0], randomrow[2], randomrow[6]\n for country in event:\n print \" %s: %.2f%%\" % (\n country, self.GetCountryPercentage(event[country], country) * 100)\n return total", "def summarize(self):\n txtStr = \"%s to %s, %d flight legs.\" %\\\n (self.origin, self.destination, self.nlegs)\n txtStr += \"\\nTakeoff at %s\\nLanding at %s\\n\" %\\\n (self.takeoff, self.landing)\n txtStr += \"Flight duration of %s including %s observing time\" %\\\n (str(self.flighttime), self.obstime)\n\n return txtStr", "def present_summary(services, methods, count, backup):\n print_heading(\"Summary\")\n if backup is not None:\n writer(f\"Backup: {backup}\")\n writer(f\"Showing {count[0]}/{len(services)} Services\")\n writer(f\"Showing {count[1]}/{len(methods)} Methods\\n\")", "def total_number_of_animals(self):\n animals = self.animal()\n print 'Total number of animals on island: {:4}'.format(\n animals[\"Herbivores\"] + animals[\"Carnivores\"])", "def dp_all(foods, cal_goal, pro_goal, carb_goal, fat_goal):\n costs = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n 999999999)\n foods_used = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n {})\n\n for i in range(cal_goal):\n for j in range(pro_goal):\n for k in range(carb_goal):\n for l in range(fat_goal):\n for n in range(len(foods)):\n food = foods[n]\n if (int(food['calories']) > i\n or int(food['protein']) > j\n or int(food['carbs']) > k\n or int(food['fat']) > l):\n continue\n if (costs[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]\n == 999999999):\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = (macros[i - int(food['calories'])]\n [j - int(food['protein'])]\n [j - int(food['carbs'])]\n [j - int(food['fat'])])\n prev_foods_used = \\\n (foods_used[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]).copy()\n new_cal = calories(\n foods, prev_foods_used) + food['calories']\n new_pro = protein(\n foods, prev_foods_used) + food['protein']\n new_car = carbs(\n foods, prev_foods_used) + food['protein']\n new_fat = fat(\n foods, prev_foods_used) + food['protein']\n if (costs[i][j] > prev_cost + food['serving_cost']\n and new_cal > i - 20 and new_cal < i + 10\n and new_pro < j + 5 and new_pro < j + 5\n and new_car < j + 5 and new_car < j + 5\n and new_fat < j + 5 and new_fat < j + 5):\n costs[i][j][k][l] = prev_cost + \\\n food['serving_cost']\n try:\n prev_foods_used[n] += 1\n except KeyError:\n prev_foods_used[n] = 1\n foods_used[i][j][k][l] = prev_foods_used\n return foods_used[cal_goal - 1][pro_goal - 1][carb_goal - 1][fat_goal - 1]", "def show_summary(self) -> None:\n all_averages = []\n\n for i in self.album_statistics.values():\n try:\n all_averages.append(i['avg'])\n except (TypeError, ValueError):\n pass\n # print(all_averages)\n try:\n final_average = 
math.ceil(np.mean(all_averages))\n except ValueError:\n click.echo(\n 'Oops! https://lyrics.ovh couldn\\'t find any lyrics across any'\n ' album. This is caused by inconsistent Artist names from'\n ' Musicbrainz and lyrics.ovh. Try another artist.'\n )\n raise (SystemExit)\n output = BeautifulTable(max_width=200)\n output.set_style(BeautifulTable.STYLE_BOX_ROUNDED)\n output.column_headers = [\n 'Average number of words in tracks across all albums\\n'\n f'for {self.artist}'\n ]\n output.append_row([final_average])\n click.echo(output)\n\n return self", "def summary(self):\n raise NotImplementedError", "def carnivore_eats(self):\n self.order_by_fitness()\n for carn in self.fauna_list['Carnivore']:\n food_required = carn.parameters['F']\n amount_to_eat = 0\n not_eaten_animals = []\n for i, herb in enumerate(self.fauna_list['Herbivore']):\n if food_required <= amount_to_eat:\n not_eaten_animals.extend(self.fauna_list['Herbivore'][i:])\n break\n elif np.random.random() < carn.probability_of_kill(herb):\n if food_required - amount_to_eat < herb.weight:\n amount_to_eat += herb.weight\n elif food_required - amount_to_eat > herb.weight:\n amount_to_eat += food_required - amount_to_eat\n else:\n not_eaten_animals.append(herb)\n carn.animal_eats(amount_to_eat)\n self.fauna_list['Herbivore'] = not_eaten_animals", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def _get_book_summary(self):\n self._get_book_prices()\n for price in self.ask_prices:\n volume = 0\n for k in self.ask_snapshot.keys():\n if self.ask_snapshot[k].price == price:\n volume += self.ask_snapshot[k].volume\n self.ask_volumes.append(volume)\n for price in self.bid_prices:\n volume = 0\n for k in self.bid_snapshot.keys():\n if self.bid_snapshot[k].price == price:\n volume += self.bid_snapshot[k].volume\n self.bid_volumes.append(volume)", "def summarize(self, data):\n\n return self.summary(data).flatten()", "def get_single_shopping_list(recipe):\n total_ingredients = []\n\n for key, value in recipe.ingredient_amounts.items():\n if key in recipe.ingredient_units:\n unit = recipe.ingredient_units[key]\n total_ingredients.append([key, round((float(value)/gram_conversions[unit]), 2), unit])\n else:\n total_ingredients.append([key, value, None])\n\n return total_ingredients", "def describe_restaurant(self):\r\n print(\"\\n==========This is our restaurant \" + self.restaurant.title() + \"===============\")\r\n print(\"We serve you amazing \" + self.cuisine + \" 's cusine\")", "def inventory_report(products):\n name_list = set()\n price_list = []\n wt_list = []\n flamablity_list = []\n\n for p in products:\n name_list.add(p.name)\n price_list.append(p.price)\n wt_list.append(p.weight)\n flamablity_list.append(p.flammability)\n# Calculating average for report\n unique_names = len(name_list)\n avg_price = sum(price_list)/len(price_list)\n avg_weight = sum(wt_list)/len(wt_list)\n avg_flammability = sum(flamablity_list)/len(flamablity_list)\n# Printing\n print(\"$ python acme_report.py \")\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\n print(\"Unique product names:\", unique_names)\n print(\"Average price:\", round(avg_price, 2))\n print(\"Average weight:\", avg_weight)\n print(\"Average flammability:\", avg_flammability)", "def food_eaten(self):\r\n # get values from GUI\r\n\r\n foodList = \"\"\r\n foodCost=0\r\n if self.is_eggs.get():\r\n 
foodList += \"eggs $2.00\\n\"\r\n foodCost+=2\r\n if self.is_bacon.get():\r\n foodList += \"bacon $4.00\\n\"\r\n foodCost += 4\r\n if self.is_sausage.get():\r\n foodList += \"sausage $4.00\\n\"\r\n foodCost += 4\r\n if self.is_oj.get():\r\n foodList += \"OrangeJuice $3.00\\n\"\r\n foodCost += 3\r\n foodCost = ('%.2f' % foodCost)\r\n\r\n # Create the output to screen of foodlist\r\n story = (\"\\nThank you for joining us here at Order Up!\\n\\nThe foods that you ate are as follows:\\n\\n\\n\"+foodList+\"\\nThe total amount owed is: $\"+foodCost)\r\n # display the summary\r\n self.story_txt.delete(0.0, END)\r\n self.story_txt.insert(0.0, story)", "def generate_fish_report(self):\n if len(self.fish) == 0:\n print('No fish in here, come back later')\n\n for species, count in self.fish.items():\n print(f'{species}: {count}')", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def test_visualize_recipe_taste(self):\n pass", "def calculate_tip(meal_base, tip_rate):", "def inventory_report(products):\r\n names = set()\r\n total_price = 0\r\n total_weight = 0\r\n total_flammability = 0\r\n for product in products:\r\n names.add(product.name)\r\n total_price += product.price\r\n total_weight += product.weight\r\n total_flammability += product.flammability\r\n\r\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\r\n print(\"Unique product names: {}\".format(len(names)))\r\n print(\"Average price: {}\".format(total_price / len(products)))\r\n print(\"Average weight: {}\".format(total_weight / len(products)))\r\n print(\"Average flammability:{}\".format(\r\n total_flammability / len(products)))\r\n\r\n print(\"Following is useful starting code for acme_report.py:\")", "def report():\n print(\"Donor Name | Total Given | Num Gifts | Average Gift\")\n print(\"------------------------------------------------------------------\")\n for key, val in data.items():\n print(f\"{key:25} $ {float(sum(val)):>12.2f} {len(val):>8} $ {float(sum(val))/len(val):>11.2f}\")", "def toppings(request, pizza_id):\r\n pizza = Pizza.objects.get(id=pizza_id)\r\n toppings = pizza.topping_set.order_by('name')\r\n context = {'pizza': pizza, 'toppings': toppings}\r\n return render(request, 'pizzas/toppings.html', context)", "def generate_animal_report(self):\n print('ANIMALS IN ' + self.name)\n for species, count in self.animals.items():\n print(f'{species}: {count}')", "def show_prop(self):\n print(self.population_size)\n print(self.max_generation)\n print(self.mutate_rate)\n print(self.elite_rate)\n print(self.cross_rate)\n print(self.cross_type)\n print(self.verify_num)\n print(self.proof)", "def summary(app):\n click.echo(get_summary(app))", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def 
output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n\n sharpe_ratio = create_sharpe_ratio(returns)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n if len(dd_duration) == 1:\n dd_duration = dd_duration[0]\n\n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\n (\"Drawdown Duration\", \"%s\" % dd_duration)]\n\n self.equity_curve.to_csv('equity.csv')\n self.positions.to_csv('positions.csv')\n self.prices.to_csv('prices.csv')\n\n return stats", "def calories(foods, foods_used):\n calories = 0.0\n for i, count in foods_used.items():\n calories += (foods[i]['calories'] * count)\n return calories", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def summarize(self):\n txtSumm = ''\n\n if self.legtype == 'Takeoff':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Landing':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Other':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Observing':\n txtSumm = \"%02d -- %s, RA: %s, Dec: %s, LegDur: %s, ObsDur: %s\" %\\\n (self.legno, self.target, self.ra, self.dec,\n str(self.duration),\n str(self.obsdur))\n txtSumm += \"\\n\"\n if self.nonsid is True:\n txtSumm += \"NONSIDERIAL TARGET -- NAIFID: %d\" % (self.naifid)\n txtSumm += \"\\n\"\n txtSumm += \"(The SOFIA project sincerely hopes you enjoy \"\n txtSumm += \"your observing breaks due to XFORMS crashes)\"\n txtSumm += \"\\n\"\n txtSumm += \"ObsPlan: %s, ObsBlk: %s\" % (self.obsplan, self.obsblk)\n txtSumm += \"\\n\\n\"\n txtSumm += \"Elevation Range: %.1f, %.1f\" % (self.range_elev[0],\n self.range_elev[1])\n txtSumm += \"\\n\\n\"\n txtSumm += \"ROF Range: %.1f, %.1f\" % (self.range_rof[0],\n self.range_rof[1])\n txtSumm += \"\\n\"\n txtSumm += \"ROF Rate Range: %.1f, %.1f %s\" % (self.range_rofrt[0],\n self.range_rofrt[1],\n self.range_rofrtu)\n txtSumm += \"\\n\\n\"\n txtSumm += \"True Heading Range: %.1f, %.1f\" % (self.range_thdg[0],\n self.range_thdg[1])\n txtSumm += \"\\n\"\n txtSumm += \"True Heading Rate Range: %.1f, %.1f %s\" %\\\n (self.range_thdgrt[0],\n self.range_thdgrt[1],\n self.range_thdgrtu)\n txtSumm += \"\\n\"\n txtSumm += \"Moon Angle: %.1f, Moon Illumination: %s\" %\\\n (self.moonangle, self.moonillum)\n\n return txtSumm", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def choose_pizza(pizzas):\n possible_pizzas = []\n for i in pizzas.keys():\n suitable = True\n for element in pizzas[i]:\n if \"tuna\" in element:\n suitable = False\n elif \"garlic sauce\" in element:\n suitable = False\n elif \"anchovy\" in element:\n suitable = False\n elif \"mussels\" in element:\n suitable = False\n elif \"shrimp\" in element:\n suitable = False\n if suitable:\n possible_pizzas.append(i)\n\n pizza_choice = random.choice(possible_pizzas)\n\n return pizza_choice", "def parse_pizza_info(l):\n\n pizza_dict = {}\n\n for i, element in enumerate(l):\n if element.strip() == '<span 
class=\"meal-name\" itemprop=\"name\">':\n\n # Names of pizza\n pizza_name = l[i+1].split('<')[0].strip()\n pizza_dict[pizza_name] = []\n\n elif '<div class=\"meal-description-additional-info\" itemprop=\"description\">' in element:\n\n pizza_dict[pizza_name] = re.split(',|and',re.split('<|>|\\(', element.strip())[2])\n pizza_dict[pizza_name] = [x.strip() for x in pizza_dict[pizza_name]]\n pizza_dict[pizza_name] = [x.strip('-') for x in pizza_dict[pizza_name]]\n\n return pizza_dict", "def summary(self) -> str:\n pass", "def describe_restaurant(self):\r\n\t\tprint(\"Our restaurant is \" + self.restaurant_name.title() + \".\")\r\n\t\tprint(\"We are known for our \" + self.cuisine_type.title())", "def summary(aggregate):\n (airport, (count, total, square, minimum, maximum)) = aggregate\n\n try:\n mean = total / float(count)\n stddev = math.sqrt((square-(total**2)/count)/count-1)\n\n return (airport, (count, mean, stddev, minimum, maximum))\n except Exception:\n return (airport, (count, None, None, minimum, maximum))", "def describe_restaurant(self):\n\t\tdetails = f\"{self.restaurant_name} is a {self.cuisine_type} restaurant.\"\n\t\tprint(f\"\\n{details}\")", "def restaurant():\n\n # Initialize variables.\n menu = {'egusi': 150, \n 'akpu': 150, \n 'onugbu': 200, \n 'okro': 150, \n 'garri': 150, \n 'nsala': 300, \n 'rice': 150, \n 'stew': 150, \n 'isiewu': 1000\n }\n total = 0.0\n\n print()\n # Request input from user. Exit program if blank line is entered.\n while True:\n order = input(\"Order: \").strip().lower()\n if not order:\n break\n \n # Check if customer order is available in the menu. Increment total\n # if order is available and display appropriate message.\n if order in menu:\n total += menu[order]\n print(f'{order} cost {menu[order]}, total is {total}')\n else:\n print(f'Sorry, we are fresh out of {order} today.')\n\n # print(f'Your total is {total}')\n\n return total", "def fat(foods, foods_used):\n fat = 0.0\n for i, count in foods_used.items():\n fat += (foods[i]['fat'] * count)\n return fat", "def __str__(self):\n return self.summarize()", "def __str__(self):\n return self.summarize()", "def summary(self, fromdt, todt):\r\n totalSaved = self.miser.totalSaved(fromdt, todt) \r\n sumStr = \"%s: %s to %s\\n\" % (self.miser.name, fromdt, todt)\r\n sumStr += \"Total saved: %.2f\" % totalSaved\r\n\r\n sumStr += \"\\n\\nGoals:\\n\"\r\n sumStr += self._goalsMetStr(fromdt, todt, totalSaved)\r\n\r\n return sumStr", "def show_results(bill, tip, pct):\n \n total = tip + bill\n\n print(\"Bill amount: $\" + str(bill))\n print(\"Tip percentage: \" + str(pct) + \"%\")\n print(\"Tip amount due: $\" + str(tip))\n print(\"Total with tip: $\" + str(total))\n\n print(\"\"\"\n-----------------------------------\n GOOD BYE \n-----------------------------------\n\"\"\")", "def summary(self, printed=True):\n raise NotImplementedError", "def protein(foods, foods_used):\n protein = 0.0\n for i, count in foods_used.items():\n protein += (foods[i]['protein'] * count)\n return protein", "def consolidate_ingredients(breakfasts, lunches, dinners):\n total_ingredients = {}\n meals = [breakfasts, lunches, dinners]\n\n for meal in meals:\n for collection in meal:\n ingredients = fetch_ingredients(collection)\n for lst in ingredients:\n if lst[0] in total_ingredients:\n total_ingredients[lst[0]][0] += lst[1]\n total_ingredients[lst[0]][1].add(lst[2])\n else:\n total_ingredients[lst[0]] = [lst[1], set([lst[2]])]\n\n return total_ingredients", "def test_extra_chocolates_single(self):\n _inpc = 
ChocolateFeast(12,4,4)\n self.assertEquals(3,_inpc.get_total_chocolates())", "def main():\n arrs = Arrivals(27, 37, 1)\n s = 0.0\n for i in range(50):\n n = arrs.get_arrivals()\n s += n\n print 'Arrival {}: {}'.format(i, n)\n print 'Average arrivals: {}'.format(s / 50.0)", "def calories() -> None:\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")\n while new_item != \"q\":\n insert_calorie_value(new_item)\n total_calories = 0\n total_calories = adding_total_calories(total_calories)\n food_item_names = []\n appending_food_item_names(food_item_names)\n printing_food_and_calories(food_item_names, total_calories)\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? \n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def get_one_meal():", "def affiche_favoris():\r\n # Liste des favoris utilisés pour la fonction \"select_favorite\"\r\n favorite_dict = {}\r\n # pour les produits dans Count\r\n cursor.execute('USE openfoodfacts;')\r\n cursor.execute(\"\"\"SELECT F1.name as Product, F2.name as Substitute \\\r\n FROM Backup \\\r\n INNER JOIN Food F1 ON Backup.produit_id = F1.id \r\n INNER JOIN Food F2 ON Backup.substitut_id = F2.id\"\"\")\r\n favorite = cursor.fetchall()\r\n index = 1\r\n for i in favorite:\r\n favorite_tuple = (i[0], i[1])\r\n print(\"\\n {}. 
{}, Peut être remplacé par {}.\".format(index, \\\r\n favorite_tuple[0], favorite_tuple[1]))\r\n favorite_dict[index] = favorite_tuple\r\n index += 1\r\n\r\n if not favorite_dict:\r\n print (\"La liste des favoris est vide.\")\r\n else:\r\n print('Choisissez un chiffre pour plus de détail.')\r\n select_favorite(favorite_dict)", "def venmo_calc(my_dic, total, description, tax=0, tip=0, misc_fees=0, clean=False):\n precheck_sum = round(sum(my_dic.values())+tax+tip+misc_fees,2)\n total = round(total,2) # otherwise get weird 23.00000005 raw totals\n if total != precheck_sum:\n return st.write(f\"You provided {total} as the total, but I calculated {precheck_sum}\")\n else:\n num_ppl = len(my_dic.keys())\n tax_perc = tax/(total-tip-misc_fees-tax)\n tip_perc = tip/(total-tip-misc_fees-tax)\n fee_part = round(misc_fees/num_ppl,2)\n request = {}\n rounded_sum = 0\n for key in my_dic.keys(): \n my_total = my_dic[key]\n\n tax_part = tax_perc * my_total\n tip_part = tip_perc * my_total\n\n person_total = my_total + tax_part + fee_part + tip_part\n rounded_sum += person_total\n request[key] = person_total\n ### Explain the calculation for transparency ###\n with st.beta_expander(label='What just happened?'):\n st.write(f\"\"\"\n 1. Tax% ($p_x$) was calculated using tax/(food_total): __{round(tax_perc*100,2)}%__\n 2. Tip% ($p_p$) was calculated using tip/(food_total): __{round(tip_perc*100,2)}%__\n 3. Fees were distributed equally: __${fee_part}__ per person\n 4. Each person's sum was calculated using: $m_t=d_s + (d_s * p_x) + (d_s*p_p) + d_f$\n * $m_t$ = total money to request\n * $d_s$ = dollars spent on food\n * $p_x$ = percent tax\n * $p_p$ = percent tip\n * $d_f$ = dollars spent on fee\n \"\"\")\n rounded_sum = round(rounded_sum,2)\n ### Error catcher ###\n if (rounded_sum > total+0.1):\n return st.write(f\"Uh oh! My calculated venmo charge sum is ${rounded_sum} but the receipt total was ${round(total,2)}\")\n\n ### Round the calculated request amounts ###\n request_money = {}\n for key in request.keys():\n request_money[key] = [round(request[key],2)]\n from apps import manual_mode as mm\n # get dictionary of name:message\n messages = venmo_message_maker(description,request_money,my_dic,tip_perc,tax_perc,fee_part,tip,tax,misc_fees, clean_message=clean)\n \n data = {\"request_money\":request_money,\n \"messages\":messages} \n return data", "def __str__(self):\n return f'{self._name} has {self._calories} calories, {self._carbohydrates}' +\\\n f'g. carbohydrates, {self._fat}g. of fat and {self._proteins}g. of proteins'", "def test_extra_chocolates_multiple(self):\n _inpc = ChocolateFeast(6,2,2)\n self.assertEquals(5,_inpc.get_total_chocolates())", "def add_food_to_bag(self):\n self.food_eaten.set(sum([species.food.get() for species in self.ecosystem]))", "def func(self):\n from commands.base_commands.guest import census_of_fealty\n\n fealties = census_of_fealty()\n table = PrettyTable([\"{wFealty{n\", \"{w#{n\"])\n for fealty in fealties:\n table.add_row([fealty, fealties[fealty]])\n self.msg(table)", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total" ]
[ "0.600868", "0.6002266", "0.593303", "0.59114504", "0.5896875", "0.5879344", "0.58790183", "0.5830151", "0.58242345", "0.58242345", "0.58150595", "0.5803258", "0.57938284", "0.57802445", "0.57720184", "0.57685405", "0.57685405", "0.5758228", "0.5754923", "0.57318443", "0.57145965", "0.57122093", "0.5611701", "0.55763674", "0.5571644", "0.5571415", "0.5496812", "0.545135", "0.544935", "0.54354566", "0.54299146", "0.54263943", "0.5393129", "0.53838027", "0.5374992", "0.53608793", "0.53242666", "0.5315155", "0.5264564", "0.5261272", "0.5245013", "0.5243312", "0.524294", "0.5223371", "0.5222609", "0.52059567", "0.5188855", "0.5188592", "0.51874685", "0.51849425", "0.5179223", "0.5151264", "0.51486826", "0.5147458", "0.5136431", "0.51292694", "0.51055026", "0.5100168", "0.5083701", "0.5082808", "0.50803643", "0.5079786", "0.507928", "0.5069375", "0.5067589", "0.5063179", "0.5056102", "0.50455594", "0.50433934", "0.503642", "0.5035859", "0.50356746", "0.5033242", "0.50318104", "0.50214547", "0.5016467", "0.5011643", "0.50097036", "0.4991131", "0.49891227", "0.49891227", "0.498749", "0.49850294", "0.49827355", "0.49805564", "0.49696666", "0.49679977", "0.49652043", "0.49600908", "0.4959893", "0.49550572", "0.49517143", "0.4948632", "0.493878", "0.4927642", "0.49192318", "0.4917958", "0.4914932", "0.4914594", "0.49143893" ]
0.59282035
3
Summarize the pizza we are about to make.
def make_pizza(size, *toppings):
    print(f"\nMaking {size}-inch pizza with the following toppings:")
    for topping in toppings:
        print(f"- {topping}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_summarize_recipe(self):\n pass", "def summarize_food_data(unprocessed_food_list: List[str]) -> List[Dict[str, str]]:\n summary: List[Dict[str, str]] = []\n item_count_data: Dict[str, int] = {}\n\n for item in unprocessed_food_list:\n if item not in item_count_data:\n item_count_data[item] = 1\n else:\n item_count_data[item] += 1\n \n for product in item_count_data:\n item_information: Dict[str, str] = {}\n item_information[\"name\"] = product\n item_information[\"quantity\"] = str(item_count_data[product])\n item_information[\"units\"] = \"-\"\n summary.append(item_information)\n \n return summary", "def make_pizza(*toppings):\n print(\"\\nMaking a pizza with the following toppings:\")\n for topping in toppings:\n print(f\"- {topping}\")", "def make_pizza(size, *toppings):\n print(f\"\\nMaking a {size}-inch pizza with the following toppings:\")\n for topping in toppings:\n print(f\"- {topping}\")", "def make_pizza(\n size, *toppings):\n print(\"\\n Making a {size}-inch pizza with the following toppings: \")\n for topping in toppings:\n print(f\"- {topping}\")", "def make_pizza(topping='bacon'):\r\n print(\"Have a \" + topping + \" pizza!\")", "def make_pizza(*toppings):\n print(\"\\nMaking a pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)", "def make_pizza(size,*toppings):\r\n print(\"\\nMaking a \"+str(size)+\r\n \"-inch pizza with the following toppings:\")\r\n for topping in toppings:\r\n print(\"- \"+topping)\r\n print('----')", "def make_pizza(topping='bacon'):\n print(\"Have a \" + topping + \" pizza!\")", "def make_pizza(topping='bacon'):\n print(\"Have a \" + topping + \" pizza!\")", "def make_pizza(size, *toppings):\n\tprint(\"\\nMaking a \" + str(size) +\n\t\"-inch pizza with the following toppings:\")\n\tfor topping in toppings:\n\t\tprint(\"- \" + topping)", "def describeRestaurant(self):\n print (f\"{self.name} has the best {self.cuisineType}\")", "def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:\n avg_calories = total_calories / len(_calories)\n print(\"\\nFood Items:\", sorted(food_item_names))\n print(\"Total Calories:\", total_calories,\n \"Average Calories: %0.1f\\n\" % avg_calories)", "def make_pizza(size,*args):\n print(f\"\\nMaking a {size}-inch pizza with the following toppings:\")\n for arg in args:\n print(f\"+ {arg}\")", "def inventory_report(self):\n mean_price = sum(Product.price for Product in sample) / len(sample)\n mean_weight = sum(Product.weight for Product in sample) / len(sample)\n mean_flam = sum(Product.flammability for Product in sample) / len(sample)\n return 'Unique Product Names: ', sample.unique, '/n Average Price: ', mean_price, \n '/n Average Weight: ', mean_weight, '/n Average Flammability: ', mean_flam", "def make_pizza(*toppings):\n print(toppings)", "def make_pizza(*toppings):\n print(toppings)", "def main():\n splitted_file = convert_input_to_list()\n encyclopedia_of_pizza = parse_pizza_info(splitted_file)\n pizza_winner = choose_pizza(encyclopedia_of_pizza)\n print_winner(pizza_winner)", "def make_pizza(size, *toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following toppings:\")\n for topping in toppings:\n print(\"- \" + topping)", "def create_pizza(pizza_type):\n pass", "def make_pizza(size, *toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following toppings:\")\n for topping in toppings:\n if size==12:\n print(\"The price is 10 with: \" + topping)\n elif size==16:\n print(\"The price is 20 with: \" + 
topping)", "def make_pizza(size,*toppings):\n print(\"\\nMaking a \" + str(size) + \"-inch pizza with the following topppings:\")\n for topping in toppings:\n print(\"-\" + topping)", "def make(size, *toppings):\r\n print(\"The size of the pizza is \"+ str(size))\r\n for topping in toppings:\r\n print('- '+ topping)", "def summary(self):\n name = 'name : ' + self.get_name()\n description = 'description : ' + self.get_description()\n agility = 'agility : ' + str(self.get_agility())\n strength = 'strength : ' + str(self.get_strength())\n health_points = 'health_points : ' + str(self.get_health_points())\n summary = '\\n'.join([name, description, agility, strength, health_points])\n if self.take_weapon():\n summary += self.take_weapon().summary()\n return summary", "def computeMealFrecuency(self):\n self.getOrdersData()\n self.getOrderValues()\n meals = set(self.meals)\n for meal in meals:\n self.labels.append(meal)\n self.quantity.append(self.meals.count(meal))", "def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total", "def summarise(self):\n self.summary = az.summary(self.trace, var_names=[\"~chol\"], round_to=2)\n print(self.summary)\n return self.summary", "async def pizza(ctx):\r\n author = ctx.message.author\r\n await ctx.send(author.mention + \" has eaten \" + str(randint(2, 120)) + \" slices of pizza today.\")\r\n ctx.counter(n)", "def printSummary(self):\n pass", "def inventory_report(prod_list):\n prod_list = list(set(prod_list))\n x = 0\n price = 0\n weight = 0\n flammability = 0\n stealability = 0\n for item in prod_list:\n x += 1\n price += item.price\n weight += item.weight\n flammability += item.flammability\n if stealability != 'Not so stealable...':\n stealability += 1\n\n avg_price = price / x\n avg_weight = weight / x\n avg_flammability = flammability / x\n print(f'There are {x} unique products in this list. 
The average price is {avg_price}, '\n f'average weight is {avg_weight},'\n f'and the average flammability is {avg_flammability}.')\n if stealability >= len(prod_list) / 2:\n print('Many of these items are highly stealable!')\n return avg_price, avg_weight, avg_flammability", "def total(proportions):\n final = {}\n for i in proportions:\n if i in running_total:\n final[i] = proportions[i] * running_total[i]\n print(final)\n else:\n final[i] = 0\n print(final)\n\n total_sum = sum(final.values())\n return total_sum", "def print_winner(chosen_pizza):\n print(\"Tonight's pizza is {0}\".format(chosen_pizza))", "def print_food(self):\n for dish in self.food:\n print(dish.get_name())", "def inventory_report(products):\n unique_names = []\n total_price = 0\n total_weight = 0\n total_flammability = 0\n num_products = len(products)\n for i in range(num_products):\n if products[i].name not in unique_names:\n unique_names.append(products[i].name) \n total_price += products[i].price\n total_weight += products[i].weight\n total_flammability += products[i].flammability\n mean_price = total_price / num_products\n mean_weight = total_weight / num_products\n mean_flammability = total_flammability / num_products\n print('ACME CORPORATION OFFICIAL INVENTORY REPORT')\n print(f'Unique product names: {len(unique_names)}')\n print(f'Average price: {mean_price}')\n print(f'Average weight {mean_weight}')\n print(f'Average flammabilitiy {mean_flammability}')\n return unique_names, mean_price, mean_weight, mean_flammability", "def totals(lines, sport):\n value = []\n for game in lines:\n combos = it.product(game['unders'].items(), game['overs'].items())\n value.extend([f'{sport} {game[\"game\"]} {combo[0][0]}: {combo[0][1][0]} {combo[0][1][1]} and '\n f'{combo[1][0]}: {combo[1][1][0]} {combo[1][1][1]}\\n\\n' for combo in combos\n if combo[0][1][0] - combo[1][1][0] >= 0 and combo[0][1][1] + combo[1][1][1] >= 0])\n\n return value", "def describe_restaurant(self):\n msg = f\"{self.name} serves wonderful {self.cuisine_type}.\"\n print(f\"\\n{msg}\")", "def PrintSummary(self, dollarsPerKiloWattHour = 0.1149, dollarsPerDTH = 6.53535):\n\t\tprint()\n\t\tprint(\" RESULTS \")\n\t\tprint()\n\t\tprint(\"The Number of times the furnace turns on: \" + str(self.building_hvac.NumberOfTimesHeatingTurnedOn))\n\t\tprint(\"The Number of times the AC turns on: \" + str(self.building_hvac.NumberOfTimesCoolingTurnedOn))\n\t\tprint(\"The Current Temperature: \" + str(self.current_temperature) + \"C\")\n\t\tprint(\"The total Electrical power used: \" + str(self.building_hvac.GetElectricKilowattHours()) + \"KWH\")\n\t\tprint(\"The total Time: \" + str(self.building_hvac.TotalTimeInSeconds))\n\t\tprint(\"The total Time Heating was on: \" + str(self.building_hvac.TotalDurationHeatingOn))\n\t\tprint(\"The total Time Cooling was on: \" + str(self.building_hvac.TotalDurationCoolingOn))\n\t\tprint(\"The Total Gas Energy Used: \" + str(self.building_hvac.GetGasDTH()) + \" DTH\")\n\t\tprint(\"Electrical Cost: $\" + str(self.CalculateElectricEneregyCost()))\n\t\tprint(\"Gas Cost: $\" + str(self.CalculateGasEneregyCost()))", "def __puntuacion_total(self):\n disparos = []\n for disparo in self.__disparos:\n total = 0\n for puntaje in disparo['disparos']:\n total += puntaje\n disparo['puntaje_total'] = total\n disparos.append(disparo)\n return disparos", "def describe_restaurant(self):\n print(\"The Restaurant is called {} and offers {} cuisine.\".format(self.restaurant_name, self.cuisine_type))\n print(\"It has served {} 
clients.\".format(self.number_served))", "def total_points(self):\n total_points = 0.0\n for ingredient in self.ingredients:\n if (ingredient.has_property('ppg')):\n # Use given value if specified\n total_points += ingredient.property('ppg').to('ppg') * ingredient.quantity.to('lb')\n else:\n total_points += EXTRACTS[ingredient.type] * ingredient.quantity.to('lb')\n return(Quantity(total_points, 'points'))", "def ExamineAllEvents(self, do_print):\n total = 0.0\n for purno in self.data:\n event = self.data[purno]\n randomcountry = event.keys()[0]\n randomrow = event[randomcountry]\n total += self.GetTotal(randomrow)\n if do_print:\n print purno, randomrow[0], randomrow[2], randomrow[6]\n for country in event:\n print \" %s: %.2f%%\" % (\n country, self.GetCountryPercentage(event[country], country) * 100)\n return total", "def summarize(self):\n txtStr = \"%s to %s, %d flight legs.\" %\\\n (self.origin, self.destination, self.nlegs)\n txtStr += \"\\nTakeoff at %s\\nLanding at %s\\n\" %\\\n (self.takeoff, self.landing)\n txtStr += \"Flight duration of %s including %s observing time\" %\\\n (str(self.flighttime), self.obstime)\n\n return txtStr", "def present_summary(services, methods, count, backup):\n print_heading(\"Summary\")\n if backup is not None:\n writer(f\"Backup: {backup}\")\n writer(f\"Showing {count[0]}/{len(services)} Services\")\n writer(f\"Showing {count[1]}/{len(methods)} Methods\\n\")", "def total_number_of_animals(self):\n animals = self.animal()\n print 'Total number of animals on island: {:4}'.format(\n animals[\"Herbivores\"] + animals[\"Carnivores\"])", "def dp_all(foods, cal_goal, pro_goal, carb_goal, fat_goal):\n costs = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n 999999999)\n foods_used = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n {})\n\n for i in range(cal_goal):\n for j in range(pro_goal):\n for k in range(carb_goal):\n for l in range(fat_goal):\n for n in range(len(foods)):\n food = foods[n]\n if (int(food['calories']) > i\n or int(food['protein']) > j\n or int(food['carbs']) > k\n or int(food['fat']) > l):\n continue\n if (costs[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]\n == 999999999):\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = (macros[i - int(food['calories'])]\n [j - int(food['protein'])]\n [j - int(food['carbs'])]\n [j - int(food['fat'])])\n prev_foods_used = \\\n (foods_used[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]).copy()\n new_cal = calories(\n foods, prev_foods_used) + food['calories']\n new_pro = protein(\n foods, prev_foods_used) + food['protein']\n new_car = carbs(\n foods, prev_foods_used) + food['protein']\n new_fat = fat(\n foods, prev_foods_used) + food['protein']\n if (costs[i][j] > prev_cost + food['serving_cost']\n and new_cal > i - 20 and new_cal < i + 10\n and new_pro < j + 5 and new_pro < j + 5\n and new_car < j + 5 and new_car < j + 5\n and new_fat < j + 5 and new_fat < j + 5):\n costs[i][j][k][l] = prev_cost + \\\n food['serving_cost']\n try:\n prev_foods_used[n] += 1\n except KeyError:\n prev_foods_used[n] = 1\n foods_used[i][j][k][l] = prev_foods_used\n return foods_used[cal_goal - 1][pro_goal - 1][carb_goal - 1][fat_goal - 1]", "def show_summary(self) -> None:\n all_averages = []\n\n for i in self.album_statistics.values():\n try:\n all_averages.append(i['avg'])\n except (TypeError, ValueError):\n pass\n # print(all_averages)\n try:\n final_average = 
math.ceil(np.mean(all_averages))\n except ValueError:\n click.echo(\n 'Oops! https://lyrics.ovh couldn\\'t find any lyrics across any'\n ' album. This is caused by inconsistent Artist names from'\n ' Musicbrainz and lyrics.ovh. Try another artist.'\n )\n raise (SystemExit)\n output = BeautifulTable(max_width=200)\n output.set_style(BeautifulTable.STYLE_BOX_ROUNDED)\n output.column_headers = [\n 'Average number of words in tracks across all albums\\n'\n f'for {self.artist}'\n ]\n output.append_row([final_average])\n click.echo(output)\n\n return self", "def summary(self):\n raise NotImplementedError", "def carnivore_eats(self):\n self.order_by_fitness()\n for carn in self.fauna_list['Carnivore']:\n food_required = carn.parameters['F']\n amount_to_eat = 0\n not_eaten_animals = []\n for i, herb in enumerate(self.fauna_list['Herbivore']):\n if food_required <= amount_to_eat:\n not_eaten_animals.extend(self.fauna_list['Herbivore'][i:])\n break\n elif np.random.random() < carn.probability_of_kill(herb):\n if food_required - amount_to_eat < herb.weight:\n amount_to_eat += herb.weight\n elif food_required - amount_to_eat > herb.weight:\n amount_to_eat += food_required - amount_to_eat\n else:\n not_eaten_animals.append(herb)\n carn.animal_eats(amount_to_eat)\n self.fauna_list['Herbivore'] = not_eaten_animals", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def _get_book_summary(self):\n self._get_book_prices()\n for price in self.ask_prices:\n volume = 0\n for k in self.ask_snapshot.keys():\n if self.ask_snapshot[k].price == price:\n volume += self.ask_snapshot[k].volume\n self.ask_volumes.append(volume)\n for price in self.bid_prices:\n volume = 0\n for k in self.bid_snapshot.keys():\n if self.bid_snapshot[k].price == price:\n volume += self.bid_snapshot[k].volume\n self.bid_volumes.append(volume)", "def summarize(self, data):\n\n return self.summary(data).flatten()", "def get_single_shopping_list(recipe):\n total_ingredients = []\n\n for key, value in recipe.ingredient_amounts.items():\n if key in recipe.ingredient_units:\n unit = recipe.ingredient_units[key]\n total_ingredients.append([key, round((float(value)/gram_conversions[unit]), 2), unit])\n else:\n total_ingredients.append([key, value, None])\n\n return total_ingredients", "def describe_restaurant(self):\r\n print(\"\\n==========This is our restaurant \" + self.restaurant.title() + \"===============\")\r\n print(\"We serve you amazing \" + self.cuisine + \" 's cusine\")", "def inventory_report(products):\n name_list = set()\n price_list = []\n wt_list = []\n flamablity_list = []\n\n for p in products:\n name_list.add(p.name)\n price_list.append(p.price)\n wt_list.append(p.weight)\n flamablity_list.append(p.flammability)\n# Calculating average for report\n unique_names = len(name_list)\n avg_price = sum(price_list)/len(price_list)\n avg_weight = sum(wt_list)/len(wt_list)\n avg_flammability = sum(flamablity_list)/len(flamablity_list)\n# Printing\n print(\"$ python acme_report.py \")\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\n print(\"Unique product names:\", unique_names)\n print(\"Average price:\", round(avg_price, 2))\n print(\"Average weight:\", avg_weight)\n print(\"Average flammability:\", avg_flammability)", "def food_eaten(self):\r\n # get values from GUI\r\n\r\n foodList = \"\"\r\n foodCost=0\r\n if self.is_eggs.get():\r\n 
foodList += \"eggs $2.00\\n\"\r\n foodCost+=2\r\n if self.is_bacon.get():\r\n foodList += \"bacon $4.00\\n\"\r\n foodCost += 4\r\n if self.is_sausage.get():\r\n foodList += \"sausage $4.00\\n\"\r\n foodCost += 4\r\n if self.is_oj.get():\r\n foodList += \"OrangeJuice $3.00\\n\"\r\n foodCost += 3\r\n foodCost = ('%.2f' % foodCost)\r\n\r\n # Create the output to screen of foodlist\r\n story = (\"\\nThank you for joining us here at Order Up!\\n\\nThe foods that you ate are as follows:\\n\\n\\n\"+foodList+\"\\nThe total amount owed is: $\"+foodCost)\r\n # display the summary\r\n self.story_txt.delete(0.0, END)\r\n self.story_txt.insert(0.0, story)", "def generate_fish_report(self):\n if len(self.fish) == 0:\n print('No fish in here, come back later')\n\n for species, count in self.fish.items():\n print(f'{species}: {count}')", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def test_visualize_recipe_taste(self):\n pass", "def calculate_tip(meal_base, tip_rate):", "def inventory_report(products):\r\n names = set()\r\n total_price = 0\r\n total_weight = 0\r\n total_flammability = 0\r\n for product in products:\r\n names.add(product.name)\r\n total_price += product.price\r\n total_weight += product.weight\r\n total_flammability += product.flammability\r\n\r\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\r\n print(\"Unique product names: {}\".format(len(names)))\r\n print(\"Average price: {}\".format(total_price / len(products)))\r\n print(\"Average weight: {}\".format(total_weight / len(products)))\r\n print(\"Average flammability:{}\".format(\r\n total_flammability / len(products)))\r\n\r\n print(\"Following is useful starting code for acme_report.py:\")", "def report():\n print(\"Donor Name | Total Given | Num Gifts | Average Gift\")\n print(\"------------------------------------------------------------------\")\n for key, val in data.items():\n print(f\"{key:25} $ {float(sum(val)):>12.2f} {len(val):>8} $ {float(sum(val))/len(val):>11.2f}\")", "def toppings(request, pizza_id):\r\n pizza = Pizza.objects.get(id=pizza_id)\r\n toppings = pizza.topping_set.order_by('name')\r\n context = {'pizza': pizza, 'toppings': toppings}\r\n return render(request, 'pizzas/toppings.html', context)", "def generate_animal_report(self):\n print('ANIMALS IN ' + self.name)\n for species, count in self.animals.items():\n print(f'{species}: {count}')", "def show_prop(self):\n print(self.population_size)\n print(self.max_generation)\n print(self.mutate_rate)\n print(self.elite_rate)\n print(self.cross_rate)\n print(self.cross_type)\n print(self.verify_num)\n print(self.proof)", "def summary(app):\n click.echo(get_summary(app))", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def 
output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n\n sharpe_ratio = create_sharpe_ratio(returns)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n if len(dd_duration) == 1:\n dd_duration = dd_duration[0]\n\n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\n (\"Drawdown Duration\", \"%s\" % dd_duration)]\n\n self.equity_curve.to_csv('equity.csv')\n self.positions.to_csv('positions.csv')\n self.prices.to_csv('prices.csv')\n\n return stats", "def calories(foods, foods_used):\n calories = 0.0\n for i, count in foods_used.items():\n calories += (foods[i]['calories'] * count)\n return calories", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def summarize(self):\n txtSumm = ''\n\n if self.legtype == 'Takeoff':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Landing':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Other':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Observing':\n txtSumm = \"%02d -- %s, RA: %s, Dec: %s, LegDur: %s, ObsDur: %s\" %\\\n (self.legno, self.target, self.ra, self.dec,\n str(self.duration),\n str(self.obsdur))\n txtSumm += \"\\n\"\n if self.nonsid is True:\n txtSumm += \"NONSIDERIAL TARGET -- NAIFID: %d\" % (self.naifid)\n txtSumm += \"\\n\"\n txtSumm += \"(The SOFIA project sincerely hopes you enjoy \"\n txtSumm += \"your observing breaks due to XFORMS crashes)\"\n txtSumm += \"\\n\"\n txtSumm += \"ObsPlan: %s, ObsBlk: %s\" % (self.obsplan, self.obsblk)\n txtSumm += \"\\n\\n\"\n txtSumm += \"Elevation Range: %.1f, %.1f\" % (self.range_elev[0],\n self.range_elev[1])\n txtSumm += \"\\n\\n\"\n txtSumm += \"ROF Range: %.1f, %.1f\" % (self.range_rof[0],\n self.range_rof[1])\n txtSumm += \"\\n\"\n txtSumm += \"ROF Rate Range: %.1f, %.1f %s\" % (self.range_rofrt[0],\n self.range_rofrt[1],\n self.range_rofrtu)\n txtSumm += \"\\n\\n\"\n txtSumm += \"True Heading Range: %.1f, %.1f\" % (self.range_thdg[0],\n self.range_thdg[1])\n txtSumm += \"\\n\"\n txtSumm += \"True Heading Rate Range: %.1f, %.1f %s\" %\\\n (self.range_thdgrt[0],\n self.range_thdgrt[1],\n self.range_thdgrtu)\n txtSumm += \"\\n\"\n txtSumm += \"Moon Angle: %.1f, Moon Illumination: %s\" %\\\n (self.moonangle, self.moonillum)\n\n return txtSumm", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def choose_pizza(pizzas):\n possible_pizzas = []\n for i in pizzas.keys():\n suitable = True\n for element in pizzas[i]:\n if \"tuna\" in element:\n suitable = False\n elif \"garlic sauce\" in element:\n suitable = False\n elif \"anchovy\" in element:\n suitable = False\n elif \"mussels\" in element:\n suitable = False\n elif \"shrimp\" in element:\n suitable = False\n if suitable:\n possible_pizzas.append(i)\n\n pizza_choice = random.choice(possible_pizzas)\n\n return pizza_choice", "def parse_pizza_info(l):\n\n pizza_dict = {}\n\n for i, element in enumerate(l):\n if element.strip() == '<span 
class=\"meal-name\" itemprop=\"name\">':\n\n # Names of pizza\n pizza_name = l[i+1].split('<')[0].strip()\n pizza_dict[pizza_name] = []\n\n elif '<div class=\"meal-description-additional-info\" itemprop=\"description\">' in element:\n\n pizza_dict[pizza_name] = re.split(',|and',re.split('<|>|\\(', element.strip())[2])\n pizza_dict[pizza_name] = [x.strip() for x in pizza_dict[pizza_name]]\n pizza_dict[pizza_name] = [x.strip('-') for x in pizza_dict[pizza_name]]\n\n return pizza_dict", "def summary(self) -> str:\n pass", "def describe_restaurant(self):\r\n\t\tprint(\"Our restaurant is \" + self.restaurant_name.title() + \".\")\r\n\t\tprint(\"We are known for our \" + self.cuisine_type.title())", "def summary(aggregate):\n (airport, (count, total, square, minimum, maximum)) = aggregate\n\n try:\n mean = total / float(count)\n stddev = math.sqrt((square-(total**2)/count)/count-1)\n\n return (airport, (count, mean, stddev, minimum, maximum))\n except Exception:\n return (airport, (count, None, None, minimum, maximum))", "def describe_restaurant(self):\n\t\tdetails = f\"{self.restaurant_name} is a {self.cuisine_type} restaurant.\"\n\t\tprint(f\"\\n{details}\")", "def restaurant():\n\n # Initialize variables.\n menu = {'egusi': 150, \n 'akpu': 150, \n 'onugbu': 200, \n 'okro': 150, \n 'garri': 150, \n 'nsala': 300, \n 'rice': 150, \n 'stew': 150, \n 'isiewu': 1000\n }\n total = 0.0\n\n print()\n # Request input from user. Exit program if blank line is entered.\n while True:\n order = input(\"Order: \").strip().lower()\n if not order:\n break\n \n # Check if customer order is available in the menu. Increment total\n # if order is available and display appropriate message.\n if order in menu:\n total += menu[order]\n print(f'{order} cost {menu[order]}, total is {total}')\n else:\n print(f'Sorry, we are fresh out of {order} today.')\n\n # print(f'Your total is {total}')\n\n return total", "def fat(foods, foods_used):\n fat = 0.0\n for i, count in foods_used.items():\n fat += (foods[i]['fat'] * count)\n return fat", "def __str__(self):\n return self.summarize()", "def __str__(self):\n return self.summarize()", "def summary(self, fromdt, todt):\r\n totalSaved = self.miser.totalSaved(fromdt, todt) \r\n sumStr = \"%s: %s to %s\\n\" % (self.miser.name, fromdt, todt)\r\n sumStr += \"Total saved: %.2f\" % totalSaved\r\n\r\n sumStr += \"\\n\\nGoals:\\n\"\r\n sumStr += self._goalsMetStr(fromdt, todt, totalSaved)\r\n\r\n return sumStr", "def show_results(bill, tip, pct):\n \n total = tip + bill\n\n print(\"Bill amount: $\" + str(bill))\n print(\"Tip percentage: \" + str(pct) + \"%\")\n print(\"Tip amount due: $\" + str(tip))\n print(\"Total with tip: $\" + str(total))\n\n print(\"\"\"\n-----------------------------------\n GOOD BYE \n-----------------------------------\n\"\"\")", "def summary(self, printed=True):\n raise NotImplementedError", "def protein(foods, foods_used):\n protein = 0.0\n for i, count in foods_used.items():\n protein += (foods[i]['protein'] * count)\n return protein", "def consolidate_ingredients(breakfasts, lunches, dinners):\n total_ingredients = {}\n meals = [breakfasts, lunches, dinners]\n\n for meal in meals:\n for collection in meal:\n ingredients = fetch_ingredients(collection)\n for lst in ingredients:\n if lst[0] in total_ingredients:\n total_ingredients[lst[0]][0] += lst[1]\n total_ingredients[lst[0]][1].add(lst[2])\n else:\n total_ingredients[lst[0]] = [lst[1], set([lst[2]])]\n\n return total_ingredients", "def test_extra_chocolates_single(self):\n _inpc = 
ChocolateFeast(12,4,4)\n self.assertEquals(3,_inpc.get_total_chocolates())", "def main():\n arrs = Arrivals(27, 37, 1)\n s = 0.0\n for i in range(50):\n n = arrs.get_arrivals()\n s += n\n print 'Arrival {}: {}'.format(i, n)\n print 'Average arrivals: {}'.format(s / 50.0)", "def calories() -> None:\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")\n while new_item != \"q\":\n insert_calorie_value(new_item)\n total_calories = 0\n total_calories = adding_total_calories(total_calories)\n food_item_names = []\n appending_food_item_names(food_item_names)\n printing_food_and_calories(food_item_names, total_calories)\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? \n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def get_one_meal():", "def affiche_favoris():\r\n # Liste des favoris utilisés pour la fonction \"select_favorite\"\r\n favorite_dict = {}\r\n # pour les produits dans Count\r\n cursor.execute('USE openfoodfacts;')\r\n cursor.execute(\"\"\"SELECT F1.name as Product, F2.name as Substitute \\\r\n FROM Backup \\\r\n INNER JOIN Food F1 ON Backup.produit_id = F1.id \r\n INNER JOIN Food F2 ON Backup.substitut_id = F2.id\"\"\")\r\n favorite = cursor.fetchall()\r\n index = 1\r\n for i in favorite:\r\n favorite_tuple = (i[0], i[1])\r\n print(\"\\n {}. 
{}, Peut être remplacé par {}.\".format(index, \\\r\n favorite_tuple[0], favorite_tuple[1]))\r\n favorite_dict[index] = favorite_tuple\r\n index += 1\r\n\r\n if not favorite_dict:\r\n print (\"La liste des favoris est vide.\")\r\n else:\r\n print('Choisissez un chiffre pour plus de détail.')\r\n select_favorite(favorite_dict)", "def venmo_calc(my_dic, total, description, tax=0, tip=0, misc_fees=0, clean=False):\n precheck_sum = round(sum(my_dic.values())+tax+tip+misc_fees,2)\n total = round(total,2) # otherwise get weird 23.00000005 raw totals\n if total != precheck_sum:\n return st.write(f\"You provided {total} as the total, but I calculated {precheck_sum}\")\n else:\n num_ppl = len(my_dic.keys())\n tax_perc = tax/(total-tip-misc_fees-tax)\n tip_perc = tip/(total-tip-misc_fees-tax)\n fee_part = round(misc_fees/num_ppl,2)\n request = {}\n rounded_sum = 0\n for key in my_dic.keys(): \n my_total = my_dic[key]\n\n tax_part = tax_perc * my_total\n tip_part = tip_perc * my_total\n\n person_total = my_total + tax_part + fee_part + tip_part\n rounded_sum += person_total\n request[key] = person_total\n ### Explain the calculation for transparency ###\n with st.beta_expander(label='What just happened?'):\n st.write(f\"\"\"\n 1. Tax% ($p_x$) was calculated using tax/(food_total): __{round(tax_perc*100,2)}%__\n 2. Tip% ($p_p$) was calculated using tip/(food_total): __{round(tip_perc*100,2)}%__\n 3. Fees were distributed equally: __${fee_part}__ per person\n 4. Each person's sum was calculated using: $m_t=d_s + (d_s * p_x) + (d_s*p_p) + d_f$\n * $m_t$ = total money to request\n * $d_s$ = dollars spent on food\n * $p_x$ = percent tax\n * $p_p$ = percent tip\n * $d_f$ = dollars spent on fee\n \"\"\")\n rounded_sum = round(rounded_sum,2)\n ### Error catcher ###\n if (rounded_sum > total+0.1):\n return st.write(f\"Uh oh! My calculated venmo charge sum is ${rounded_sum} but the receipt total was ${round(total,2)}\")\n\n ### Round the calculated request amounts ###\n request_money = {}\n for key in request.keys():\n request_money[key] = [round(request[key],2)]\n from apps import manual_mode as mm\n # get dictionary of name:message\n messages = venmo_message_maker(description,request_money,my_dic,tip_perc,tax_perc,fee_part,tip,tax,misc_fees, clean_message=clean)\n \n data = {\"request_money\":request_money,\n \"messages\":messages} \n return data", "def __str__(self):\n return f'{self._name} has {self._calories} calories, {self._carbohydrates}' +\\\n f'g. carbohydrates, {self._fat}g. of fat and {self._proteins}g. of proteins'", "def test_extra_chocolates_multiple(self):\n _inpc = ChocolateFeast(6,2,2)\n self.assertEquals(5,_inpc.get_total_chocolates())", "def add_food_to_bag(self):\n self.food_eaten.set(sum([species.food.get() for species in self.ecosystem]))", "def func(self):\n from commands.base_commands.guest import census_of_fealty\n\n fealties = census_of_fealty()\n table = PrettyTable([\"{wFealty{n\", \"{w#{n\"])\n for fealty in fealties:\n table.add_row([fealty, fealties[fealty]])\n self.msg(table)", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total" ]
[ "0.600868", "0.6002266", "0.59282035", "0.59114504", "0.5896875", "0.5879344", "0.58790183", "0.5830151", "0.58242345", "0.58242345", "0.58150595", "0.5803258", "0.57938284", "0.57802445", "0.57720184", "0.57685405", "0.57685405", "0.5758228", "0.5754923", "0.57318443", "0.57145965", "0.57122093", "0.5611701", "0.55763674", "0.5571644", "0.5571415", "0.5496812", "0.545135", "0.544935", "0.54354566", "0.54299146", "0.54263943", "0.5393129", "0.53838027", "0.5374992", "0.53608793", "0.53242666", "0.5315155", "0.5264564", "0.5261272", "0.5245013", "0.5243312", "0.524294", "0.5223371", "0.5222609", "0.52059567", "0.5188855", "0.5188592", "0.51874685", "0.51849425", "0.5179223", "0.5151264", "0.51486826", "0.5147458", "0.5136431", "0.51292694", "0.51055026", "0.5100168", "0.5083701", "0.5082808", "0.50803643", "0.5079786", "0.507928", "0.5069375", "0.5067589", "0.5063179", "0.5056102", "0.50455594", "0.50433934", "0.503642", "0.5035859", "0.50356746", "0.5033242", "0.50318104", "0.50214547", "0.5016467", "0.5011643", "0.50097036", "0.4991131", "0.49891227", "0.49891227", "0.498749", "0.49850294", "0.49827355", "0.49805564", "0.49696666", "0.49679977", "0.49652043", "0.49600908", "0.4959893", "0.49550572", "0.49517143", "0.4948632", "0.493878", "0.4927642", "0.49192318", "0.4917958", "0.4914932", "0.4914594", "0.49143893" ]
0.593303
2
Takes a user and a group name, and returns `True` if the user is in that group.
def is_in_group(user, group_name):
    return is_in_group_user_id(user.id, group_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False", "def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()", "def is_user_in_group(user, group):\n\n if user == group.get_name():\n return True\n elif user in group.get_users():\n return True\n else:\n for group in group.get_groups():\n return is_user_in_group(user, group)\n\n return False", "def is_user_in_group(user: str, group: Group) -> bool:\n if group is None or user is None or user is \"\":\n return False\n if user in group.get_users():\n return True\n for sub_group in group.get_groups():\n user_exists = is_user_in_group(user, sub_group)\n if user_exists:\n return True\n return False", "def is_user_in_group(user, group):\r\n if type(group) is not Group:\r\n raise ValueError(\"Not a valid group\")\r\n\r\n if type(user) is not str:\r\n raise ValueError(\"Not a valid user\")\r\n\r\n user_name = find_user(user, group)\r\n if user_name == \"\":\r\n return False\r\n\r\n return True", "def _is_in_group(user, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()\n except Group.DoesNotExist:\n return None", "def is_user_in_group(user, group):\n # Check group\n if user in group.users: # O(N)\n return True\n\n # Check subgroups\n for sub_group in group.groups: # O(N)\n if is_user_in_group(user, sub_group):\n return True\n\n return False", "def is_user_in_group(_cls, user, group):\n if user is None or group is None:\n return \"Please enter a valid user and group\"\n\n if user in group.get_users():\n return True\n else:\n for sub_group in group.get_groups():\n if Group.is_user_in_group(user, sub_group):\n return True\n\n return False", "def user_in_group(user, *group_names):\n\treturn bool(user.groups.filter(name__in=group_names)) | user.is_superuser", "def is_user_in_group(user, group):\n sub_user=group.get_users() # Get all the users within the group\n\n if user in sub_user: # If user is within the group, return True\n return True\n\n sub_group=group.get_groups() # Get all the sub groups within the group\n\n if len(sub_group)==0: # Base case if there are no sub groups within group\n return False\n\n for item in sub_group: # Recursively search within sub groups for the user\n return is_user_in_group(user,item)\n return False", "def _user_belongs_to(group_name):\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups", "def has_group(user, group_name):\n return user.groups.filter(name=group_name).exists()", "def is_in_group_user_id(user_id, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user_id).exists()\n except Group.DoesNotExist:\n return None", "def has_group(group, user, request):\n return group_names[group] in groupfinder(user.username, request)", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def is_in_group(self, group):\n return group in self.get_all_groups()", "def group_authenticated(self, user_token, group):\n if self.authenticated(user_token):\n token = self.token_storage.get(user_token)\n groups = self.get_groups(token.username)\n if group in groups:\n return True\n\n return False", "def is_group(self, group_name):\n\n return group_name in self._group", "def 
in_group(self, group):\n\n return self.secondary_groups.filter(\n groups_users.c.group_id == group.id).count() > 0", "def userMemebership(self, username, group):\r\n return group in self.getUserGroups(username)", "def is_member_of_group(self, mail, group):\n members = self.get_group_members(group)\n\n if mail in members:\n return True\n return False", "def belongs_to(self, group):\n return self in group.users", "def has_group(self,groupname):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" WHERE $groupname_field$='$groupname$'\",{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: has_group: %s\" % (query,))\n\n cursor.execute(query)\n for row in cursor:\n return True\n return False", "def has_permission(user, required_groups):\n user_groups = set([g.name for g in user.groups.all()])\n return user_groups.issuperset(required_groups)", "def IsObjectInGroup(object_id, group_name=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n count = rhobj.GroupCount\n if count<1: return False\n if not group_name: return True\n index = scriptcontext.doc.Groups.Find(group_name, True)\n if index<0: raise ValueError(\"%s group does not exist\"%group_name)\n group_ids = rhobj.GetGroupList()\n for id in group_ids:\n if id==index: return True\n return False", "def in_projects_admin_group(user):\n if user:\n return user.groups.filter(name='projects_admin').count() != 0", "def test_has_access_is_in_group(self):\n user, usrmgr_mock = self.__get_test_instance(\n \"@foouser\", 1337, group=\"foogroup\")\n usrmgr_mock.return_value.user_is_in_group.return_value = True\n with patch.object(user, \"save\"):\n user.has_access(\"foogroup\")", "def allowed_group_access_use(user, group):\n return (user.has_perm(\"vnswww.group_use_any\")\n or (user.has_perm(\"vnswww.group_use_org\")\n and group.org == user.get_profile().org))", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False", "def check_presence_groups(self, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} where id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with checking the groups for users. 
Error: {e}\"\n self.proceed_error(msg)\n return False", "def __is_permission_in_groups(self, name: str) -> bool:\n permission = Permission.objects.get(codename=name)\n\n for group_name in main_app_groups:\n group = Group.objects.get(name=group_name)\n if permission in group.permissions.all():\n return True\n\n return False", "def test_by_user_user_is_in_group(self):\n recipient = self.create_user()\n thread = self.create_thread(recipient=recipient)\n result = Thread.public.by_user(user=recipient)\n self.assertIn(thread, result)", "def get_member_from_group(member, group_name):\n query= \"SELECT * FROM groupmembers WHERE member='{}' AND group_id='{}'\".format(member, group_name)\n cur.execute(query)\n result = cur.fetchall()\n if len(result) > 1:\n return True\n return False", "def group_required(*group_names):\n\n def in_groups(current_user):\n if not settings.ENABLE_PERMISSIONS:\n return True\n if current_user.is_authenticated:\n if current_user.groups.filter(name__in=group_names).exists():\n return True\n return False\n\n return user_passes_test(in_groups)", "def group_required(group_names):\n\ttry:\n\t\tuser = CrequestMiddleware.get_request().user\n\t\tif user.is_authenticated():\n\t\t\ttest = user.groups.filter(name=group_names).exists()\n\texcept (AttributeError):\n\t\ttest = False\n\n\n\treturn user_passes_test(test)", "def check_user_group(required_groups):\n\n if current_user.is_anonymous:\n raise UnauthorisedAccessError\n\n master_group = (PermissionGroups.query \n .filter_by(group_name='Master')\n .first())\n if master_group in current_user.access_groups:\n return True\n\n access = [current_user.has_auth_access(PermissionGroups.query.filter_by(\n group_name=group).first())\n for group in required_groups]\n if not any(access):\n raise UnauthorisedAccessError", "def is_group(group_name):\n\n try:\n r_json = requests.get(\n 'https://api.rozklad.org.ua/v2/groups/{}'.format(group_name)).json()\n message_text = r_json['message']\n if message_text == 'Ok':\n return True\n elif message_text == 'Group not found':\n return False\n else:\n logger.error(message_text)\n except ConnectionError as error_text:\n logger.error(error_text)\n except IndexError as error_text:\n logger.error(error_text)", "def group_required(*group_names):\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def is_in_retina_graders_group(user):\n return user.groups.filter(name=settings.RETINA_GRADERS_GROUP_NAME).exists()", "def is_in_retina_group(user):\n return is_in_retina_graders_group(user) or is_in_retina_admins_group(user)", "def is_in_retina_admins_group(user):\n return user.groups.filter(name=settings.RETINA_ADMINS_GROUP_NAME).exists()", "def group_required(*group_names):\n\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))", "def check_user_group_connection(self, id_group:int, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} WHERE id_group={id_group} AND id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n 
except Exception as e:\n msg = f\"We have problem with getting values from the {table_users_groups}. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def security_group_exists(self, sg_id=None, name=None):\n if sg_id:\n return sg_id in [sg.id for sg in self.get_all_security_groups()]\n elif name:\n return name in [sg.name for sg in self.get_all_security_groups()]", "def verify_user_group_details(connection_obj, uid, group, device=\"server\"):\n output = get_user_group_details(connection_obj,device=device)\n if not output:\n st.log(\"Output not found {}\".format(output))\n return False\n if uid:\n user_data = re.findall(r\"uid=\\d+\\({}\\)\".format(uid), output)\n if not user_data:\n st.log(\"User data not found -- {}\".format(uid))\n return False\n if group:\n group_data = re.findall(r\"gid=\\d+\\({}\\)\".format(group), output)\n if not group_data:\n st.log(\"Group data not found -- {}\".format(group))\n return False\n return True", "def group_required(*group_names):\n\tdef in_groups(u):\n\t\tif u.is_authenticated():\n\t\t\tif bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n\t\t\t\treturn True\n\t\treturn False\n\treturn user_passes_test(in_groups, login_url='/')", "def check_gadm(user_id):\n cur = g.db.execute('select gadm from user_group where id_user == ?', [user_id])\n for row in cur.fetchall():\n if row[0] == 1:\n return True\n return False", "def has_call_permission_for_local_group(user, local_group, permission):\n\n \"\"\"Check Feature Access and Local Group Permissions\"\"\"\n if hasattr(user, 'localgroupprofile'):\n local_group_profile = user.localgroupprofile\n if has_call_feature_access_for_local_group(local_group):\n return local_group_profile.has_permission_for_local_group(\n local_group,\n permission\n )\n\n \"\"\"Otherwise False\"\"\"\n return False", "def is_create_group(string, nickname):\n if string == f\"{nickname} created the group.\":\n return True\n return False", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def test_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def has_group_address(self, group_address):\n return self.switch.has_group_address(group_address)", "def get_group(self, obj):\n group = Group.objects.filter(name=\"teachers\")\n users = User.objects.filter(groups__in=group)\n if obj in users:\n return \"teachers\"\n else:\n return \"students\"", "def is_member(user: User) -> bool:\n if not user:\n raise TypeError('user should not be None')\n return user.name.startswith('L')", "def is_participant(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.participants_group.pk).exists()\n )", "def es_utilizado(self):\n group = Group.objects.filter(id=self.id)\n group = group.all()[0] if group.exists() else None\n # group = Group.objects.get(name=self.nombre)\n return group.user_set.all().exists() if group is not None else False", "def check_ldap_group_existence(group_id):\n endpoint = f\"/identities/groups/{group_id}\"\n http_response = call_rest_api(endpoint, \"head\", **config.DEFAULT_REST_KWARGS)\n if http_response.status_code == 200: # 200 = 'OK. 
Group exists.'\n return True\n return False", "def perform_graph_call(token, user) -> bool:\n _dict = perform_request(app_config.ENDPOINT, token)\n _ids = get_all_group_ids(token)\n for _id in app_config.GROUP_ID:\n if _id in set(_ids):\n return True\n return False", "def check_group_user_existence(self, group_id, user_id):\n resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))\n self.expected_success(204, resp.status)\n return rest_client.ResponseBody(resp)", "def has_privileges_for_group(self, group_id: int) -> bool:\n from apps.enrollment.courses.models.group import Group\n\n try:\n group = Group.objects.get(pk=group_id)\n return group.teacher == self or group.course.owner == self or self.user.is_staff\n except Group.DoesNotExist:\n logger.error(\n 'Function Employee.has_privileges_for_group(group_id = %d) throws Group.DoesNotExist exception.' %\n group_id)\n return False", "def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def check_user_has_read_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n members = dataBase['userGroups'][userGroup]['members']\n return userName in owners or userName in members", "def is_participant(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.participants_group_name).count() > 0:\n return True\n else:\n return False", "def check_group_exists(self, group_name):\n for grp in self.get_list_groups():\n if grp[\"name\"] == group_name:\n return grp[\"id\"], grp[\"members\"]\n\n return None", "def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()", "def is_member(self, username):\n usernames = [user.username for user in self.members]\n return True if username in usernames else False", "def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False", "def test_logged_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def check_policy_groups(zone, org, verbose=False):\n complies = True\n\n if not zone:\n zone = get_local_zone(verbose)\n if not zone:\n # some error\n return False\n\n\n # Check that the groups 'ids-user#localzone' and\n # (if org provided) check that 'ids-<org>#localzone'\n # also exists. An error from underlying function\n # calls will cause non-compliance to be flagged.\n if org:\n u = 'ids-%s#%s' % (org, zone)\n rc = irods_user_exists(u, verbose)\n if rc < 1:\n if rc == 0 and verbose:\n print(' group %s does not exist.' % (u,))\n complies = False\n elif verbose:\n print(' group %s exists according to policy.' % (u,))\n \n\n u = 'ids-user#%s' % (zone,)\n rc = irods_user_exists(u, verbose)\n if rc < 1:\n if rc == 0 and verbose:\n print(' group %s does not exist.' % (u,))\n complies = False\n elif verbose:\n print(' group %s exists according to policy.' 
% (u,))\n \n return complies", "def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False", "def has_user(self, username):\n\t\treturn username in self.users", "def can_substitute(userid, group):", "def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "def contains(self, user_id: int, client_name: str) -> bool:\n return client_name in self.clients[user_id]", "def synchronize_group(self, group, prefix, blacklist):\n\n try:\n group_name = group[1]['cn'][0]\n group_members = group[1]['member']\n except Exception as e:\n self.logger.error(\"Failed to retrieve group name and members: {0}\".format(e))\n return False\n\n self.logger.debug(\n \"Group '{0}' has members: {1}\".format(\n group_name, group_members\n )\n )\n\n role_match = None\n role_match = re.search(\n '^{}(?P<role_name>[a-zA-Z0-9_]+)'.format(prefix), group_name\n )\n\n if role_match:\n role_name = role_match.groups('role_name')[0]\n else:\n self.logger.warning(\n \"Group '{0}' did not match the pattern, skipping...\".format(\n group_name\n )\n )\n return False\n\n if role_name in blacklist:\n self.logger.info(\n \"Skipping group '{0}' which is on the blacklist.\".format(\n group_name\n )\n )\n return False\n\n # First, ensure that the role exists\n try:\n self.psql_cur.execute(\n \"SELECT 1 FROM pg_roles WHERE rolname='{0}'\".format(role_name)\n )\n result = self.psql_cur.fetchone()\n except psycopg2.Error as e:\n self.logger.error(unicode(e.message).encode('utf-8'))\n return False\n\n if not result or result[0] == 0:\n self.logger.warning(\n \"Group {0} does not have a PG role, skipping...\".format(\n group_name\n )\n )\n return False\n\n # Second, extract each member from the list.\n try:\n authorized_users = self.extract_users(group_members)\n except Exception as e:\n self.logger.error(\n \"Failed to extract users from LDAP for {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n # Third, add authorized users to the role\n try:\n self.add_authorized_users(role_name, authorized_users)\n except Exception as e:\n self.logger.error(\n \"Failed to add users to the PG role for group {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n # Lastly, remove all users that are not on the list\n try:\n self.purge_unauthorized_users(role_name, authorized_users)\n except Exception as e:\n self.logger.error(\n \"Failed to remove unauthorized users from group {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n return True", "def test_user_is_group_member(self):\n self.user.add_to_group(self.thread.group.pk)\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def user_is_student(userobj):\n from .assignment_group import AssignmentGroup\n return AssignmentGroup.published_where_is_candidate(userobj).exists()", "def group_exists(name):\n with fabric.api.settings(fabric.api.hide('warnings', 'stderr', 'stdout', 'running'), warn_only=True):\n group_data = fabric.api.run(\n \"cat /etc/group | egrep '^%s:' ; true\" %\n (name))\n\n if group_data:\n name, _, gid, members = group_data.split(\":\", 4)\n return dict(name=name, gid=gid, members=tuple(m.strip()\n for m in members.split(\",\")))\n else:\n return None", "def 
is_membership(self, gid, membership):\n if membership not in [ 'member', 'manager', 'owner']:\n raise Exception(\"Membership request is unexpect as: {m}. Only member, owner or manager inquery allowed.\".format(m=membership))\n url = \"{b}/group/is-{m}/{gid}\".format(b=self.base_url, m=membership, gid=gid)\n r = self.get(url)\n print r", "def load_user_groups(user):\n if not user.is_authenticated:\n return False\n \n user.is_faculty = len(user.groups.filter(name='faculty')) > 0\n user.is_student = not user.is_faculty\n\n return True", "def is_group(id):\n return id.startswith('G')", "def is_group(g, node):\n if node not in g.nodes():\n print('Not a node in the graph')\n return False\n elif g.node[node]['type'] == 'group':\n return True\n else:\n return False", "def is_evaluador(user):\n return user.groups.filter(name='Evaluadores').exists()", "def has_permission(self, request, view):\n usuario = request.user\n grupo = usuario.grupo\n return grupo.name in [\"SuperUsuario\", \"Administrador\"]", "def user_exists(cls, name):\n\n for user in cls.user_list:\n if user.user_name == name:\n return True\n\n return False", "def check_insert_group_user(self, id_user:int, id_group:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with the check previous insertion on th. Mistake: {e} \"\n self.proceed_error(msg)\n return False", "def IsCorpUser(cnxn, services, user_id):\n user_group_ids = services.usergroup.LookupMemberships(cnxn, user_id)\n corp_mode_groups_dict = services.user.LookupUserIDs(\n cnxn, settings.corp_mode_user_groups, autocreate=True)\n corp_mode_group_ids = set(corp_mode_groups_dict.values())\n corp_mode = any(gid in corp_mode_group_ids for gid in user_group_ids)\n return corp_mode", "def has_permission(self, request, view):\n authenticated = super(IsRpcRacker, self).has_permission(request, view)\n user_groups = getattr(request.user, 'roles', set())\n if not isinstance(user_groups, set):\n user_groups = set(user_groups)\n return authenticated and bool(self.rpc_groups & user_groups)", "def isOp(self, user, channel=None):\n if channel is not None:\n return user in self.opsets[channel]\n\n for ch in self.opsets:\n if user in self.opsets[ch]:\n return True\n return False", "def group_exists(self, path_to_group, groupname):\n self.open_db()\n try:\n group = self.h5file.get_node(path_to_group,\n name=groupname)\n except tb.NoSuchNodeError:\n group = False\n return group", "def has_group(self):\n # first-party\n from tcex.api.tc.v3.groups.group_filter import GroupFilter\n\n groups = GroupFilter(Tql())\n self._tql.add_filter('hasGroup', TqlOperator.EQ, groups, TqlType.SUB_QUERY)\n return groups", "def has_group_address(self, group_address):\n return (\n # self.datetime.has_group_address(group_address)\n self.date.has_group_address(group_address)\n or self.time.has_group_address(group_address) # noqa W503\n )", "def is_valid_group(self, destination):\n # TODO: for now we just check if this is not an email\n if '@' in destination: # is this an email ?\n return False\n else:\n return True", "def test_is_member_ok(self):\n self.add_group('testgroup', ['user:[email protected]'])\n\n # baphomet is not a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 
200)\n self.assertEqual({u'is_member': False}, response.json)\n\n # mithras is a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 200)\n self.assertEqual({u'is_member': True}, response.json)", "def get_in_users(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user in obj.users.all():\n return True\n else:\n return False", "def user_is_admin(user):\n return user in admins" ]
[ "0.90404195", "0.89909446", "0.8921993", "0.88956964", "0.8691311", "0.8651488", "0.85226196", "0.8471016", "0.8424121", "0.83981097", "0.83976525", "0.8325658", "0.8266118", "0.8199991", "0.81931895", "0.7539715", "0.7505139", "0.7332905", "0.726839", "0.7217727", "0.7149777", "0.70672965", "0.7025459", "0.6690672", "0.66790277", "0.66556174", "0.65745276", "0.64787424", "0.64599943", "0.6438533", "0.64373606", "0.6390754", "0.63735694", "0.6358276", "0.63396734", "0.62987417", "0.62770003", "0.62487996", "0.6217103", "0.6173215", "0.616613", "0.61519307", "0.6139528", "0.6127016", "0.610568", "0.60711217", "0.60367084", "0.6019135", "0.60033154", "0.59905773", "0.59526557", "0.5913453", "0.5900932", "0.58843905", "0.58823127", "0.58717406", "0.58624905", "0.5852626", "0.5846344", "0.5839214", "0.5836654", "0.5831581", "0.58104044", "0.58035696", "0.57856023", "0.57706773", "0.57550436", "0.5737449", "0.5719927", "0.57027304", "0.56996137", "0.56918013", "0.5683228", "0.566756", "0.56555116", "0.56537306", "0.5652808", "0.5640099", "0.56352943", "0.5608059", "0.5601321", "0.5600844", "0.5583052", "0.5578873", "0.5555122", "0.55414474", "0.55336124", "0.55211115", "0.5516436", "0.549699", "0.548945", "0.5474519", "0.54682827", "0.5466391", "0.54632413", "0.5454919", "0.54539335", "0.544934", "0.54428005", "0.5438819" ]
0.90948594
0
Takes a user id and a group name, and returns `True` if the user is in that group.
def is_in_group_user_id(user_id, group_name):
    try:
        return Group.objects.get(name=group_name).user_set.filter(id=user_id).exists()
    except Group.DoesNotExist:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_in_group(user, group_name):\n return is_in_group_user_id(user.id, group_name)", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()", "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False", "def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()", "def is_user_in_group(user, group):\n\n if user == group.get_name():\n return True\n elif user in group.get_users():\n return True\n else:\n for group in group.get_groups():\n return is_user_in_group(user, group)\n\n return False", "def _is_in_group(user, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()\n except Group.DoesNotExist:\n return None", "def is_user_in_group(user: str, group: Group) -> bool:\n if group is None or user is None or user is \"\":\n return False\n if user in group.get_users():\n return True\n for sub_group in group.get_groups():\n user_exists = is_user_in_group(user, sub_group)\n if user_exists:\n return True\n return False", "def is_user_in_group(user, group):\r\n if type(group) is not Group:\r\n raise ValueError(\"Not a valid group\")\r\n\r\n if type(user) is not str:\r\n raise ValueError(\"Not a valid user\")\r\n\r\n user_name = find_user(user, group)\r\n if user_name == \"\":\r\n return False\r\n\r\n return True", "def is_user_in_group(user, group):\n # Check group\n if user in group.users: # O(N)\n return True\n\n # Check subgroups\n for sub_group in group.groups: # O(N)\n if is_user_in_group(user, sub_group):\n return True\n\n return False", "def is_user_in_group(user, group):\n sub_user=group.get_users() # Get all the users within the group\n\n if user in sub_user: # If user is within the group, return True\n return True\n\n sub_group=group.get_groups() # Get all the sub groups within the group\n\n if len(sub_group)==0: # Base case if there are no sub groups within group\n return False\n\n for item in sub_group: # Recursively search within sub groups for the user\n return is_user_in_group(user,item)\n return False", "def _user_belongs_to(group_name):\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def has_group(user, group_name):\n return user.groups.filter(name=group_name).exists()", "def user_in_group(user, *group_names):\n\treturn bool(user.groups.filter(name__in=group_names)) | user.is_superuser", "def is_user_in_group(_cls, user, group):\n if user is None or group is None:\n return \"Please enter a valid user and group\"\n\n if user in group.get_users():\n return True\n else:\n for sub_group in group.get_groups():\n if Group.is_user_in_group(user, sub_group):\n return True\n\n return False", "def has_group(group, user, request):\n return group_names[group] in groupfinder(user.username, request)", "def is_in_group(self, group):\n return group in self.get_all_groups()", "def IsObjectInGroup(object_id, group_name=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n count = rhobj.GroupCount\n if count<1: return False\n if not group_name: return True\n index = scriptcontext.doc.Groups.Find(group_name, True)\n if index<0: raise ValueError(\"%s group does not exist\"%group_name)\n group_ids = rhobj.GetGroupList()\n for id in group_ids:\n if id==index: return 
True\n return False", "def in_group(self, group):\n\n return self.secondary_groups.filter(\n groups_users.c.group_id == group.id).count() > 0", "def is_group(self, group_name):\n\n return group_name in self._group", "def group_authenticated(self, user_token, group):\n if self.authenticated(user_token):\n token = self.token_storage.get(user_token)\n groups = self.get_groups(token.username)\n if group in groups:\n return True\n\n return False", "def belongs_to(self, group):\n return self in group.users", "def check_presence_groups(self, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} where id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with checking the groups for users. Error: {e}\"\n self.proceed_error(msg)\n return False", "def userMemebership(self, username, group):\r\n return group in self.getUserGroups(username)", "def is_member_of_group(self, mail, group):\n members = self.get_group_members(group)\n\n if mail in members:\n return True\n return False", "def has_group(self,groupname):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" WHERE $groupname_field$='$groupname$'\",{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: has_group: %s\" % (query,))\n\n cursor.execute(query)\n for row in cursor:\n return True\n return False", "def test_has_access_is_in_group(self):\n user, usrmgr_mock = self.__get_test_instance(\n \"@foouser\", 1337, group=\"foogroup\")\n usrmgr_mock.return_value.user_is_in_group.return_value = True\n with patch.object(user, \"save\"):\n user.has_access(\"foogroup\")", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False", "def check_gadm(user_id):\n cur = g.db.execute('select gadm from user_group where id_user == ?', [user_id])\n for row in cur.fetchall():\n if row[0] == 1:\n return True\n return False", "def check_user_group_connection(self, id_group:int, id_user:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_users_groups} WHERE id_group={id_group} AND id_user={id_user};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We have problem with getting values from the {table_users_groups}. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def get_member_from_group(member, group_name):\n query= \"SELECT * FROM groupmembers WHERE member='{}' AND group_id='{}'\".format(member, group_name)\n cur.execute(query)\n result = cur.fetchall()\n if len(result) > 1:\n return True\n return False", "def check_ldap_group_existence(group_id):\n endpoint = f\"/identities/groups/{group_id}\"\n http_response = call_rest_api(endpoint, \"head\", **config.DEFAULT_REST_KWARGS)\n if http_response.status_code == 200: # 200 = 'OK. 
Group exists.'\n return True\n return False", "def in_projects_admin_group(user):\n if user:\n return user.groups.filter(name='projects_admin').count() != 0", "def test_by_user_user_is_in_group(self):\n recipient = self.create_user()\n thread = self.create_thread(recipient=recipient)\n result = Thread.public.by_user(user=recipient)\n self.assertIn(thread, result)", "def security_group_exists(self, sg_id=None, name=None):\n if sg_id:\n return sg_id in [sg.id for sg in self.get_all_security_groups()]\n elif name:\n return name in [sg.name for sg in self.get_all_security_groups()]", "def is_group(group_name):\n\n try:\n r_json = requests.get(\n 'https://api.rozklad.org.ua/v2/groups/{}'.format(group_name)).json()\n message_text = r_json['message']\n if message_text == 'Ok':\n return True\n elif message_text == 'Group not found':\n return False\n else:\n logger.error(message_text)\n except ConnectionError as error_text:\n logger.error(error_text)\n except IndexError as error_text:\n logger.error(error_text)", "def has_permission(user, required_groups):\n user_groups = set([g.name for g in user.groups.all()])\n return user_groups.issuperset(required_groups)", "def check_group_user_existence(self, group_id, user_id):\n resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))\n self.expected_success(204, resp.status)\n return rest_client.ResponseBody(resp)", "def is_group(id):\n return id.startswith('G')", "def __is_permission_in_groups(self, name: str) -> bool:\n permission = Permission.objects.get(codename=name)\n\n for group_name in main_app_groups:\n group = Group.objects.get(name=group_name)\n if permission in group.permissions.all():\n return True\n\n return False", "def contains(self, user_id: int, client_name: str) -> bool:\n return client_name in self.clients[user_id]", "def check_group_exists(self, group_name):\n for grp in self.get_list_groups():\n if grp[\"name\"] == group_name:\n return grp[\"id\"], grp[\"members\"]\n\n return None", "def group_required(*group_names):\n\n def in_groups(current_user):\n if not settings.ENABLE_PERMISSIONS:\n return True\n if current_user.is_authenticated:\n if current_user.groups.filter(name__in=group_names).exists():\n return True\n return False\n\n return user_passes_test(in_groups)", "def verify_user_group_details(connection_obj, uid, group, device=\"server\"):\n output = get_user_group_details(connection_obj,device=device)\n if not output:\n st.log(\"Output not found {}\".format(output))\n return False\n if uid:\n user_data = re.findall(r\"uid=\\d+\\({}\\)\".format(uid), output)\n if not user_data:\n st.log(\"User data not found -- {}\".format(uid))\n return False\n if group:\n group_data = re.findall(r\"gid=\\d+\\({}\\)\".format(group), output)\n if not group_data:\n st.log(\"Group data not found -- {}\".format(group))\n return False\n return True", "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def group_required(group_names):\n\ttry:\n\t\tuser = CrequestMiddleware.get_request().user\n\t\tif user.is_authenticated():\n\t\t\ttest = user.groups.filter(name=group_names).exists()\n\texcept (AttributeError):\n\t\ttest = False\n\n\n\treturn user_passes_test(test)", "def is_in_retina_graders_group(user):\n return user.groups.filter(name=settings.RETINA_GRADERS_GROUP_NAME).exists()", "def get_group(self, obj):\n group = Group.objects.filter(name=\"teachers\")\n users = User.objects.filter(groups__in=group)\n if obj in users:\n return \"teachers\"\n else:\n return \"students\"", "def 
allowed_group_access_use(user, group):\n return (user.has_perm(\"vnswww.group_use_any\")\n or (user.has_perm(\"vnswww.group_use_org\")\n and group.org == user.get_profile().org))", "def check_user_group(required_groups):\n\n if current_user.is_anonymous:\n raise UnauthorisedAccessError\n\n master_group = (PermissionGroups.query \n .filter_by(group_name='Master')\n .first())\n if master_group in current_user.access_groups:\n return True\n\n access = [current_user.has_auth_access(PermissionGroups.query.filter_by(\n group_name=group).first())\n for group in required_groups]\n if not any(access):\n raise UnauthorisedAccessError", "def test_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))", "def group_required(*group_names):\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def hasUser(self, id):\n try:\n self.getUser(id)\n return True\n except KeyError:\n return False", "def perform_graph_call(token, user) -> bool:\n _dict = perform_request(app_config.ENDPOINT, token)\n _ids = get_all_group_ids(token)\n for _id in app_config.GROUP_ID:\n if _id in set(_ids):\n return True\n return False", "def check_insert_group_user(self, id_user:int, id_group:int) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT * FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};\").fetchone()\n if value_list:\n return True\n return False\n except Exception as e:\n msg = f\"We faced problems with the check previous insertion on th. Mistake: {e} \"\n self.proceed_error(msg)\n return False", "def has_privileges_for_group(self, group_id: int) -> bool:\n from apps.enrollment.courses.models.group import Group\n\n try:\n group = Group.objects.get(pk=group_id)\n return group.teacher == self or group.course.owner == self or self.user.is_staff\n except Group.DoesNotExist:\n logger.error(\n 'Function Employee.has_privileges_for_group(group_id = %d) throws Group.DoesNotExist exception.' 
%\n group_id)\n return False", "def is_registered(user_id: str) -> bool:\n inventories = get_file(\"inventories\")\n return str(user_id) in inventories", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "def can_substitute(userid, group):", "def is_in_retina_group(user):\n return is_in_retina_graders_group(user) or is_in_retina_admins_group(user)", "def group_required(*group_names):\n\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def is_create_group(string, nickname):\n if string == f\"{nickname} created the group.\":\n return True\n return False", "def es_utilizado(self):\n group = Group.objects.filter(id=self.id)\n group = group.all()[0] if group.exists() else None\n # group = Group.objects.get(name=self.nombre)\n return group.user_set.all().exists() if group is not None else False", "def is_in_retina_admins_group(user):\n return user.groups.filter(name=settings.RETINA_ADMINS_GROUP_NAME).exists()", "def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()", "def is_member(self, id, user):\n request = self.request_builder('orgs.teams.is_member',\n id=id, user=user)\n return self._bool(request)", "def test_logged_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def get_group_values(self, group_id:int, group_name:str) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id={group_id};\").fetchone()\n if not value_list:\n return False\n group_used_id, group_used_name = value_list\n if group_used_name != group_name:\n self.cursor.execute(f\"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with checking of the group prensence. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def group_exists(name):\n with fabric.api.settings(fabric.api.hide('warnings', 'stderr', 'stdout', 'running'), warn_only=True):\n group_data = fabric.api.run(\n \"cat /etc/group | egrep '^%s:' ; true\" %\n (name))\n\n if group_data:\n name, _, gid, members = group_data.split(\":\", 4)\n return dict(name=name, gid=gid, members=tuple(m.strip()\n for m in members.split(\",\")))\n else:\n return None", "def is_membership(self, gid, membership):\n if membership not in [ 'member', 'manager', 'owner']:\n raise Exception(\"Membership request is unexpect as: {m}. 
Only member, owner or manager inquery allowed.\".format(m=membership))\n url = \"{b}/group/is-{m}/{gid}\".format(b=self.base_url, m=membership, gid=gid)\n r = self.get(url)\n print r", "def _is_server_in_group(group, server_id):\n try:\n response, server_info = yield Effect(TenantScope(\n retry_effect(get_server_details(server_id),\n retry_times(3),\n exponential_backoff_interval(2)),\n group.tenant_id))\n except NoSuchServerError:\n raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)\n\n group_id = group_id_from_metadata(\n get_in(('server', 'metadata'), server_info, {}))\n\n if group_id != group.uuid:\n raise ServerNotFoundError(group.tenant_id, group.uuid, server_id)", "def group_required(*group_names):\n\tdef in_groups(u):\n\t\tif u.is_authenticated():\n\t\t\tif bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n\t\t\t\treturn True\n\t\treturn False\n\treturn user_passes_test(in_groups, login_url='/')", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def check_policy_groups(zone, org, verbose=False):\n complies = True\n\n if not zone:\n zone = get_local_zone(verbose)\n if not zone:\n # some error\n return False\n\n\n # Check that the groups 'ids-user#localzone' and\n # (if org provided) check that 'ids-<org>#localzone'\n # also exists. An error from underlying function\n # calls will cause non-compliance to be flagged.\n if org:\n u = 'ids-%s#%s' % (org, zone)\n rc = irods_user_exists(u, verbose)\n if rc < 1:\n if rc == 0 and verbose:\n print(' group %s does not exist.' % (u,))\n complies = False\n elif verbose:\n print(' group %s exists according to policy.' % (u,))\n \n\n u = 'ids-user#%s' % (zone,)\n rc = irods_user_exists(u, verbose)\n if rc < 1:\n if rc == 0 and verbose:\n print(' group %s does not exist.' % (u,))\n complies = False\n elif verbose:\n print(' group %s exists according to policy.' % (u,))\n \n return complies", "def has_call_permission_for_local_group(user, local_group, permission):\n\n \"\"\"Check Feature Access and Local Group Permissions\"\"\"\n if hasattr(user, 'localgroupprofile'):\n local_group_profile = user.localgroupprofile\n if has_call_feature_access_for_local_group(local_group):\n return local_group_profile.has_permission_for_local_group(\n local_group,\n permission\n )\n\n \"\"\"Otherwise False\"\"\"\n return False", "def connect_user_group(self, id_group:int, id_user:int) -> bool:\n try:\n self.cursor.execute(f\"INSERT INTO {table_users_groups} (id_user, id_group) VALUES (?, ?);\", (id_user, id_group))\n self.connection.commit()\n return True\n except Exception as e:\n msg = f'We have problems with the connection between user and group. 
Mistake: {e}'\n self.proceed_error(msg)\n return False", "def isValidGroup(expense_group_id, cursor):\n query = \"\"\"\n SELECT * FROM expense_group WHERE id = ?\n \"\"\"\n cursor.execute(query, (expense_group_id,))\n return len(cursor.fetchall()) == 1", "def test_user_is_group_member(self):\n self.user.add_to_group(self.thread.group.pk)\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def __contains__(self, userid):\r\n userid = int(userid)\r\n return bool(userid in self.players)", "def group_exists(self, path_to_group, groupname):\n self.open_db()\n try:\n group = self.h5file.get_node(path_to_group,\n name=groupname)\n except tb.NoSuchNodeError:\n group = False\n return group", "def is_user(id):\n return id.startswith('U')", "def is_eionet_group(self, role_id):\n for role in EIONET_GROUPS:\n if role in role_id:\n return True\n\n return False", "def is_member(user: User) -> bool:\n if not user:\n raise TypeError('user should not be None')\n return user.name.startswith('L')", "def test_all_users_in_group_are_seen(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n test_users_in_group = []\n test_users_not_in_group = []\n number_of_users = 10\n\n # Create test users that are in group.\n for i in range(number_of_users):\n user = scripts.create_user(f'test_in{i}', f'test_in{i}')\n test_users_in_group.append(user)\n self.group.users.add(user)\n\n # Create test users that are not in group.\n for i in range(number_of_users):\n user = scripts.create_user(f'test_not_in{i}', f'test_not_in{i}')\n test_users_not_in_group.append(user)\n\n response = self.client.get(self.url)\n members = response.context['group'].users.all()\n\n self.assertIn(logged_user, members)\n\n for user in test_users_in_group:\n self.assertIn(user, members)\n\n for user in test_users_not_in_group:\n self.assertNotIn(user, members)", "def insert_group(self, group_id:int, group_name:str, id_user:int, username:str, name_first:str, name_last:str) -> bool:\n try:\n self.insert_settings(id_user)\n if not self.get_user_values(id_user):\n self.insert_username(id_user, username, name_first, name_last)\n self.insert_group_additional(group_id, group_name)\n self.insert_user_group_additional(group_id, id_user)\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problem with inserting the group. 
Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def get_in_users(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user in obj.users.all():\n return True\n else:\n return False", "def has_group_address(self, group_address):\n return self.switch.has_group_address(group_address)", "def is_participant(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.participants_group_name).count() > 0:\n return True\n else:\n return False", "def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()", "def is_participant(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.participants_group.pk).exists()\n )", "def check_member_groups(self, group_ids):\n result = ClientResult(self.context)\n qry = ServiceOperationQuery(self, \"checkMemberGroups\", None, group_ids, None, result)\n self.context.add_query(qry)\n return result", "def groupfinder(user_id, request):\n ret = DBSession.query(User).filter_by(user_id=user_id).all()\n if len(ret) == 0:\n return None\n user = ret[0]\n groups = [x.group_name for x in user.groups]\n return groups", "def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1", "def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )", "def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False", "def has_user(self, username):\n\t\treturn username in self.users", "def load_user_groups(user):\n if not user.is_authenticated:\n return False\n \n user.is_faculty = len(user.groups.filter(name='faculty')) > 0\n user.is_student = not user.is_faculty\n\n return True" ]
[ "0.9006803", "0.8860701", "0.8772584", "0.87357795", "0.85706574", "0.85022044", "0.8340125", "0.8216126", "0.8146766", "0.810608", "0.8100358", "0.803607", "0.7980639", "0.79746544", "0.7930329", "0.7888287", "0.74050176", "0.7149647", "0.71302086", "0.70612216", "0.70261055", "0.7003442", "0.6891792", "0.68714523", "0.6782863", "0.65846574", "0.65418273", "0.63850063", "0.6380608", "0.63623744", "0.63386935", "0.6318658", "0.63119555", "0.6308414", "0.6294898", "0.6255417", "0.6196006", "0.61918813", "0.6148831", "0.6134683", "0.61291987", "0.61196285", "0.61141056", "0.6055622", "0.60483676", "0.6034026", "0.6033962", "0.6023611", "0.6013889", "0.60002995", "0.5960953", "0.59524286", "0.5929025", "0.5915399", "0.591046", "0.590436", "0.5901027", "0.58874404", "0.58799547", "0.5862146", "0.5859924", "0.5859903", "0.5848992", "0.58370215", "0.5833923", "0.58301234", "0.58171993", "0.5814764", "0.5777581", "0.576745", "0.5745715", "0.5737229", "0.57224554", "0.56950307", "0.56822234", "0.56789833", "0.56700695", "0.5650413", "0.5647074", "0.5643706", "0.56378", "0.5624368", "0.56238276", "0.56197363", "0.5615819", "0.5607053", "0.5604733", "0.56011057", "0.5591246", "0.55749923", "0.5566914", "0.55660295", "0.5560177", "0.5557115", "0.55520374", "0.55358475", "0.5526525", "0.55169046", "0.5506024", "0.5483873" ]
0.8786698
2
Perform single crossovers on a given population.
def __init__(self, population_fraction, max_crossover_probability, **kwargs):
    if not (0 < population_fraction <= 1.0):
        raise ValueError("num_crossover_fraction must be in ]0. 1]")
    for val in max_crossover_probability:
        if not (0 < val <= 1.0):
            raise ValueError(
                "max_crossover_probability values must be between ]0. 1]")
    self.population_fraction = population_fraction
    self.max_crossover_probability = max_crossover_probability
    super(SingleCrossover, self).__init__(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population", "def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop", "def single_point_crossover(population):\r\n global decryption_key\r\n\r\n decryption_key += single_point_crossover_del\r\n\r\n new_population = []\r\n for i in range(0, len(population) - 1, 2):\r\n candidate1 = population[i]\r\n candidate2 = population[i + 1]\r\n\r\n # chromosomes have the same length\r\n # choose a random point\r\n length = len(candidate1)\r\n crossover_point = random.randint(0, length - 1)\r\n\r\n decryption_key += str(crossover_point) + \"|\"\r\n\r\n offspring1 = candidate2[0: crossover_point] + candidate1[crossover_point:]\r\n offspring2 = candidate1[0: crossover_point] + candidate2[crossover_point:]\r\n new_population.append(offspring1)\r\n new_population.append(offspring2)\r\n\r\n # append last chromosome if odd population size\r\n if len(population) % 2 == 1:\r\n new_population.append(population[len(population) - 1])\r\n\r\n decryption_key += single_point_crossover_del\r\n\r\n return new_population", "def crossover(population, kw=None, **kwargs):\n future_population = []\n while len(future_population) < len(population):\n p1, p2 = random.choice(population)['notes'], random.choice(population)['notes']\n split = random.randint(1, len(p1) - 1)\n map(future_population.append, [p1[:split] + p2[split:], p2[:split] + p1[split:]])\n return future_population", "def _crossover(self, best_population, crossover, n_parents=2, method=\"uniform_swap\"):\n if crossover:\n # randomly select parents\n parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),\n device=self.device)\n new_population = torch.zeros(self.population.shape, device=self.device)\n i = 0\n for p_idx in parents_indexes:\n new_population[i] = self._produce_child(best_population[p_idx], method=method)\n i += 1\n else:\n # randomly repeat best individuals\n new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)\n new_population = best_population[new_pop_indexes]\n return new_population", "def member_crossover(population):\n gene1 = population[random.randint(0, int(len(population) - 1))]\n gene2 = population[random.randint(0, int(len(population) - 1))]\n split = random.randint(1, int(len(population[0]) - 1))\n new_gene1 = gene1[:split] + gene2[split:]\n new_gene2 = gene2[:split] + gene1[split:]\n\n return new_gene1, new_gene2", "def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * len(pop)), 1):\n 
r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def crossover (self, p1, p2, p_pop, c1, c2, c_pop) :\n assert self.crossover_count < self.pop_size\n assert self.get_iteration () == self.last_gen\n self.parents.append (p1)\n self.parents.append (p2)\n self.crossover_count += 2\n if self.crossover_count == self.pop_size :\n assert (self.get_iteration () == self.last_gen)\n print (self.get_iteration ())\n sys.stdout.flush ()\n self.build_model (p_pop)\n self.sample_model (c1, c2, c_pop)\n self.crossover_count = 0\n self.parents = []\n self.children = {}\n self.last_gen += 1\n self.clear_cache ()", "def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)", "def cross(self):\n\n for i in range(self.pop_num): # Put in the first pop_num elements of the \"Parents and Sons\" array our entire input population.\n self.par_and_sons[i].A=self.population[i].A.copy()\n\n random.shuffle(self.population) # Shuffle population.\n\n tt=0 # The counter that is needed to implement a non-trivial crossing.\n for s in range(0,self.pop_num,2): # From 0 to pop_num with step 2. That is. 
here we take pop_num / 2 pairs of parents.\n self.mother.A=self.population[tt+int(self.pop_num/2)].A # Let the last pop_num / 2 individuals of our population be our mothers.\n self.father.A=self.population[tt].A # And let first pop_num / 2 individuals of our population be dads.\n \n tt=tt+1 \n ran=random.random()\n\n for n in range(self.length): # Crossover.\n if random.random()>0.5:\n self.son1.A[n] = self.father.A[n]\n self.son2.A[self.length-1-n] = self.father.A[n]\n self.son3.A[n] = self.mother.A[n]\n self.son4.A[self.length-1-n] = self.mother.A[n]\n else:\n self.son1.A[n] = self.mother.A[n]\n self.son2.A[self.length-1-n] = self.mother.A[n]\n self.son3.A[n] = self.father.A[n]\n self.son4.A[self.length-1-n] = self.father.A[n]\n\n self.par_and_sons[self.pop_num+2*s].A = self.son1.A.copy()\n self.par_and_sons[self.pop_num+2*s+1].A = self.son2.A.copy()\n self.par_and_sons[self.pop_num+2*s+2].A = self.son3.A.copy()\n self.par_and_sons[self.pop_num+2*s+3].A = self.son4.A.copy()", "def uniform_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs uniform crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if random_number <= self.crossover_prob:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_node_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_node_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_node_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_node_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict", "def crossover(x1,x2):\n for chromo in x1.chromosomes:\n result_chromos = [np.zeros((chromo.shape))]\n #result_chromos = [np.zeros((chromo.shape)) for chromo in x1.chromosomes]\n i = 0\n for j in range(len(x1.chromosomes[i])):\n for k in 
range(len(x1.chromosomes[i][j])):\n if(np.random.rand(1) < 0.5):\n result_chromos[i][j][k] = x1.chromosomes[i][j][k]\n else:\n result_chromos[i][j][k] = x2.chromosomes[i][j][k]\n if(np.random.rand(1)< 0.8):#at 0.3 very agressive\n result_chromos[i][j][k] += -0.05 + np.random.rand(1)*0.1\n return result_chromos", "def one_point_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n idx = numpy.random.randint(1, size)\n numpy.put(genotype1, range(0, idx), another_individual.get_genotype()[0:idx])\n numpy.put(genotype1, range(idx, size), self.get_genotype()[idx:size])\n numpy.put(genotype2, range(0, idx), self.get_genotype()[0:idx])\n numpy.put(genotype2, range(idx, size), another_individual.get_genotype()[idx:size])\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.crossover_method, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.crossover_method, self.mutation_method)", "def c_test__cross_inp(self, old_population, population_weighting, run_locals):\r\n return 1", "def _cross_over(self,mp,cross_rate,eta):", "def crossover(cross):\n @functools.wraps(cross)\n def inspyred_crossover(random, candidates, args):\n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for i, (mom, dad) in enumerate(zip(moms, dads)):\n cross.index = i\n offspring = cross(random, mom, dad, args)\n for o in offspring:\n children.append(o)\n return children\n inspyred_crossover.single_crossover = cross\n return inspyred_crossover", "def segmented_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs segmented crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n swap = False\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if swap is False:\n if random_number <= self.swap_start_prob:\n swap = True\n else:\n swap = False\n elif swap is True:\n if random_number <= self.swap_stop_prob:\n swap = False\n else:\n swap = True\n\n if swap is True:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities 
being crossed\n # over)\n mate_1_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict", "def crossover(cross):\r\n @functools.wraps(cross)\r\n def ecspy_crossover(random, candidates, args):\r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for i, (mom, dad) in enumerate(zip(moms, dads)):\r\n cross.index = i\r\n offspring = cross(random, mom, dad, args)\r\n for o in offspring:\r\n children.append(o)\r\n return children\r\n ecspy_crossover.single_crossover = cross\r\n return ecspy_crossover", "def doCrossover(self, cross_func, member1, member2):\n\t\tif cross_func in self.crossover_external_data:\n\t\t\treturn cross_func(member1, member2, *(self.crossover_external_data[cross_func]))\n\t\telse:\n\t\t\treturn cross_func(member1, member2)", "def _next(self, population):\n # split the population for crossover\n selected, the_rest = self._split_population(\n population, self._get_selected_number(population,\n self._selection_crossover))\n\n # crossover\n generated_items_crossover = []\n while len(selected) >= 2:\n male, female = random.sample(selected, 2)\n selected.remove(male)\n selected.remove(female)\n generated_items_crossover.extend(\n self._crossover.crossover(male, female))\n\n # if there is a impar number of selected items\n # add it back to the list\n the_rest.extend(selected)\n\n # Make the mutations\n selected, the_rest = self._split_population(\n the_rest, self._get_selected_number(population,\n self._selection_mutation))\n # mutation\n generated_items_mutation = []\n for item in selected:\n generated_items_mutation.append(self._mutation.mutate(item))\n\n # compute the population\n population = []\n population.extend(the_rest)\n population.extend(generated_items_crossover)\n population.extend(generated_items_mutation)\n\n return population", "def crossover(new_pop, k):\n shuffle(new_pop)\n for i in range(len(new_pop) // 2):\n points = random.sample(range(1, len(new_pop[i])), k)\n points.sort()\n for fold in range(k):\n x = points[fold]\n tmp = new_pop[2 * i][:x].copy()\n new_pop[2 * i][:x], new_pop[2 * i + 1][:x] = new_pop[2 * i +\n 1][:x], tmp\n return new_pop", "def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n new_cxpb=cxpb/(cxpb+mutpb)\n new_mutpb=mutpb/(cxpb+mutpb)\n \n #num_cx=int(new_cxpb*len(offspring))\n #num_mu=len(offspring)-num_cx\n #print(new_cxpb, new_mutpb)\n # Apply crossover and mutation on the offspring\n i = 1\n while i < len(offspring):\n if random.random() < new_cxpb:\n if (offspring[i - 1] == offspring[i]):\n offspring[i - 1], = toolbox.mutate(offspring[i - 1])\n offspring[i], = toolbox.mutate(offspring[i])\n else:\n offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])\n del offspring[i - 1].fitness.values, offspring[i].fitness.values\n i = i + 2\n else:\n offspring[i], = 
toolbox.mutate(offspring[i])\n del offspring[i].fitness.values\n i = i + 1\n return offspring", "def doCrossover(parentPop, parSize, rosterSize):\n\n firstPar = random.randint(0, parSize - 1)\n secondPar = random.randint(0, parSize - 1)\n while secondPar == firstPar:\n secondPar = random.randint(0, parSize - 1)\n\n crossOverPt = random.randint(1, rosterSize - 2) # random num between second and second-to-last entry\n\n # debugging code\n # for i in range(rosterSize):\n # parentPop[firstPar].roster[i] = 2*i\n # parentPop[secondPar].roster[i] = 2*i + 1\n\n # first parent mapping\n chromosome = [parentPop[firstPar].roster[i] for i in range(crossOverPt)]\n\n # second parent mapping\n remainingLoops = rosterSize - len(chromosome)\n for i in range(remainingLoops):\n chromosome.append(parentPop[secondPar].roster[crossOverPt + i])\n return chromosome", "def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n \n # Apply crossover and mutation on the offspring\n for ind1, ind2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < cxpb:\n toolbox.mate(ind1, ind2)\n del ind1.fitness.values, ind2.fitness.values\n \n for ind in offspring:\n if random.random() < mutpb:\n toolbox.mutate(ind)\n del ind.fitness.values\n \n return offspring", "def crossover(chromosome_1, chromosome_2):\n (x1, y1) = (randrange(col_count), randrange(row_count))\n (x2, y2) = (randrange(x1+1, col_count+1), randrange(y1+1, row_count+1))\n def mate(chromosome_1, chromosome_2):\n used = set(chromosome_1[x+y*col_count] for x in range(x1, x2) for y in range(y1, y2))\n not_used = (allele for allele in chromosome_2 if allele not in used)\n return [chromosome_1[x+y*col_count] if x1 <= x < x2 and y1 <= y < y2 else next(not_used) for y in range(row_count) for x in range(col_count)]\n return (mate(chromosome_1, chromosome_2), mate(chromosome_2, chromosome_1))", "def inversion_crossover(self, pop):\n children, tmpNonComb, used = ([] for i in range(3))\n for i in range(0, int(len(pop) * self.fracElite), 1):\n r = int(rand() * len(pop))\n while r == i:\n r = int(rand() * len(pop))\n\n if sum(self.cID + self.dID + self.iID) != 0:\n nonComb1 = pop[i][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n nonComb2 = pop[r][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n if sum(self.xID) != 0:\n comb1 = pop[i][:np.where(self.xID == 1)[0][(-1)] + 1]\n comb2 = pop[r][:np.where(self.xID == 1)[0][(-1)] + 1]\n if sum(self.cID + self.dID + self.iID) != 0:\n c = int(rand() * len(nonComb1))\n if rand() > 0.5:\n tmpNonComb.append(np.array(nonComb1[0:c + 1].tolist() + nonComb2[c + 1:].tolist()))\n else:\n tmpNonComb.append(np.array(nonComb2[0:c + 1].tolist() + nonComb1[c + 1:].tolist()))\n used.append(i)\n if sum(self.xID) != 0:\n c = int(rand() * len(comb1))\n for c1 in range(c, len(comb1), 1):\n d2 = (contains_sublist(comb2, comb1[c1]) + 1) % len(comb1)\n d1 = contains_sublist(comb1, comb2[d2])\n c2 = contains_sublist(comb2, comb1[((d1 + 1) % len(comb1))]) % len(comb1)\n tmp1 = cp.copy(comb1)\n if c1 < d1:\n tmp1[(c1 + 1):(d1 + 1)] = list(reversed(tmp1[c1 + 1:d1 + 1]))\n else:\n tmp1[d1:c1] = list(reversed(tmp1[d1:c1]))\n tmp2 = cp.copy(comb2)\n if c2 < d2:\n tmp2[c2:d2] = list(reversed(tmp2[c2:d2]))\n else:\n tmp2[(d2 + 1):(c2 + 1)] = list(reversed(tmp2[d2 + 1:c2 + 1]))\n if sum(self.cID + self.dID + self.iID) == 0 and sum(self.xID) != 0:\n children.append(tmp1)\n children.append(tmp2)\n elif sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) != 0:\n 
children.append(np.concatenate(tmpNonComb[(-1)], tmp1))\n children.append(np.concatenate(tmpNonComb[(-1)], tmp2))\n used.append(i)\n used.append(r)\n\n if sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) == 0:\n children = tmpNonComb\n return (\n children, used)", "def evolve_generation(pop, probs, best_member, p_c, p_m):\n if best_member is None:\n new_pop = []\n else:\n new_pop = [best_member]\n while len(new_pop) < len(pop):\n NN1, NN2 = np.random.choice(pop, size=2, p=probs)\n new_pop.append(crossover(NN1, NN2, p_c, p_m))\n return new_pop", "def genetic_algorithm(population, lamda):\n maxGenerations = 5000\n generations_count = 0\n while generations_count <= maxGenerations:\n new_population = []\n generations_count += 1\n for i in range(0, len(population)):\n x = random_select(population, lamda)\n y = random_select(population, lamda)\n child = cross_over(x, y)\n child = mutate(child)\n new_population.append(child)\n population = new_population\n # Test for result\n conflicts = find_conflicts(population[i])\n if conflicts == 0:\n return True, population[i], generations_count\n return False, None, maxGenerations", "def crossOver(self):\n # copy all the chromosomes from the current generation to a regular python list\n # start with an empty list\n lstChromosomes = []\n # loop through all the items in the queue\n while not self.generation.empty():\n # take a chromosome off the queue\n chromosome = self.generation.get()\n # append the chromosome to the list\n lstChromosomes.append(chromosome)\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # cross-over all chromosomes in turn - start with the beginning of the list\n for chrom1Index in range(0, len(lstChromosomes)-1):\n # cross-over with all chromosomes that come after it\n for chrom2Index in range(chrom1Index, len(lstChromosomes)):\n # get the chromosomes we are crossing over\n chrom1 = lstChromosomes[chrom1Index]\n chrom2 = lstChromosomes[chrom2Index]\n # perform the cross-over operation\n xOver = chrom1.crossOver(chrom2)\n # create two new chromosome objects\n newChrom1 = self.chromosomeClass()\n newChrom2 = self.chromosomeClass()\n # set their genes to the values created by crossover operation\n newChrom1.genes = xOver[0]\n newChrom2.genes = xOver[1]\n # save the new chromosomes we just created\n newGeneration.put(newChrom1)\n newGeneration.put(newChrom2)\n # save all the original chromosomes\n for chromosome in lstChromosomes:\n newGeneration.put(chromosome)\n # keep track of all the chromosomes we create\n lstChromosomes = []\n # keep track of how many we are keeping\n chromosomesKept = 0\n # as long as we haven't added more chromosomes than the population is supposed to have\n # and we have more chromosomes to add...\n while chromosomesKept < self.populationSize and not newGeneration.empty():\n # take a chromosome off the new generation queue\n newChromosome = newGeneration.get()\n # have we seen this chromosome before?\n if (not newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1\n # as long as we haven't added more chromosomes than the population is supposed to have, create\n # random chromosomes\n while chromosomesKept < self.populationSize:\n # create a random chromosome\n newChromosome = self.chromosomeClass()\n # have we seen this chromosome before?\n if (not 
newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1", "def Crossover_Function(data1, data2):\n\n # for this function, I modified the uniform crossover function to take care of duplicates after crossover.\n\n data1[1] = 0\n data2[1] = 0\n chromosome1 = list.copy(data1[0])\n chromosome2 = list.copy(data2[0])\n\n #print(\"\\nChromosomes before crossover - \")\n #print(chromosome1)\n #print(chromosome2)\n\n # for each index in both chromosomes, use a coin toss to determine which index is crossed over\n for i in range(len(chromosome1)):\n\n cointoss = random.randrange(2)\n if cointoss == 0:\n chromosome1[i], chromosome2[i] = chromosome2[i], chromosome1[i]\n\n # find duplicates after crossing over\n dupes_in_ch1 = list(duplicates(chromosome1))\n dupes_in_ch2 = list(duplicates(chromosome2))\n\n\n # handle duplicates if any are found\n for i in dupes_in_ch1:\n if i in chromosome1: chromosome1.remove(i)\n chromosome2.append(i)\n \n for i in dupes_in_ch2:\n if i in chromosome2: chromosome2.remove(i)\n chromosome1.append(i)\n\n # replaced the modified chromosomes in the data\n data1[0] = chromosome1\n data2[0] = chromosome2\n\n #print(\"\\nChromsomes after crossover - \")\n #print(data1[0])\n #print(data2[0])\n\n return [data1, data2]", "def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))", "def cross_over(ind1, ind2):\n \n network1 = ind1.network\n network2 = ind2.network\n \n size = min(len(network1.index), len(network2.index))\n cx = random.randint(1, size - 1)\n \n temp = network1.copy()\n temp.iloc[:cx,:cx] = network2.iloc[:cx,:cx]\n network2.iloc[:cx,:cx] = network1.iloc[:cx,:cx]\n network1 = temp \n \n ind1.network = network1\n ind2.network = network2\n ind1.age = 1\n ind2.age = 1\n \n return ind1, ind2", "def crossover(p1, p2):\n genotype = []\n \n #Your code here\n \n return {'genotype': genotype, 'fitness': None}", "def uniform_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n mask = numpy.random.choice([True,False], size=size)\n not_mask = numpy.logical_not(mask)\n genotype1[mask] = self.get_genotype()[mask]\n genotype1[not_mask] = another_individual.get_genotype()[not_mask]\n genotype2[mask] = another_individual.get_genotype()[mask]\n genotype2[not_mask] = self.get_genotype()[not_mask]\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.uniform_crossover, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.uniform_crossover, self.mutation_method)", "def GTreeGPCrossoverSinglePoint(genome, **args):\n # print \"CrossoverAAAAAAAAAAA\"\n sister = None\n brother = None\n\n gMom = args[\"mom\"].clone()\n gDad = args[\"dad\"].clone()\n\n gMom.resetStats()\n gDad.resetStats()\n\n max_depth = gMom.getParam(\"max_depth\", None)\n max_attempt = gMom.getParam(\"max_attempt\", 15)\n\n if max_depth is None:\n Util.raiseException(\"You must specify the max_depth genome parameter !\", ValueError)\n\n if max_depth < 0:\n Util.raiseException(\n \"The max_depth must be >= 1, if you want 
to use GTreeCrossoverSinglePointStrict crossover !\", ValueError)\n\n momRandom = None\n dadRandom = None\n\n for i in xrange(max_attempt):\n\n dadRandom = gDad.getRandomNode()\n\n if dadRandom.getType() == Consts.nodeType[\"TERMINAL\"]:\n momRandom = gMom.getRandomNode(1)\n elif dadRandom.getType() == Consts.nodeType[\"NONTERMINAL\"]:\n momRandom = gMom.getRandomNode(2)\n\n mD = gMom.getNodeDepth(momRandom)\n dD = gDad.getNodeDepth(dadRandom)\n\n # Two nodes are root\n if mD == 0 and dD == 0:\n continue\n\n mH = gMom.getNodeHeight(momRandom)\n if dD + mH > max_depth:\n continue\n\n dH = gDad.getNodeHeight(dadRandom)\n if mD + dH > max_depth:\n continue\n\n break\n\n if i == (max_attempt - 1):\n assert gMom.getHeight() <= max_depth\n return gMom, gDad\n else:\n nodeMom, nodeDad = momRandom, dadRandom\n\n nodeMom_parent = nodeMom.getParent()\n nodeDad_parent = nodeDad.getParent()\n\n # Sister\n if args[\"count\"] >= 1:\n sister = gMom\n nodeDad.setParent(nodeMom_parent)\n\n if nodeMom_parent is None:\n sister.setRoot(nodeDad)\n else:\n nodeMom_parent.replaceChild(nodeMom, nodeDad)\n sister.processNodes()\n assert sister.getHeight() <= max_depth\n\n # Brother\n if args[\"count\"] == 2:\n brother = gDad\n nodeMom.setParent(nodeDad_parent)\n\n if nodeDad_parent is None:\n brother.setRoot(nodeMom)\n else:\n nodeDad_parent.replaceChild(nodeDad, nodeMom)\n brother.processNodes()\n assert brother.getHeight() <= max_depth\n\n return sister, brother", "def crossover_arith(pa, ma):\n alpha = nprand.uniform(0, 2.)\n newdata = np.empty(pa.data.shape)\n if alpha < 1.0:\n newdata[:, R:] = pa.data[:, R:].copy()\n else:\n newdata[:, R:] = ma.data[:, R:].copy()\n newdata[:, :R] = alpha * pa.data[:, :R]+ (1 - alpha) * ma.data[:, :R]\n newdata.clip(0., 1., out = newdata)\n newind = Individual(newdata)\n return newind", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. 
population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')", "def crossover(NN1, NN2, p_c, p_m):\n if np.random.choice([0, 1], p=[1-p_c, p_c]):\n return nn.mate_neural_nets(NN1, NN2, p_m)\n else:\n return np.random.choice([NN1, NN2])", "def _step(self):\n self.sort()\n selection = self._select()\n offspring = self._crossover(selection)\n self._mutate(offspring)\n\n self.sort()\n if self.elite_num > 0:\n offspring[:self.elite_num] = self.population[:self.elite_num]\n\n self.population[:] = offspring\n\n self.sort()\n if self.cull_num > 0:\n self.population[-self.cull_num:] = self._initialize(self.cull_num)", "def uniform_clause_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n if random.random() < 0.5:\n temp = ind1[i]\n ind1[i] = ind2[i]\n ind2[i] = temp", "def crossover(self, chrom1, chrom2):\n pass", "def rotate_crossover(population):\r\n global chromosome_length\r\n global decryption_key\r\n\r\n new_population = []\r\n\r\n decryption_key += rotate_crossover_del\r\n\r\n # predefined rotation value, varied every round\r\n rotation_offset = random.randint(1, chromosome_length)\r\n\r\n decryption_key += rotation_offset_del + str(rotation_offset) + rotation_offset_del\r\n\r\n decryption_key += rotation_types_del\r\n\r\n for chromosome in population:\r\n\r\n p = random.uniform(0, 1)\r\n\r\n if p > 0.5:\r\n decryption_key += \"right|\"\r\n right_first = chromosome[0: len(chromosome) - rotation_offset]\r\n right_second = chromosome[len(chromosome) - rotation_offset:]\r\n new_population.append(right_second + right_first)\r\n else:\r\n decryption_key += \"left|\"\r\n left_first = chromosome[0: rotation_offset]\r\n left_second = chromosome[rotation_offset:]\r\n new_population.append(left_second + left_first)\r\n\r\n decryption_key += rotation_types_del\r\n\r\n decryption_key += rotate_crossover_del\r\n\r\n return new_population", "def cloning(population, context=None, offspring_per_ind=1):\n assert(population is not None)\n assert(offspring_per_ind > 0)\n\n result = []\n for ind in population:\n for i in range(offspring_per_ind):\n result.append(ind.clone())\n\n assert(len(result) == offspring_per_ind*len(population))\n return result, context", "def crossOver(self, x, y):\n if random.uniform(0, 1) < self.probCrossOver:\n # generate berapa banyak perpindahan\n pindah = random.randint(0, self.panjangKromosom-1)\n for i in range(pindah):\n # melakukan swap nilai x dan y\n x[i], y[i] = y[i], x[i]\n return [x, y]", "def uniform_crossover(tup1,tup2):\n a1,b1,c1,d1,e1 = tup1\n a2,b2,c2,d2,e2 = tup2\n tup3 = (a1,b1,c1,d2,e2)\n tup4 = (a2,b2,c2,d1,e1)\n tup5 = (a1,b1,c2,d2,e2)\n tup6 = (a2,b2,c1,d1,e1)\n return [tup3,tup4,tup5,tup6]", "def reproduce(population:list):\n 
new_gen = []\n probs = []\n for p in population:\n probs.append(p[3])\n while len(new_gen) != len(probs):\n parents = selection(probs)\n son,eval_son,daughter,eval_daughter = xo(population[parents[0]][0],population[parents[0]][1], population[parents[1]][0],population[parents[1]][1],2)\n new_gen.append([son,eval_son])\n new_gen.append([daughter,eval_daughter])\n # mutation\n # lets say 5% of the population gets mutated\n how_many_to_mutate = int(NUM_OF_CHROMOZOMS * (1/100))\n t = [i for i in range(NUM_OF_CHROMOZOMS)]\n # choose percent of the population randomly, uniformly\n indices_to_mutate = choice(t, how_many_to_mutate, replace=False)\n for i in range(len(indices_to_mutate)):\n mutate(new_gen[indices_to_mutate[i]])\n\n evaluateAll(new_gen)\n return new_gen", "def crossover(f,P_c_min,P_c_max,i,D,V,P,U):\n #ADAPTIVE Crossover\n if f[i] < np.mean(f):\n P_c = P_c_min + (P_c_max-P_c_min)*((f[i]-np.mean(f))/(np.max(f)-np.mean(f)))\n else:\n P_c = P_c_min\n\n delta = np.random.randint(0,D-1) \n for j in np.arange(D):\n if np.random.uniform(0,1) <= P_c or delta == j:\n U[i,j] = V[j]\n else:\n U[i,j]=P[i,j]\n\n return U", "def uniform_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n for j in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][j]\n ind1[i][j] = ind2[i][j]\n ind2[i][j] = temp", "def clause_crossover_1x(ind1, ind2):\n k = len(ind1)\n cx_point = random.randint(1, k - 1)\n temp = ind1[cx_point:]\n ind1[cx_point:] = ind2[cx_point:]\n ind2[cx_point:] = temp", "def crossover(p1, p2, gamma=0.1):\n c1 = p1.deepcopy()\n c2 = p2.deepcopy()\n alpha = np.random.uniform(0, gamma, 1)\n c1.position = alpha * p1.position + (1 - alpha) * p2.position\n c2.position = alpha * p2.position + (1 - alpha) * p1.position\n return c1, c2", "def call(self):\n\n self.cross()\n self.mutation()\n self.selection()\n \n return self.population[0]", "def crossoverFunc(parents, size, bits):\n\tchildren = np.zeros((size), np.dtype('a6'))\n\n\tfor i in range(0, int(size/2)):\n\t\tx_site = np.random.randint(0, bits - 1)\n\t\tx1 = parents[i]\n\t\tx2 = parents[size - i - 1]\n\t\tif (np.random.randint(0, 100)) > 40 :\t# Crossover Probability = 60 percent\n\t\t\tch1 = x1[0:x_site] + x2[x_site:bits]\n\t\t\tch2 = x2[0:x_site] + x1[x_site:bits]\n\n\t\t\tchildren[i] = ch1\n\t\t\tchildren[size - i - 1] = ch2\n\t\t\n\t\telse:\n\t\t\tchildren[i] = x1\n\t\t\tchildren[size - i - 1] = x2\n\n\treturn children", "def crossover(parent1, parent2):\n child = parent1.clone()\n for k in range(parent1.num_input + parent1.num_output):\n if np.random.randint(2) == 1:\n child.identifiers[k] = parent2.identifiers[k]\n child.inhibitors[k] = parent2.inhibitors[k]\n child.enhancers[k] = parent2.enhancers[k]\n\n child.identifiers = child.identifiers[:(child.num_input +\n child.num_output)]\n child.inhibitors = child.inhibitors[:(child.num_input + child.num_output)]\n child.enhancers = child.enhancers[:(child.num_input + child.num_output)]\n\n p1range = list(range(parent1.num_input + parent1.num_output,\n parent1.size()))\n random.shuffle(p1range)\n p2range = list(range(parent2.num_input + parent2.num_output,\n parent2.size()))\n random.shuffle(p2range)\n\n p1remaining = deepcopy(p1range)\n\n # Crossing regulatory\n p1_gene_count = 0\n p2_gene_count = 0\n for p1idx in p1range:\n min_dist = config.CROSSOVER_THRESHOLD\n paired_idx = None\n for p2idx in p2range:\n gdist = parent1.protein_distance(parent2, p1idx, p2idx)\n if gdist < min_dist:\n min_dist = gdist\n paired_idx = p2idx\n if paired_idx is not None:\n if 
np.random.randint(2) == 0:\n chosen_parent = parent1\n chosen_idx = p1idx\n p1_gene_count += 1\n else:\n chosen_parent = parent2\n chosen_idx = p2idx\n p2_gene_count += 1\n child.identifiers = np.append(\n child.identifiers, chosen_parent.identifiers[chosen_idx])\n child.inhibitors = np.append(\n child.inhibitors, chosen_parent.inhibitors[chosen_idx])\n child.enhancers = np.append(\n child.enhancers, chosen_parent.enhancers[chosen_idx])\n # Remove from consideration again\n p2range = list(set(p2range) - set([p2idx]))\n p1remaining = list(set(p1remaining) - set([p1idx]))\n\n # Add remaining material\n if child.size() == (child.num_input + child.num_output):\n prob = 0.5\n else:\n prob = p1_gene_count / (p1_gene_count + p2_gene_count)\n\n chosen_parent = parent2\n chosen_range = p2range\n if np.random.random() < prob:\n chosen_parent = parent1\n chosen_range = p1remaining\n\n for idx in chosen_range:\n child.identifiers = np.append(child.identifiers,\n chosen_parent.identifiers[idx])\n child.inhibitors = np.append(child.inhibitors,\n chosen_parent.inhibitors[idx])\n child.enhancers = np.append(child.enhancers,\n chosen_parent.enhancers[idx])\n\n child.num_regulatory = child.size() - (child.num_input + child.num_output)\n\n # Cross dynamics\n if np.random.random() < 0.5:\n child.beta = parent1.beta\n else:\n child.beta = parent2.beta\n\n if np.random.random() < 0.5:\n child.delta = parent1.delta\n else:\n child.delta = parent2.delta\n\n return child", "def onepoint_crossover(p_0, p_1, within_used=True):\n # Get the chromosomes\n c_p_0, c_p_1 = p_0.genome, p_1.genome\n # Uniformly generate crossover points. If within_used==True,\n # points will be within the used section.\n if within_used:\n max_p_0, max_p_1 = p_0.used_codons, p_1.used_codons\n else:\n max_p_0, max_p_1 = len(c_p_0), len(c_p_1)\n pt_p_0, pt_p_1 = random.randint(1, max_p_0), random.randint(1, max_p_1)\n # Make new chromosomes by crossover: these slices perform copies\n if random.random() < CROSSOVER_PROBABILITY:\n c_0 = c_p_0[:pt_p_0] + c_p_1[pt_p_1:]\n c_1 = c_p_1[:pt_p_1] + c_p_0[pt_p_0:]\n else:\n c_0, c_1 = c_p_0[:], c_p_1[:]\n # Put the new chromosomes into new individuals\n return [Individual(c_0), Individual(c_1)]", "def Genetic_Algorithm(Population, Lambda, l, data):\n if Population.Population_size == 1: # Used in case of different population sizes\n picked_individuals = np.random.permutation(Population.Population_size)[:4].tolist()*4\n else:\n # Selecting 4 different individuals from the population\n picked_individuals = np.random.permutation(Population.Population_size)[:4].tolist()\n\n # Initializing child of the selected individuals\n child_assets = []\n child_proportions = []\n child_weights = np.zeros(N)\n l = 0\n\n #Pool_1\n pair_1_assets = [Population.population_assets[picked_individuals[0]], Population.population_assets[picked_individuals[1]]]\n pair_1_fitness = [Population.fitness[picked_individuals[0]], Population.fitness[picked_individuals[1]]]\n pair_1_proportions = [Population.population_proportions[picked_individuals[0]], Population.population_proportions[picked_individuals[1]]]\n\n # Pool_2\n pair_2_assets = [Population.population_assets[picked_individuals[2]], Population.population_assets[picked_individuals[3]]]\n pair_2_fitness = [Population.fitness[picked_individuals[2]], Population.fitness[picked_individuals[3]]]\n pair_2_proportions = [Population.population_proportions[picked_individuals[2]], Population.population_proportions[picked_individuals[3]]]\n\n # Selecting parents for the uniform crossover\n 
parent_1_assets = pair_1_assets[pair_1_fitness.index(min(pair_1_fitness))]\n parent_1_proportions = pair_1_proportions[pair_1_fitness.index(min(pair_1_fitness))]\n\n parent_2_assets = pair_2_assets[pair_2_fitness.index(min(pair_2_fitness))]\n parent_2_proportions = pair_2_proportions[pair_2_fitness.index(min(pair_2_fitness))]\n\n # Looking for same assets in parents and inputting them into child\n common_assets = []\n for i in parent_1_assets:\n if i in parent_2_assets:\n common_assets.append(i)\n child_assets += common_assets\n\n # Finding out what are the indexes of those assets in parents\n indexes_1 = []\n indexes_2 = []\n for i in common_assets:\n indexes_1.append(parent_1_assets.index(i))\n indexes_2.append(parent_2_assets.index(i))\n\n # Adding the proportions of same assets to child with 50% chance\n for m, h in zip(indexes_1, indexes_2):\n rand_1 = np.random.rand()\n if rand_1 > 0.5:\n child_proportions.append(parent_1_proportions[m])\n else:\n child_proportions.append(parent_2_proportions[h])\n\n # Creating new lists with assets that each parent don't have in common\n temp_parent_1_assets = []\n temp_parent_2_assets = []\n for m, h in zip(parent_1_assets, parent_2_assets):\n temp_parent_1_assets.append(m)\n temp_parent_2_assets.append(h)\n\n for i in common_assets:\n if i in temp_parent_1_assets:\n temp_parent_1_assets.remove(i)\n\n for i in common_assets:\n if i in temp_parent_2_assets:\n temp_parent_2_assets.remove(i)\n\n # Adding other assets and their corresponding proportions to the child\n for m, h in zip(temp_parent_1_assets, temp_parent_2_assets):\n rand_2 = np.random.rand()\n if rand_2 > 0.5:\n child_assets.append(m)\n child_proportions.append(parent_1_proportions[parent_1_assets.index(m)])\n else:\n child_assets.append(h)\n child_proportions.append(parent_2_proportions[parent_2_assets.index(h)])\n\n # Creating A*\n # A* is a set of assets that are in the parents, but are not in the child (together with their associated values)\n parent_minus_child_assets = []\n parent_minus_child_proportions = []\n for m, h in zip(parent_1_assets, parent_2_assets):\n if m not in child_assets:\n parent_minus_child_assets.append(m)\n parent_minus_child_proportions.append(parent_1_proportions[parent_1_assets.index(m)])\n if h not in child_assets:\n parent_minus_child_assets.append(h)\n parent_minus_child_proportions.append(parent_2_proportions[parent_2_assets.index(h)])\n\n # Assets that can be potentially added to the child in case parent_minus_child assets (A*) are empty\n other_assets = np.random.permutation(N).tolist()\n for i in other_assets:\n if i in child_assets:\n other_assets.remove(i)\n\n # Mutation\n mutated_asset = np.random.choice(child_proportions)\n rand_3 = np.random.rand()\n if rand_3 > 0.5:\n child_proportions[child_proportions.index(mutated_asset)] = (0.9 * (data.epsilon + mutated_asset) - data.epsilon) # m=1\n else:\n child_proportions[child_proportions.index(mutated_asset)] = (1.1 * (data.epsilon + mutated_asset) - data.epsilon) # m=2\n mutated_child_proportions = child_proportions\n\n # Making sure the child does not have two identical assets\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n # Making sure all child proportion are between 0 and 1 (if not they get excluded)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n\n # 
Ensure that child has exactly 10 assets and proportions\n while len(child_assets) > data.K and len(mutated_child_proportions) > data.K:\n child_assets.remove(child_assets.index(min(mutated_child_proportions)))\n mutated_child_proportions.remove(min(mutated_child_proportions))\n\n # Add assets from A* to child\n while len(child_assets) < data.K and len(mutated_child_proportions) < data.K:\n if len(parent_minus_child_assets) != 0:\n rand_4 = np.random.choice(parent_minus_child_assets)\n child_assets.append(rand_4)\n mutated_child_proportions.append(parent_minus_child_proportions[parent_minus_child_assets.index(rand_4)])\n parent_minus_child_proportions.remove(parent_minus_child_proportions[parent_minus_child_assets.index(rand_4)])\n parent_minus_child_assets.remove(rand_4)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n else: #In case A* is empty\n rand_5=np.random.choice(other_assets)\n child_assets.append(rand_5)\n other_assets.remove(rand_5)\n mutated_child_proportions.append(0)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n # Given large amount of iterations and randomness all child proportions could be 0 hence set 1 at random to 0.01\n # Does not influence the overall result as it ist immediately replaced by a stronger individual\n if sum(mutated_child_proportions) == 0:\n mutated_child_proportions[mutated_child_proportions.index(np.random.choice(mutated_child_proportions))]= 0.01\n\n # Evaluating child\n mutated_child_proportions = np.array(mutated_child_proportions)\n L = mutated_child_proportions.sum()\n w_temp = data.epsilon + mutated_child_proportions * data.F / L\n is_too_large = (w_temp > data.delta)\n while is_too_large.sum() > 0:\n is_not_too_large = np.logical_not(is_too_large)\n L = mutated_child_proportions[is_not_too_large].sum()\n F_temp = 1.0 - (data.epsilon * is_not_too_large.sum() + data.delta * is_too_large.sum())\n w_temp = data.epsilon + mutated_child_proportions * F_temp / L\n w_temp[is_too_large] = data.delta\n is_too_large = (w_temp > data.delta)\n\n # Assigning weights to child\n child_weights[:] = 0\n child_weights[child_assets] = w_temp\n mutated_child_proportions = w_temp - data.epsilon\n\n # Calculating child fitness\n obj1 = np.sum((child_weights * child_weights.reshape((child_weights.shape[0], 1))) * data.sigma)\n obj2 = np.sum(child_weights * data.mu)\n child_fitness = Lambda[l] * obj1 - (1 - Lambda[l]) * obj2\n\n # Checking whether child is valid\n Population.check_valid_solution(child_weights, mutated_child_proportions, child_assets, data)\n\n # Substituting child into the population and removing the weakest member\n index_worst_member = np.argmax(Population.fitness)\n Population.fitness[index_worst_member] = child_fitness\n Population.population_proportions[index_worst_member] = mutated_child_proportions\n Population.population_weights[index_worst_member] = child_weights\n Population.population_assets[index_worst_member] = child_assets\n Population.Obj1[index_worst_member] = 
obj1\n Population.Obj2[index_worst_member] = obj2\n\n # Finding the best member of the population\n index_best_member = np.argmin(Population.fitness)\n Population.best_fitness = Population.fitness[index_best_member]\n Population.best_proportions = Population.population_proportions[index_best_member]\n Population.best_weights = Population.population_weights[index_best_member]\n Population.best_assets = Population.population_assets[index_best_member]\n Population.best_covariance = Population.Obj1[index_best_member]\n Population.best_return = Population.Obj2[index_best_member]\n\n\n return Population.best_fitness, Population.best_proportions, Population.best_assets, Population.best_weights, Population.best_covariance, Population.best_return", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def __init__(self, crossover_locations, n_crossovers=1):\n self.crossover_locations = np.array(crossover_locations, copy=False)\n self.n_crossovers = n_crossovers", "def selection(self,parents,popSize):\n for i in range(popSize):\n idx1 = np.random.randint(0,popSize)\n idx2 = np.random.randint(0,popSize)\n if parents.individuals[idx1].violationSum < parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx1]\n elif parents.individuals[idx1].violationSum > parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx2]\n elif parents.individuals[idx1].objectiveFunction[0] < parents.individuals[idx2].objectiveFunction[0]:\n self.individuals[i] = parents.individuals[idx1]\n else:\n self.individuals[i] = parents.individuals[idx2]\n \"\"\"\n print(\"Offsprings(self) Impresso dentro de selection (FIM).\")\n self.printPopulation(popSize)\n print(\"Parents Impresso dentro de selection (FIM).\")\n parents.printPopulation(popSize)\n \"\"\"", "def general_cross_function(verbosity, function):\r\n ret = 1\r\n first_errors = [False, False]\r\n for count in range(10, 25, 5):\r\n for points in range(5, 10):\r\n for ax_c in range(3, 5):\r\n axes = []\r\n for _ in range(ax_c):\r\n axes.append(((np.random.random_sample() * 2), (3 + np.random.random_sample() * 4)))\r\n population = GeneticAlgorithms.random_population(count, points, axes) # assumes this works\r\n for _ in range(len(population)):\r\n rd1 = np.random.choice(population)\r\n rd2 = np.random.choice(population)\r\n crs = function(rd1, rd2)\r\n if crs.shape != rd1.shape:\r\n ret = 0\r\n if verbosity > 0 and first_errors[0]:\r\n first_errors[0] = True\r\n print(\"ERROR: cross function doesn't return correct shape\")\r\n for i in range(points):\r\n for j in range(ax_c):\r\n if crs[i][j] < min(rd1[i][j], rd2[i][j]) or crs[i][j] > max(rd1[i][j], rd2[i][j]):\r\n ret = 0\r\n if verbosity > 0 and first_errors[1]:\r\n first_errors[1] = True\r\n print(\"ERROR: cross function 
doesn't return in correct range\")\r\n return ret", "def CrossoverOX1(p1,p2):\n countryNo=len(p1)\n [start,end] = sorted(random.sample(range(1,countryNo),2))\n ch1 = [0]+[-1 for i in range(1,len(p1))]\n ch2 = [0]+[-1 for i in range(1,len(p1))]\n for i in range(1,countryNo):\n if i>=start and i<=end:\n ch1[i]=p1[i]\n ch2[i]=p2[i]\n for i in range(1,countryNo):\n if p2[i] not in ch1:\n ch1[ch1.index(-1)]=p2[i]\n for i in range(1,countryNo):\n if p1[i] not in ch2:\n ch2[ch2.index(-1)]=p1[i]\n return ch1, ch2", "def cross_curr2pbest1(pop, ic, f, cr, rng, p_num, archive, arc_ind_cnt, task, **_kwargs):\n # Note: the population passed in the argument must be sorted by fitness!\n x_pbest = rng.integers(p_num)\n # a random individual is selected from the best p_num individuals of the population rng.integers\n p = [1 / (len(pop) - 1.0) if i != ic else 0 for i in range(len(pop))]\n r1 = rng.choice(len(pop), p=p) # a random individual != to the current individual is selected from the population\n p = [1 / (len(pop) + arc_ind_cnt - 2.0) if i != ic and i != r1 else 0 for i in range(len(pop) + arc_ind_cnt)]\n r2 = rng.choice(len(pop) + arc_ind_cnt, p=p)\n # a second random individual != to the current individual and r1 is selected from the population U archive\n j = rng.integers(task.dimension)\n if r2 >= len(pop):\n r2 -= len(pop)\n v = [pop[ic][i] + f * (pop[x_pbest][i] - pop[ic][i]) + f * (pop[r1][i] - archive[r2][i])\n if rng.random() < cr or i == j else pop[ic][i] for i in range(task.dimension)]\n return parent_medium(np.asarray(v), pop[ic].x, task.lower, task.upper)\n # the mutant vector is repaired if needed\n\n else:\n v = [pop[ic][i] + f * (pop[x_pbest][i] - pop[ic][i]) + f * (pop[r1][i] - pop[r2][i])\n if rng.random() < cr or i == j else pop[ic][i] for i in range(task.dimension)]\n return parent_medium(np.asarray(v), pop[ic].x, task.lower, task.upper)\n # the mutant vector is repaired if needed", "def crossover(self, parent_1, parent_2):\r\n start = randrange(1, NUM_OF_GENETIC_UNITS + 1)\r\n end = randrange(1, NUM_OF_GENETIC_UNITS + 1)\r\n if end < start:\r\n start, end = end, start\r\n if start == end:\r\n end = NUM_OF_GENETIC_UNITS + 1\r\n if randrange(1, 3) == 1:\r\n parent_a = parent_1\r\n parent_b = parent_2\r\n else:\r\n parent_a = parent_2\r\n parent_b = parent_1\r\n crossover_of_genetic_units = {genetic_unit_num: parent_a.genetic_units[genetic_unit_num] for genetic_unit_num\r\n in parent_a.genetic_units}\r\n for genetic_unit_num in range(start, end):\r\n crossover_of_genetic_units[genetic_unit_num] = parent_b.genetic_units[genetic_unit_num]\r\n return crossover_of_genetic_units", "def steady_state(individuals):\r\n\r\n # Initialise counter for new individuals.\r\n ind_counter = 0\r\n\r\n while ind_counter < params['POPULATION_SIZE']:\r\n \r\n # Select parents from the original population.\r\n parents = selection(individuals)\r\n\r\n # Perform crossover on selected parents.\r\n cross_pop = crossover_inds(parents[0], parents[1])\r\n \r\n if cross_pop is None:\r\n # Crossover failed.\r\n pass\r\n\r\n else:\r\n # Mutate the new population.\r\n new_pop = mutation(cross_pop)\r\n \r\n # Evaluate the fitness of the new population.\r\n new_pop = evaluate_fitness(new_pop)\r\n \r\n # Sort the original population\r\n individuals.sort(reverse=True)\r\n \r\n # Combine both populations\r\n total_pop = individuals[:-len(new_pop)] + new_pop\r\n \r\n # Increment the ind counter\r\n ind_counter += params['GENERATION_SIZE']\r\n\r\n # Return the combined population.\r\n return total_pop", "def 
dynamic_crossover(nn1, nn2):\n # Lists for respective weights\n nn1_weights = get_weights(nn1.layers)\n nn2_weights = get_weights(nn2.layers)\n child_weights = []\n\n # Iterate through all weights from all layers for crossover\n for index, _ in enumerate(nn1_weights):\n # Get single point to split the matrix in parents based on # of cols\n coulmns = np.shape(nn1_weights[index])[1]-1\n split = random.randint(0, coulmns)\n # Iterate through after a single point and set the remaing cols to nn_2\n for j in range(split, coulmns):\n nn1_weights[index][:, j] = nn2_weights[index][:, j]\n\n # After crossover add weights to child\n child_weights.append(nn1_weights[index])\n\n # Add a chance for mutation\n mutation(child_weights)\n\n # Create and return child object\n return NeuralNetwork(child_weights)", "def evolve(population, targetSum, targetProduct, retain=0.2, random_select=0.05, mutate=0.01):\n\n graded = [ ( fitness(x, targetSum,targetProduct), x ) for x in population]\n graded = [ x[1] for x in sorted(graded) ]\n retain_length = int(len(graded) * retain)\n parents = graded[:retain_length]\n\n # randomly add other individuals to promote genetic\n # diversity\n for individual in graded[retain_length:]:\n if random_select > random.random():\n parents.append(individual)\n\n # crossover parents to create offspring\n #print(\"starting on crossover\")\n desired_length = len(population) - len(parents)\n children = []\n while len(children) < desired_length:\n male = randint(0, len(parents) - 1)\n female = randint(0, len(parents) -1)\n if male != female:\n male = parents[male]\n female = parents[female]\n half = int(len(male) / 2)\n child = male[: half] + female[half:]\n children.append(child)\n\n # mutate some individuals\n #print(\"starting on mutation\")\n for individual in children:\n if mutate > random.random():\n half = int(len(individual) / 2 )\n pos_geneSum = randint(0, (half - 1))\n pos_geneProd = randint(half, (len(individual) - 1))\n tmp = individual[pos_geneSum]\n individual[pos_geneSum] = individual[pos_geneProd]\n individual[pos_geneProd] = tmp\n\n parents.extend(children)\n return parents", "def crossover(parent1, parent2, crossover_rate):\n if random.random() < crossover_rate:\n crossover = int(random.random() * len(parent1.sequence))\n chromo1 = \"{}{}\".format(\n parent1.sequence[:crossover],\n parent2.sequence[crossover:])\n chromo2 = \"{}{}\".format(\n parent2.sequence[:crossover],\n parent1.sequence[crossover:])\n return Chromosome(sequence=chromo1), Chromosome(sequence=chromo2)\n else:\n return parent1, parent2", "def crossover(self, parents: ChromList) -> ChromList:\n raise NotImplementedError", "def uniform_crossover(random, mom, dad, args):\n ux_bias = args.setdefault('ux_bias', 0.5)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d) in enumerate(zip(mom, dad)):\n if random.random() < ux_bias:\n bro[i] = m\n sis[i] = d\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children", "def arithmetic_crossover(random, mom, dad, args):\n ax_alpha = args.setdefault('ax_alpha', 0.5)\n ax_points = args.setdefault('ax_points', None)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n if ax_points is None:\n ax_points = list(range(min(len(bro), len(sis))))\n for i in 
ax_points:\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children", "def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children", "def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from self and add to cl\n 
cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)", "def uniform_crossover(random, mom, dad, args):\r\n ux_bias = args.setdefault('ux_bias', 0.5)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n if random.random() < ux_bias:\r\n bro[i] = m\r\n sis[i] = d\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children", "def cross_below_cross_rate(self):\n p1_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n p2_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n gene_of_p1 = self.population[p1_index]\n gene_of_p2 = self.population[p2_index]\n cross_point = randint(0, int_min(len(gene_of_p1), 
len(gene_of_p2))-1)\n new_chromosome = []\n new_chromosome += gene_of_p1.chromosome[:cross_point]\n new_chromosome += gene_of_p2.chromosome[cross_point:]\n if (self.tactics.is_unrepeatable(new_chromosome[cross_point])\n and cross_point < len(new_chromosome)-1):\n if new_chromosome[cross_point] == new_chromosome[cross_point+1]:\n del new_chromosome[cross_point]\n return Gene(chromosome=new_chromosome)", "def crossing(self, *args):\n return self.phy2abs.crossing(*args)", "def _evolve_population(self):\n # save the old population\n self.old_population = self.population.copy()\n self.old_population_errors = self.population_errors.copy()\n\n # index pointers\n rind = numpy.random.permutation(4)+1\n\n # shuffle the locations of the individuals\n ind1 = numpy.random.permutation(self.population_size)\n pop1 = self.old_population[ind1,:]\n \n # rotate for remaining indices\n rot = numpy.remainder(self.rot_ind + rind[0], self.population_size)\n ind2 = ind1[rot,:]\n pop2 = self.old_population[ind2,:]\n\n rot = numpy.remainder(self.rot_ind + rind[1], self.population_size)\n ind3 = ind2[rot,:]\n pop3 = self.old_population[ind3,:]\n\n rot = numpy.remainder(self.rot_ind + rind[2], self.population_size)\n ind4 = ind3[rot,:]\n pop4 = self.old_population[ind4,:]\n\n rot = numpy.remainder(self.rot_ind + rind[3], self.population_size)\n ind5 = ind4[rot,:]\n pop5 = self.old_population[ind5,:]\n \n # population filled with best individual\n best_population = self.best_individual[numpy.newaxis,:].repeat(self.population_size,axis=0)\n\n # figure out the crossover ind\n xold_ind = numpy.random.rand(self.population_size,self.num_params) >= \\\n self.crossover_prob\n\n # get new population based on desired strategy\n # DE/rand/1\n if self.method == DE_RAND_1:\n population = pop3 + self.scale*(pop1 - pop2)\n population_orig = pop3\n # DE/BEST/1\n if self.method == DE_BEST_1:\n population = best_population + self.scale*(pop1 - pop2)\n population_orig = best_population\n # DE/best/2\n elif self.method == DE_BEST_2:\n population = best_population + self.scale * \\\n (pop1 + pop2 - pop3 - pop4)\n population_orig = best_population\n # DE/BEST/1/JITTER\n elif self.method == DE_BEST_1_JITTER:\n population = best_population + (pop1 - pop2) * \\\n ((1.0-0.9999) * \\\n numpy.random.rand(self.population_size,self.num_params) + \\\n self.scale)\n population_orig = best_population\n # DE/LOCAL_TO_BEST/1\n elif self.method == DE_LOCAL_TO_BEST_1:\n population = self.old_population + \\\n self.scale*(best_population - self.old_population) + \\\n self.scale*(pop1 - pop2)\n population_orig = self.old_population\n \n # crossover\n population[xold_ind] = self.old_population[xold_ind]\n\n # apply the boundary constraints\n for p in xrange(self.num_params):\n # get min and max\n min_val = self.param_ranges[p][0]\n max_val = self.param_ranges[p][1]\n\n # find where exceeded max\n ind = population[:,p] > max_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = max_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-max_val)\n\n # find where below min\n ind = population[:,p] < min_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = min_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-min_val)\n\n # set the class members\n self.population = population\n self.population_orig = population", "def laplace_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n bounder = args['_ec'].bounder\n a = 
args.setdefault('lx_location', 0)\n b = args.setdefault('lx_scale', 0.5)\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d) in enumerate(zip(mom, dad)):\n u = random.random()\n if random.random() <= 0.5:\n beta = a - b * math.log(u)\n else:\n beta = a + b * math.log(u)\n bro[i] = m + beta * abs(m - d)\n sis[i] = d + beta * abs(m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n return [bro, sis]\n else:\n return [mom, dad]", "def arithmetic_crossover(random, mom, dad, args):\r\n ax_alpha = args.setdefault('ax_alpha', 0.5)\r\n ax_points = args.setdefault('ax_points', None)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n if ax_points is None:\r\n ax_points = list(range(min(len(bro), len(sis))))\r\n for i in ax_points:\r\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\r\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children", "def laplace_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n bounder = args['_ec'].bounder\r\n a = args.setdefault('lx_location', 0)\r\n b = args.setdefault('lx_scale', 0.5)\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n u = random.random()\r\n if random.random() <= 0.5:\r\n beta = a - b * math.log(u)\r\n else:\r\n beta = a + b * math.log(u)\r\n bro[i] = m + beta * abs(m - d)\r\n sis[i] = d + beta * abs(m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]", "def crossover(self, mom, dad):\r\n select_mask = np.random.binomial(1, 0.5, size=(20, 20)).astype('bool')\r\n child1, child2 = np.copy(mom), np.copy(dad)\r\n child1[select_mask] = dad[select_mask]\r\n child2[select_mask] = mom[select_mask]\r\n return child1, child2", "def crossing(self, *args):\n return self.overlap(*args, type='point')", "def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children", "def mutatePopulation(self, population):\n\t\tfor i in range(int(math.ceil(self.selectionRate * len(population)))):\n\t\t\tmutatedIndiv = 
self.mutateIndividual(population[i])\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population[:self.populationSize]", "def best_crossover(feature_set, population):\n new = []\n pop = [x for y in population for x in y]\n most = pop[0]\n max = float(\"-inf\")\n all = list(set(pop))\n for j in range(0, len(feature_set)):\n for i in all:\n x = pop.count(i)\n if x > max:\n max = x\n most = i\n new.append(most)\n pop = filter(lambda a: a != most, pop)\n max = float(\"-inf\")\n\n return set(new).union(feature_set)", "def next_population():\n result = [best]\n while len(result) < population_size:\n chromosomes = crossover(tournament(), tournament()) if random() < crossover_rate else [tournament()]\n for chromosome in chromosomes:\n for i in range(box_count):\n if random() < mutation_rate:\n j = randrange(box_count)\n (chromosome[i], chromosome[j]) = (chromosome[j], chromosome[i])\n result.append(Individual(evaluate(chromosome), chromosome))\n return result[:population_size]", "def step(individuals):\r\n\r\n if params['BASELINE_STEPS']:\r\n individuals = evaluation(individuals)\r\n else:\r\n # Select parents\r\n parents = selection(individuals)\r\n\r\n # Crossover parents and add to the new population\r\n cross_pop = crossover(parents)\r\n\r\n # Mutate the new population\r\n new_pop = mutation(cross_pop)\r\n\r\n # Evaluate the fitness of the new population\r\n new_pop = evaluation(new_pop)\r\n\r\n # Replace the sorted individuals with the new populations\r\n individuals = replacement(new_pop, individuals)\r\n\r\n return individuals", "def crossover_unif(pa, ma):\n mask = nprand.random_integers(0, 1, len(pa.data))\n kiddata = np.array([pa_r if mask_r else ma_r for (pa_r, ma_r, mask_r) in\n zip(pa.data, ma.data, mask)])\n return Individual(kiddata.copy())", "def __init__(self, firstParent, secondParent):\n CrossOver.__init__(self, \"Group Point CrossOver\", firstParent, secondParent)", "def single_crossover(self, original1, original2):\n point=self.r.uniform(0.1,0.6)\n cut1=int(point*len(original1))\n cut2=int(point*len(original2))\n child1=original1[:cut1]+original2[cut2:]\n child2=original2[:cut2]+original1[cut1:]\n return child1, child2", "def crossover(a, b):\n new_a = [] #Clearing previous \n cut_a = random.randint(1, len(a)-1) #Makes sure there is always a cut\n\n new_a1 = a[0 : cut_a]\n new_a2 = b[cut_a : len(b)]\n\n #Creates the new crossed-over list\n new_a = new_a1 + new_a2\n return new_a", "def _next_generation(self, previous_generation):\n self._fullInput, self._fullOutput = previous_generation.get_data()\n self.power = self.settings.population_count\n for forest_iteration in range(self.power):\n first, second = previous_generation.selection()\n print 'selected for crossover ->', first.fitness, second.fitness\n self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second))", "def evolve(self, population):\n n = len(population)\n\n # Create offspring as crossover of parents\n offspring = []\n while len(offspring) < n:\n parent_1 = copy.deepcopy(random.choice(population))\n parent_2 = copy.deepcopy(random.choice(population))\n try:\n self.crossover.crossover(parent_1, parent_2)\n except CrossoverError:\n pass # Just keep parents\n offspring += [parent_1, parent_2]\n\n # Mutate offspring\n offspring = [self.mutator.mutate(tree) for tree in 
offspring]\n\n # Add it to population\n population += offspring\n\n # Keep the fitter part of the population\n population.sort(key=self.fitness_key, reverse=True)\n population = population[:n]\n\n return population", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def simulated_binary_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n di = args.setdefault('sbx_distribution_index', 10)\r\n bounder = args['_ec'].bounder\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\r\n try:\r\n if m > d:\r\n m, d = d, m\r\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\r\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\r\n u = random.random() \r\n if u <= (1.0 / alpha):\r\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\r\n else:\r\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\r\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\r\n bro_val = max(min(bro_val, ub), lb) \r\n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\r\n sis_val = max(min(sis_val, ub), lb)\r\n if random.random() > 0.5:\r\n bro_val, sis_val = sis_val, bro_val\r\n bro[i] = bro_val\r\n sis[i] = sis_val\r\n except ZeroDivisionError:\r\n # The offspring already have legitimate values for every element,\r\n # so no need to take any special action here.\r\n pass\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]", "def evolve(population, target_sum, target_mult, retain=0.2, random_select=0.05, mutate=0.009):\r\n graded = [ (fitness(x, target_sum, target_mult), x) for x in population ]\r\n graded = [ x[1] for x in sorted(graded) ]\r\n retain_length = int(len(graded) * retain)\r\n parents = graded[:retain_length]\r\n # randomly add other individuals to promote genetic # diversity\r\n for individual in graded[retain_length:]:\r\n if random_select > random():\r\n parents.append(individual)\r\n # crossover parents to create offspring\r\n desired_length = len(population) - len(parents)\r\n children = []\r\n while len(children) < desired_length:\r\n male = randint(0, len(parents)-1)\r\n female = randint(0, len(parents)-1)\r\n if male != female:\r\n male = parents[male]\r\n female = parents[female]\r\n half = int(len(male) / 2)\r\n child = male[:half] + female[half:]\r\n children.append(child)\r\n # mutate some individuals for individual in children:\r\n for individual in children:\r\n if mutate > random():\r\n pos_to_mutate = randint(0, len(individual)-1)\r\n # this mutation is not ideal, because it\r\n # restricts the range of possible values,\r\n # but the function is unaware of the min/max\r\n # values used to create the individuals\r\n individual[pos_to_mutate] = randint(min(individual), max(individual))\r\n parents.extend(children)\r\n return parents", "def simulated_binary_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n di = args.setdefault('sbx_distribution_index', 10)\n bounder = args['_ec'].bounder\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\n try:\n if m > d:\n m, d = d, m\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\n u = random.random() \n if u <= 
(1.0 / alpha):\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\n else:\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\n bro_val = max(min(bro_val, ub), lb) \n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\n sis_val = max(min(sis_val, ub), lb)\n if random.random() > 0.5:\n bro_val, sis_val = sis_val, bro_val\n bro[i] = bro_val\n sis[i] = sis_val\n except ZeroDivisionError:\n # The offspring already have legitimate values for every element,\n # so no need to take any special action here.\n pass\n return [bro, sis]\n else:\n return [mom, dad]", "def crossover(parent1: Individual, parent2: Individual, root_individual: RootIndividual,\n **kwargs) -> Tuple[Individual, Individual]:\n sieve = np.random.randint(2, size=len(parent1.params)) # Array of 0's and 1's\n not_sieve = sieve ^ 1 # Complement of sieve\n\n child1 = Individual(list(parent1.params * sieve + parent2.params * not_sieve), root_individual=root_individual)\n child2 = Individual(list(parent1.params * not_sieve + parent2.params * sieve), root_individual=root_individual)\n\n return child1, child2", "def crossover(self, gene2):\r\n assert self.key == gene2.key\r\n\r\n \r\n new_gene = self.__class__(self.key)\r\n for a in self._gene_attributes:\r\n if random() > 0.5:\r\n setattr(new_gene, a.name, getattr(self, a.name))\r\n else:\r\n setattr(new_gene, a.name, getattr(gene2, a.name))\r\n\r\n return new_genes", "def crossover(first_chromosome: str, second_chromosome: str, nurses_number: int = 10) -> (str, str):\n\n # calculate the number of genes\n genes = 21 * nurses_number\n # generated a position to crossover\n position = randrange(0, genes)\n\n # Calculate two new chromosomes\n new_first_chromosome = first_chromosome[:position] + second_chromosome[position:]\n new_second_chromosome = second_chromosome[:position] + first_chromosome[position:]\n\n # calculates the fitness of the new chromosomes generated\n new_first_chromosome_fitness = fitness_function(individual=new_first_chromosome, nurses_number=nurses_number)\n new_second_chromosome_fitness = fitness_function(individual=new_second_chromosome, nurses_number=nurses_number)\n\n # return the best chromosome generated\n if new_first_chromosome_fitness < new_second_chromosome_fitness:\n return new_first_chromosome\n\n return new_second_chromosome", "def crossover(self, t1, t2):\n assert isinstance(t1, ast.AST)\n assert isinstance(t2, ast.AST)\n\n for body_attr in ['body', 'orelse', 'finalbody']:\n if self.crossover_attr(t1, t2, body_attr):\n return t1, t2\n\n raise CrossoverError(\"No crossover found\")" ]
[ "0.736195", "0.72073233", "0.7123464", "0.705394", "0.6893509", "0.6868745", "0.6859749", "0.6791421", "0.67872", "0.66507554", "0.6649494", "0.66126937", "0.6332303", "0.63217306", "0.6302329", "0.6289073", "0.62873167", "0.62827295", "0.6282539", "0.624641", "0.6236538", "0.62258005", "0.6189349", "0.61429846", "0.61247486", "0.61167514", "0.61157787", "0.6106192", "0.61056536", "0.6070103", "0.60617954", "0.6025586", "0.5995025", "0.597971", "0.5953841", "0.590846", "0.5908377", "0.58328587", "0.5821861", "0.58156765", "0.5814362", "0.5795026", "0.5791564", "0.5700118", "0.5697598", "0.5692186", "0.5681664", "0.56578445", "0.56492037", "0.563076", "0.56301975", "0.56091547", "0.55467594", "0.5522911", "0.5521807", "0.5515313", "0.5514427", "0.5513672", "0.55053884", "0.5505174", "0.5491809", "0.5484269", "0.5480695", "0.5411708", "0.5404785", "0.5396841", "0.5393622", "0.5382312", "0.53813195", "0.53673553", "0.5362576", "0.5360703", "0.53558975", "0.5344228", "0.53388876", "0.5336498", "0.5322092", "0.53204733", "0.5312246", "0.5295057", "0.52935797", "0.5291461", "0.52848494", "0.5278452", "0.52761173", "0.5274745", "0.5257387", "0.52454495", "0.5244711", "0.521847", "0.5215879", "0.51934135", "0.51915133", "0.5176701", "0.5176103", "0.5175092", "0.5166286", "0.5165299", "0.5163844", "0.51475275" ]
0.55216956
55
'If you create a Lambda function that processes events from stream-based services (Amazon Kinesis Streams), the number of shards per stream is the unit of concurrency. If your stream has 100 active shards, there will be 100 Lambda functions running concurrently. Then, each Lambda function processes events on a shard in the order that they arrive.' Therefore, for checkpointing logic, we should make the primary
def handler(event, context):
    debug = False
    rewind = False
    dry_run = False

    table = _ensure_dynamo_table()
    consumer_id = 'test-consumer'

    if debug:
        state = table.scan()
        print "Active leases in Dynamo:", state["Count"]
        for item in state["Items"]:
            print json.dumps(item, indent=4, sort_keys=True)

    lease = None
    shard = None
    try:
        visitors = set()
        last_timestamp = None
        for i, record in enumerate(event.get('Records', [])):
            event_id, data = (record['eventID'], record['kinesis']['data'])
            shard, checkpoint = event_id.split(u':')

            if rewind:
                print "Rewinding to checkpoint 0"
                _clear_consumer_lease(table, consumer_id, shard)
                rewind = False

            if lease is None:
                lease = _get_consumer_lease(table, consumer_id, shard) \
                    or {"checkpoint": "0"}

            if checkpoint <= lease["checkpoint"]:
                # replayed event, we should skip it
                print "Replayed event; skipping"
                continue

            # => decode from b64
            raw_event = base64.b64decode(data)
            # => parse from JSON
            json_event = json.loads(raw_event)
            # => extract out visitor id and timestamp if present
            visitor = json_event.get("visitor_site_id", "N/A")
            visitors.add(visitor)
            last_timestamp = json_event.get("ts_action", "N/A")
            # => do something with the data
            result = process(json_event)
            if result:
                pass
            # => checkpoint the shard
            lease["checkpoint"] = checkpoint

        logger.info("Saw {} unique visitors in batch ending with {}".format(
            len(visitors), last_timestamp))

        if not dry_run:
            _put_consumer_lease(table, consumer_id, shard, lease)
    except Exception as ex:
        # do not save consumer checkpoints because error happened
        # instead, we should probably log something about the error
        # in the consumer lease, to allow the Lambda to retry a fixed
        # number of times, before finally "giving up" and skipping
        # the records
        raise
        "^ some form of error handling required"
        if ex:
            pass
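Note: the lease helpers referenced in the handler above (_ensure_dynamo_table, _get_consumer_lease, _put_consumer_lease, _clear_consumer_lease) are not included in this record. The following is only a minimal sketch of what they might look like, assuming a pre-existing DynamoDB table keyed on consumer_id and shard; the table name and key schema here are illustrative assumptions, not taken from the source.

import boto3

def _ensure_dynamo_table(table_name='kinesis-consumer-leases'):
    # Simplification (assumption): returns a handle to an existing table
    # instead of creating it on demand.
    return boto3.resource('dynamodb').Table(table_name)

def _get_consumer_lease(table, consumer_id, shard):
    # Returns the stored lease for this consumer/shard, or None if absent.
    resp = table.get_item(Key={'consumer_id': consumer_id, 'shard': shard})
    item = resp.get('Item')
    return {'checkpoint': item['checkpoint']} if item else None

def _put_consumer_lease(table, consumer_id, shard, lease):
    # Persists the latest checkpoint for this consumer/shard.
    table.put_item(Item={'consumer_id': consumer_id, 'shard': shard,
                         'checkpoint': lease['checkpoint']})

def _clear_consumer_lease(table, consumer_id, shard):
    # Deletes the lease so the consumer restarts from checkpoint "0".
    table.delete_item(Key={'consumer_id': consumer_id, 'shard': shard})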
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lambda_handler(event, context):\n\n mytime, lambda_name, env_vars = lambda_init.init_lambda(context)\n stage = env_vars[\"stage\"]\n consumer_master_past_lambda = env_vars[\"consumer_master_past_name\"]\n\n apps, test_params = init_apps_from_test_params(event)\n filters = init_filters()\n\n step = generate_step_from_mytime(mytime)\n\n print(\"step:\", step)\n for app in apps:\n advance_app_timestamp(app, step)\n\n consumer_event = {}\n\n # Invoke the consumer-master lambda for each app in apps\n for app in apps:\n headers = Headers(\n shadowreader_type=\"past\", stage=stage, app=app, step=step\n ).headers\n\n consumer_event = {\n \"app\": app.name,\n \"identifier\": app.identifier,\n \"base_url\": app.base_url,\n \"cur_timestamp\": app.cur_timestamp,\n \"rate\": app.rate,\n \"baseline\": app.baseline,\n \"parent_lambda\": lambda_name,\n \"child_lambda\": consumer_master_past_lambda,\n \"headers\": headers,\n \"filters\": filters,\n }\n invoke_func(consumer_event, func=consumer_master_past_lambda)\n\n if apps and consumer_event:\n print_to_logs(consumer_event, apps)\n\n # Collect metrics and put metrics into CW\n metrics = []\n for app in apps:\n # This is the timestamp (in epoch time) that is being replayed\n # by the load test.\n metric = {\n \"name\": \"replayed_timestamp\",\n \"stage\": stage,\n \"lambda_name\": lambda_name,\n \"app\": app.name,\n \"identifier\": app.identifier,\n \"mytime\": mytime,\n \"val\": app.cur_timestamp,\n }\n metrics.append(metric)\n\n if sr_plugins.exists(\"metrics\"):\n metric_emitter = sr_plugins.load(\"metrics\")\n for metric in metrics:\n metric_emitter.main(metric)\n\n cur_params = {\"apps\": apps, \"filters\": filters, \"test_params\": test_params}\n\n if sr_plugins.exists(\"test_params_emitter\"):\n params_emitter = sr_plugins.load(\"test_params_emitter\")\n params_emitter.main(\n cur_params,\n lambda_name,\n mytime,\n stage,\n env_vars,\n sr_config,\n sr_plugins._sr_plugins,\n )\n\n return json.dumps(cur_params, default=str), json.dumps(consumer_event, default=str)", "def __init__(__self__, *,\n function_name: pulumi.Input[str],\n amazon_managed_kafka_event_source_config: Optional[pulumi.Input['EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs']] = None,\n batch_size: Optional[pulumi.Input[int]] = None,\n bisect_batch_on_function_error: Optional[pulumi.Input[bool]] = None,\n destination_config: Optional[pulumi.Input['EventSourceMappingDestinationConfigArgs']] = None,\n document_db_event_source_config: Optional[pulumi.Input['EventSourceMappingDocumentDbEventSourceConfigArgs']] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n event_source_arn: Optional[pulumi.Input[str]] = None,\n filter_criteria: Optional[pulumi.Input['EventSourceMappingFilterCriteriaArgs']] = None,\n function_response_types: Optional[pulumi.Input[Sequence[pulumi.Input['EventSourceMappingFunctionResponseTypesItem']]]] = None,\n maximum_batching_window_in_seconds: Optional[pulumi.Input[int]] = None,\n maximum_record_age_in_seconds: Optional[pulumi.Input[int]] = None,\n maximum_retry_attempts: Optional[pulumi.Input[int]] = None,\n parallelization_factor: Optional[pulumi.Input[int]] = None,\n queues: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n scaling_config: Optional[pulumi.Input['EventSourceMappingScalingConfigArgs']] = None,\n self_managed_event_source: Optional[pulumi.Input['EventSourceMappingSelfManagedEventSourceArgs']] = None,\n self_managed_kafka_event_source_config: 
Optional[pulumi.Input['EventSourceMappingSelfManagedKafkaEventSourceConfigArgs']] = None,\n source_access_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['EventSourceMappingSourceAccessConfigurationArgs']]]] = None,\n starting_position: Optional[pulumi.Input[str]] = None,\n starting_position_timestamp: Optional[pulumi.Input[float]] = None,\n topics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tumbling_window_in_seconds: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"function_name\", function_name)\n if amazon_managed_kafka_event_source_config is not None:\n pulumi.set(__self__, \"amazon_managed_kafka_event_source_config\", amazon_managed_kafka_event_source_config)\n if batch_size is not None:\n pulumi.set(__self__, \"batch_size\", batch_size)\n if bisect_batch_on_function_error is not None:\n pulumi.set(__self__, \"bisect_batch_on_function_error\", bisect_batch_on_function_error)\n if destination_config is not None:\n pulumi.set(__self__, \"destination_config\", destination_config)\n if document_db_event_source_config is not None:\n pulumi.set(__self__, \"document_db_event_source_config\", document_db_event_source_config)\n if enabled is not None:\n pulumi.set(__self__, \"enabled\", enabled)\n if event_source_arn is not None:\n pulumi.set(__self__, \"event_source_arn\", event_source_arn)\n if filter_criteria is not None:\n pulumi.set(__self__, \"filter_criteria\", filter_criteria)\n if function_response_types is not None:\n pulumi.set(__self__, \"function_response_types\", function_response_types)\n if maximum_batching_window_in_seconds is not None:\n pulumi.set(__self__, \"maximum_batching_window_in_seconds\", maximum_batching_window_in_seconds)\n if maximum_record_age_in_seconds is not None:\n pulumi.set(__self__, \"maximum_record_age_in_seconds\", maximum_record_age_in_seconds)\n if maximum_retry_attempts is not None:\n pulumi.set(__self__, \"maximum_retry_attempts\", maximum_retry_attempts)\n if parallelization_factor is not None:\n pulumi.set(__self__, \"parallelization_factor\", parallelization_factor)\n if queues is not None:\n pulumi.set(__self__, \"queues\", queues)\n if scaling_config is not None:\n pulumi.set(__self__, \"scaling_config\", scaling_config)\n if self_managed_event_source is not None:\n pulumi.set(__self__, \"self_managed_event_source\", self_managed_event_source)\n if self_managed_kafka_event_source_config is not None:\n pulumi.set(__self__, \"self_managed_kafka_event_source_config\", self_managed_kafka_event_source_config)\n if source_access_configurations is not None:\n pulumi.set(__self__, \"source_access_configurations\", source_access_configurations)\n if starting_position is not None:\n pulumi.set(__self__, \"starting_position\", starting_position)\n if starting_position_timestamp is not None:\n pulumi.set(__self__, \"starting_position_timestamp\", starting_position_timestamp)\n if topics is not None:\n pulumi.set(__self__, \"topics\", topics)\n if tumbling_window_in_seconds is not None:\n pulumi.set(__self__, \"tumbling_window_in_seconds\", tumbling_window_in_seconds)", "def __init__(\n self, stream_name, checkpoint_table=None, host_key=None, shard_iterator_type=None,\n iterator_timestamp=None, shard_iterators=None, recover_from_dynamo=False,\n iterator_sequence_number=None, custom_kinesis_client=None):\n\n super(AsyncKinesisConsumer, self).__init__()\n\n self.stream_name = stream_name\n self.shard_iterator_type = shard_iterator_type\n self.iterator_timestamp = iterator_timestamp\n self.iterator_sequence_number = 
iterator_sequence_number\n self.restricted_shard_iterators = shard_iterators\n\n if recover_from_dynamo and not checkpoint_table:\n raise RuntimeError('Can not use recover_from_dynamo without checkpoint table')\n self.recover_from_dynamodb = recover_from_dynamo\n\n # Allow a custom kinesis client to be passed in. This allows for setting of any additional parameters in\n # the client without needing to track them in this library.\n if custom_kinesis_client is not None:\n self.kinesis_client = custom_kinesis_client\n else:\n self.kinesis_client = aioboto3.client('kinesis')\n\n self.checkpoint_table = checkpoint_table\n self.checkpoint_callback = None\n self.host_key = host_key\n\n self.shard_readers = {}\n self.dynamodb_instances = {}\n self.stream_data = None\n self.force_rescan = True\n\n self.checkpoint_interval = AsyncKinesisConsumer.DEFAULT_CHECKPOINT_INTERVAL\n self.lock_holding_time = AsyncKinesisConsumer.DEFAULT_LOCK_HOLDING_TIME\n self.reader_sleep_time = AsyncKinesisConsumer.DEFAULT_SLEEP_TIME\n self.fallback_time_delta = AsyncKinesisConsumer.DEFAULT_FALLBACK_TIME_DELTA", "def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n 
},\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }", "def lambda_handler(event, context):\n # EOL char append function\n encode_data = lambda x: \"{data}{eol}\".format(data=json.dumps(x), eol=chr(10)).encode(\"UTF-8\")\n \n # Punk API call\n try:\n logger.debug(\"Requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n request = r.get(os.environ[\"API_URL\"])\n except Exception as e:\n logger.error(\"An error occured while requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n raise e\n \n # Send records to kinesis stream\n logger.debug(\"Sending data to stream: {stream}\".format(stream=os.environ[\"STREAM_NAME\"]))\n for data in request.json():\n client.put_record(\n StreamName=os.environ[\"STREAM_NAME\"],\n Data=encode_data(data),\n PartitionKey=\"key\"\n )\n\n return {\n 'statusCode': request.status_code,\n 'body': data\n }", "def lambda_handler(event, context): # pylint: disable=too-many-locals,too-many-branches,too-many-statements\r\n try: # pylint: disable=too-many-nested-blocks\r\n print(\"Execution started!\")\r\n #print(\"Event: \",event)\r\n # Bucket name and Full path for file - where file will be uploded\r\n source_bucket_name = event[\"detail\"][\"requestParameters\"][\"bucketName\"]\r\n source_key = urllib.parse.unquote_plus(\r\n event[\"detail\"][\"requestParameters\"][\"key\"], encoding='utf-8')\r\n \r\n print(\"file_path: \",source_key)\r\n #Loading master config\r\n print(\"Loading master_config\")\r\n audit_config = {}\r\n config_path = \"./config/\" + \\\r\n os.environ['CCM_ENV'] + \"/master_config.json\"\r\n config_content = open(config_path).read()\r\n config_json = json.loads(config_content)\r\n audit_config = config_json[\"audit_config\"]\r\n snow_params = config_json[\"ERROR_NOTIFICATION_SNOW_PARAMS\"]\r\n athena_query_param = config_json[\"ATHENA_QUERY_PARAMS\"]\r\n athena_table_params = config_json[\"ATHENA_TABLE_PARAMS\"]\r\n\r\n # Audit Parameters Based on the Invoking lambda and its operation involved\r\n audit_config[\"component_type_code\"] = \"ETL\"\r\n audit_config[\"component_name\"] = \"PCP Appflow\"\r\n audit_config[\"source_name\"] = \"Patient Connections Platform\"\r\n audit_config[\"target_name\"] = \"Consumer Consent Management\"\r\n audit_config[\"full_file_path\"] = \"s3://\" + \\\r\n source_bucket_name + \"/\" + source_key\r\n audit_config[\"file_version_id\"] = \"\"\r\n\r\n # Creates Job Entry in ABC Framework\r\n print(\"audit config::\", audit_config)\r\n process_execution_id = audit_helper.\\\r\n invoke_edb_abc_log_process_status_event_job_entry(audit_config)\r\n audit_config[\"process_execution_id\"] = process_execution_id\r\n print(\"process_execution_id ::\", process_execution_id)\r\n #print(\"source_key: \",source_key)\r\n s3_write = boto3.client('s3')\r\n record_dict = {}\r\n file_name = \"\"\r\n final_json = \"\"\r\n # prefix = \"\"\r\n # file_list = []\r\n # client = boto3.client(\"s3\")\r\n # 
result = client.list_objects(Bucket=source_bucket_name, Prefix=source_key, Delimiter='/')\r\n # #print(result)\r\n # for obj in result.get('CommonPrefixes'):\r\n # prefix = obj.get('Prefix')\r\n # #print(prefix)\r\n # file_list = list_files(client,source_bucket_name,prefix)\r\n # for file in file_list:\r\n # #print(file)\r\n json_read = read_s3_file(source_bucket_name, source_key)\r\n data = json.loads(json_read)\r\n #print(data)\r\n if data != '':\r\n record_dict = {k.lower(): v for k, v in data.items()}\r\n print(\"Record_Dict::\",record_dict)\r\n event_type_param = {}\r\n event_type_list = athena_table_params.keys()\r\n print(\"event_type_list\",event_type_list)\r\n for key in event_type_list:\r\n print(\"key\",key)\r\n if key in source_key:\r\n print(\"key\",key)\r\n event_type_param = athena_table_params[key]\r\n print(event_type_param)\r\n if \"changeeventheader\" in record_dict:\r\n if record_dict[\"changeeventheader\"][\"changeType\"] == \"CREATE\":\r\n #and record_dict[\"dtpc_affiliate__c\"] == 'US':\r\n recordid_create = record_dict[\"changeeventheader\"][\"recordIds\"][0]\r\n print(recordid_create)\r\n if recordid_create != '':\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace(\":\",\".\")\r\n create_json = json.dumps(record_dict)\r\n final_json = create_json\r\n file_name = recordid_create + \"-create-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, Key=final_source_key)\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n elif record_dict[\"changeeventheader\"][\"changeType\"] == \"UPDATE\":\r\n record_ids_list = record_dict[\"changeeventheader\"][\"recordIds\"]\r\n if len(record_ids_list) != 0:\r\n for ele in record_ids_list:\r\n print(ele)\r\n element = \"'\" + ele + \"'\"\r\n payload_condition = event_type_param[\"recordid_condition\"]\r\n query = 'SELECT * FROM '+event_type_param[\"athena_create_table\"]+\\\r\n ' WHERE lastmodifieddate IN(SELECT max(lastmodifieddate) from '\\\r\n +event_type_param[\"athena_create_table\"]+\\\r\n ', UNNEST(\"'+payload_condition[0]+'\".\"'+payload_condition[1]+\\\r\n '\") AS ln(jsondata) WHERE jsondata IN ('+element+'));'\r\n print(query)\r\n athena_query_param['athena_query'] = query\r\n query_result_record_id = athena_helper.perform_athena_search\\\r\n (athena_query_param)\r\n print(\"Athena Query Result for Create Path:::\", query_result_record_id)\r\n update_json = create_complete_payload(data,query_result_record_id)\r\n print(\"update_json: \",update_json)\r\n if len(update_json) != 0:\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace\\\r\n (\":\",\".\")\r\n final_json = json.dumps(update_json)\r\n file_name = ele + \"-update-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, \\\r\n Key=final_source_key)\r\n else:\r\n print(ele,\" does not have a create payload\")\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n else:\r\n raise Exception(\"ChangeEventHeader is missing: \", record_dict)\r\n else:\r\n raise Exception(\"Invalid Payload: \", 
record_dict)\r\n\r\n except (Exception) as err: # pylint: disable=line-too-long,broad-except\r\n print(\"Error occured: {0}\".format(str(err)))\r\n audit_type = \"error\"\r\n error_msg = sys.exc_info()\r\n exc_type = error_msg\r\n exc_obj = error_msg\r\n snow_params[\"flag\"] = \"FAIL\"\r\n snow_params[\"error_message\"] = str(exc_obj)\r\n snow_params[\"error_type\"] = str(exc_type)\r\n audit_config[\"exception_message\"] = str(exc_obj)\r\n if audit_config != {}:\r\n logging.exception(sys.exc_info())\r\n audit_helper.invoke_edb_abc_log_process_status_event(\r\n audit_type, audit_config) # pylint: disable=line-too-long\r\n audit_helper.raise_snow_incident(snow_params)", "def test_kinesis_too_large_record(sdc_builder, sdc_executor, aws, keep_data):\n record_1_content = 'Hello 1'\n record_2_content = 'Hello ' + '2' * 1024 * 1024\n record_3_content = 'Hello 3'\n file_content = f'{record_1_content}\\n{record_2_content}\\n{record_3_content}'\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data=file_content,\n stop_after_first_batch=True,\n max_line_length=len(record_2_content)\n )\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n kinesis_producer = pipeline_builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> [kinesis_producer, wiretap.destination]\n pipeline = pipeline_builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n )\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n assert len(received_data) == 2\n assert received_data[0] == record_1_content\n assert received_data[1] == record_3_content\n\n error_records = wiretap.error_records\n assert len(error_records) == 1\n assert error_records[0].header['errorCode'] == 'KINESIS_08'\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def lambda_handler(event, context):\n print('Received request')\n item = None\n\n mysql_host = '54.212.197.235'\n mysql_username = 'rts'\n mysql_password = 'SamWangRamsay520-S'\n mysql_dbname = 'rts_kinesis'\n mysql_tablename = 'benchmark_kinesis'\n\n print('Start connection')\n conn = mysql.connector.connect(host=mysql_host,\n user=mysql_username,\n passwd=mysql_password,\n db=mysql_dbname )\n print('End connection')\n '''Write the 
message to the mysql database'''\n cur = conn.cursor()\n\n #dynamo_db = boto3.resource('dynamodb')\n #table = dynamo_db.Table('benchmark_kinesis')\n _mysql_buffer = [] #ad-hoc message buffering for mysql, equivalent to dynamodb batch-write behavior\n _mysql_buffer_limit = 25\n records = [record for record in event['Records']]\n new_records = deaggregate_records(records)\n #decoded_record_data = [record['kinesis']['data'] for record in new_records]\n #deserialized_data = [decoded_record for decoded_record in records]\n #for data in decoded_record_data:\n for record in new_records:\n\t#d_record = \"%.15g\" % record['kinesis']['partitionKey']\n\t#con_time = \"%.15g\" % time.time()\n\tcreation_time = Decimal(record['kinesis']['partitionKey'])\n\tconsumer_time = Decimal(time.time())\n\tvalue = record['kinesis']['data']\n\t#cur.execute('INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)', (creation_time, consumer_time, value))\n sql = 'INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)'\n _mysql_buffer.append((creation_time, consumer_time, value))\n if len(_mysql_buffer) > _mysql_buffer_limit:\n cur.executemany(sql, _mysql_buffer)\n _mysql_buffer = []\n\t# Add a processed time so we have a rough idea how far behind we are\n #item['processed'] = datetime.datetime.utcnow().isoformat()\n\n conn.commit()\n conn.close()\n cur.close()\n # Print the last item to make it easy to see how we're doing\n #print(json.dumps(item))\n print('Number of records: {}'.format(str(len(new_records))))", "def flax_shard_checkpoint(params, max_shard_size=\"10GB\"):\n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = {}\n current_block_size = 0\n total_size = 0\n\n # flatten the weights to chunk\n weights = flatten_dict(params, sep=\"/\")\n for item in weights:\n weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = {}\n current_block_size = 0\n\n current_block[item] = weights[item]\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = FLAX_WEIGHTS_NAME.replace(\".msgpack\", f\"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack\")\n shards[shard_file] = shard\n for weight_name in shard.keys():\n weight_map[weight_name] = shard_file\n\n # Add the metadata\n metadata = {\"total_size\": total_size}\n index = {\"metadata\": metadata, \"weight_map\": weight_map}\n return shards, index", "def lambda_handler(event, context):\n log.log_request_and_context(event, context)\n\n parent_batch_id = event[\"parent_batch_id\"]\n job_level = event[\"job_level\"]\n\n parent_batch = db.get_batch_metadata(parent_batch_id)\n if parent_batch is None:\n raise Exception(f\"Invalid parent batch id: {parent_batch_id}\")\n\n if job_level == 1:\n meta_data_type = BatchMetadataType.FIRST_LEVEL\n elif job_level == 2:\n meta_data_type = BatchMetadataType.SECOND_LEVEL\n elif job_level == 3:\n meta_data_type = BatchMetadataType.THIRD_LEVEL\n\n # Filter jobs by job 
level\n labeling_jobs = parent_batch[BatchMetadataTableAttributes.LABELING_JOBS]\n current_jobs = [job for job in labeling_jobs if job[\"jobLevel\"] == job_level]\n log.logging.info(\"Kicking off %d jobs for level %d\", len(current_jobs), job_level)\n\n batch_id = f\"{parent_batch_id}-{meta_data_type.lower()}\"\n for job in current_jobs:\n trigger_labeling_job(parent_batch_id, batch_id, job)\n\n try:\n db.insert_perform_labeling_job_metadata(\n parent_batch_id=parent_batch_id,\n batch_id=batch_id,\n batch_status=BatchStatus.IN_PROGRESS,\n batch_metadata_type=meta_data_type,\n num_children_batches=len(current_jobs),\n )\n except botocore.exceptions.ClientError as err:\n raise Exception(f\"failed to put batch id {batch_id}\") from err\n\n return {\n \"batch_id\": batch_id,\n }", "def redshift_lambda_handler(event, context):\n logging.debug('event: %s', event)\n\n detail = event['detail']\n event_name = detail['eventName']\n creator = get_creator(event)\n\n logger.info('Event type: %s', event_name)\n\n if is_err_detail(logger, detail):\n return False\n\n if event_name == 'CreateCluster':\n logger.debug('%s is creating cluster: %s',\n creator, detail['requestParameters']['clusterIdentifier'])\n\n # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html\n cluster_arn = 'arn:aws:redshift:' + detail['awsRegion'] + ':'\\\n + detail['userIdentity']['accountId'] + ':cluster:'\\\n + detail['requestParameters']['clusterIdentifier']\n short_msg = {\n \"EventName\": event_name,\n \"Creator\": creator,\n \"ResourceArn\": cluster_arn,\n \"TagStatus\": \"pending\",\n \"MaxRetries\": int(os.environ['SFN_MAX_RETRIES']),\n \"Retries\": 0\n }\n\n sfn = Boto3Wrapper.get_client('stepfunctions')\n response = sfn.start_execution(\n stateMachineArn=os.environ['SFN_ARN'],\n name=creator+'-'+event_name+'-'+detail['eventID'],\n input=json.dumps(short_msg)\n )\n\n logger.info('Step Functions start execution: %s', response)\n\n return True", "def lambda_handler(event, context):\n print(event)\n print(context)\n storage_gateway_status()", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def process(msg, context, region):\n\n job_id = int(msg['ingest_job'])\n chunk_key = msg['chunk_key']\n tile_key = msg['tile_key']\n print(\"Tile key: {}\".format(tile_key))\n\n proj_info = BossIngestProj.fromTileKey(tile_key)\n\n # Set the job id\n proj_info.job_id = msg['ingest_job']\n\n print(\"Data: {}\".format(msg))\n\n # update value in the dynamo table\n tile_index_db = BossTileIndexDB(proj_info.project_name)\n chunk = tile_index_db.getCuboid(chunk_key, job_id)\n if chunk:\n if tile_index_db.cuboidReady(chunk_key, chunk[\"tile_uploaded_map\"]):\n print(\"Chunk already has all its tiles: {}\".format(chunk_key))\n # Go ahead and setup to fire another ingest lambda so this tile\n # entry will be deleted on successful execution of the ingest lambda.\n chunk_ready = True\n else:\n print(\"Updating tile index for chunk_key: {}\".format(chunk_key))\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n else:\n # First tile in the chunk\n print(\"Creating first entry for chunk_key: {}\".format(chunk_key))\n try:\n tile_index_db.createCuboidEntry(chunk_key, job_id)\n except ClientError as err:\n # Under _exceptional_ circumstances, it's possible for another lambda\n # to beat the current instance to creating the initial cuboid entry\n # in the index.\n error_code = err.response['Error'].get('Code', 'Unknown')\n if error_code == 
'ConditionalCheckFailedException':\n print('Chunk key entry already created - proceeding.')\n else:\n raise\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n\n # ingest the chunk if we have all the tiles\n if chunk_ready:\n print(\"CHUNK READY SENDING MESSAGE: {}\".format(chunk_key))\n # insert a new job in the insert queue if we have all the tiles\n ingest_queue = IngestQueue(proj_info)\n ingest_queue.sendMessage(json.dumps(msg))\n\n # Invoke Ingest lambda function\n names = AWSNames.from_lambda(context.function_name)\n lambda_client = boto3.client('lambda', region_name=region)\n lambda_client.invoke(\n FunctionName=names.tile_ingest.lambda_,\n InvocationType='Event',\n Payload=json.dumps(msg).encode())\n else:\n print(\"Chunk not ready for ingest yet: {}\".format(chunk_key))\n\n print(\"DONE!\")", "def lambda_handler(event, context):\n\n # S3 resource invocation\n s3_resource = boto3.resource('s3')\n # S3 bucket selection\n data_bucket_name = \"put_here_data_bucket_name\"\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n request_body_dict = json.loads(event['body'])\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending both ticker and start date if given\n if request_body_dict['start_date'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_future_request(request_body=request_body_dict,\n s3_resource=s3_resource,\n s3_bucket=data_bucket_name, prefix='valid'))\n # or only ticker name if no start date has been provided\n elif request_body_dict['ticker_name'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_request(ticker_name=request_body_dict['ticker_name'],\n s3_resource=s3_resource, s3_bucket=data_bucket_name,\n prefix='train'))\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n # print data for debug purposes\n print(result)\n\n return {\n 'statusCode': 200,\n 'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},\n 'body': str(result)\n }", "def lambda_handler(Event, Context):\n if 'StateMachineArn' in Event.keys():\n step_function_arn = Event['StateMachineArn']\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))\n\n else:\n stepfunctions = [os.getenv(\"CHARGEBEEDOWNLOADARN\"), os.getenv(\"EXCHANGERATESDOWNLOADARN\")]\n\n for stepfunction in stepfunctions:\n step_function_arn = stepfunction\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))", "def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n 
logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)", "def test4():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000147203645/tic000147203645_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def handler(event, context): # pylint: disable=unused-argument\n\n if \"queue\" in event:\n # Lambda is being invoked to read messages directly from queue URL\n # In that mode SNS events are always sent to the internal\n # reconcile topic\n process_queue(\n stac_bucket=os.environ[\"STAC_BUCKET\"],\n cog_pds_meta_pds=json.loads(os.environ[\"COG_PDS_META_PDS\"]),\n queue=event[\"queue\"],\n message_batch_size=int(os.environ[\"MESSAGE_BATCH_SIZE\"]),\n sns_reconcile_target_arn=os.environ[\"SNS_RECONCILE_TARGET_ARN\"],\n catalog_update_queue=os.environ.get(\"CATALOG_UPDATE_QUEUE\"),\n catalog_update_table=os.environ[\"CATALOG_UPDATE_TABLE\"],\n corrupted_xml_queue=os.environ[\"corrupted_xml_queue_url\"],\n delete_processed_messages=int(os.environ[\"DELETE_MESSAGES\"]) == 1,\n )\n else:\n # Lambda is being invoked as trigger to SQS\n process_trigger(\n stac_bucket=os.environ[\"STAC_BUCKET\"],\n cog_pds_meta_pds=json.loads(os.environ[\"COG_PDS_META_PDS\"]),\n event=event,\n sns_target_arn=os.environ[\"SNS_TARGET_ARN\"],\n sns_reconcile_target_arn=os.environ[\"SNS_RECONCILE_TARGET_ARN\"],\n catalog_update_queue=os.environ.get(\"CATALOG_UPDATE_QUEUE\"),\n catalog_update_table=os.environ[\"CATALOG_UPDATE_TABLE\"],\n corrupted_xml_queue=os.environ[\"corrupted_xml_queue_url\"],\n )", "def lambda_handler(event, context):\n print(event)\n \n #Input variables\n aminP = 0.7\n amaxP = 12.0\n durs = [0.75, 1.25, 3, 5, 7]\n min_snr = 3.9\n max_tce = 4\n frac_remain = 0.8\n det_window = 42\n noise_window = 12\n n_sigma = 3.9\n search_bucket = \"tesssearchresults\"\n #---------\n \n cloud = True\n #Local Storage\n local_filename = \"/tmp/mylightcurve.fits\"\n local_detrend_fn = \"/tmp/detrended.fits\"\n out_file = \"/tmp/output.csv\"\n \n b_filename = event['Records'][0]['s3']['object']['key']\n bucket = event['Records'][0]['s3']['bucket']['name'] \n \n #If not in the cloud bucket begins with /, do the following to set up for a test.\n if bucket[0] == \"/\":\n print(\"Not Using cloud.\")\n cloud = False #For testing\n local_filename = bucket + b_filename\n local_dir = \"/Users/smullally/TESS/lambdaSearch/test/tesssearchresults/\"\n local_detrend_fn = local_dir + \"detrended.fits\"\n out_file = local_dir + \"outfile.fits\"\n \n meta = dict()\n \n #Get the information from the light curve file.\n if cloud:\n time, flux, qflags, phead = io.read_lightcurve_lambda(bucket, \\\n b_filename, local_filename)\n else:\n time, flux, qflags, phead = io.read_lightcurve_lambda_local(bucket, \\\n b_filename, local_filename)\n \n #print(time,flux,qflags)\n ticid, camera, sector, ccd = io.read_header(phead)\n\n namestr = \"tic%012u/tic%012u_s%04u-%1u-%1u\" % \\\n (int(ticid), int(ticid), int(sector),int(camera), int(ccd))\n \n 
#Detrend\n good_time, meddet_flux = ps.clean_timeseries(time, flux, qflags, det_window, \\\n noise_window, n_sigma, sector)\n \n #import matplotlib.pyplot as plt\n #plt.figure()\n #plt.plot(good_time,meddet_flux,'.')\n \n #Take BLS\n results, stats = ps.identifyTces(good_time, meddet_flux, bls_durs_hrs=durs,\\\n minSnr=min_snr, \\\n fracRemain=frac_remain,\\\n maxTces=max_tce, minP=aminP, maxP=amaxP)\n #print(results)\n \n #Now write out results.\n \n bucket_out_name = namestr + \"_plsearch\" + '.csv'\n bucket_detrend_name = namestr + \"_detrend\" + '.fits'\n \n \n io.write_results(out_file, int(ticid), results, stats, **meta)\n io.write_timeseries(local_detrend_fn, good_time, meddet_flux, phead)\n \n if cloud: \n #Write to the S3 bucket.\n s3_client = boto3.client('s3')\n resp = s3_client.upload_file(out_file, search_bucket, bucket_out_name)\n resp = s3_client.upload_file(local_detrend_fn, search_bucket, bucket_detrend_name)\n else:\n resp = \"not cloud\"\n if not os.path.exists(local_dir + \"tic%012u\" % (int(ticid))):\n os.mkdir(local_dir + \"tic%012u\" % (int(ticid)))\n \n try:\n os.remove(local_dir + bucket_out_name)\n except:\n pass\n \n os.rename(out_file, local_dir + bucket_out_name)\n try:\n os.remove(local_dir + bucket_detrend_name)\n except:\n pass\n os.rename(local_detrend_fn, local_dir + bucket_detrend_name)\n \n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"outname\": bucket_detrend_name,\n \"response\": str(resp),\n \"period\": str(results[0][0]),\n \"epoch\": str(results[0][1])\n })\n }", "def test3():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000129646247_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def test_kinesis_preserve_record_order(sdc_builder, sdc_executor, aws, keep_data):\n expected_data = [f'Hello {i}' for i in range(100)]\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data='\\n'.join(expected_data),\n stop_after_first_batch=True\n )\n\n kinesis_producer = builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n dev_raw_data_source >> kinesis_producer\n pipeline = builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS ...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream ...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n 
)\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n\n logger.debug(f'Number of messages received from Kinesis = {len(received_data)}')\n assert received_data == expected_data\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def lambda_handler(event, context):\r\n print(\"Function triggered\")\r\n if 'local' == environ.get('APP_STAGE'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\r\n table = dynamodb.Table(\"audiobooksDB\")\r\n else:\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table(environ[\"TABLE_NAME\"])\r\n s3 = boto3.client('s3')\r\n \r\n s3FileName = event['Records'][0]['s3']['object']['key'].replace(\"+\", \" \")\r\n bucketName = event['Records'][0]['s3']['bucket']['name']\r\n # Download file from the S3 bucket\r\n try:\r\n book = s3.get_object(Bucket=bucketName, Key=s3FileName)\r\n print(\"Loading file from S3 bucket\")\r\n bookContent = book[\"Body\"].read().decode(\"utf-8\", errors=\"ignore\").split(\"------ END METADATA --------\")\r\n metadata = json.loads(bookContent[0])\r\n bookContent = bookContent[1]\r\n # Polly accepts 100,000 chars at a time. We make chunks of 99990 because we put the part 1 maker in\r\n bookContent = [bookContent[i:i+99990] for i in range(0, len(bookContent), 99990)]\r\n except Exception as e:\r\n print(\"Error while downloading file \" + s3FileName + \"from the S3 bucket \" + bucketName)\r\n raise\r\n # Add part marker to book\r\n if len(bookContent) > 1:\r\n count = 0\r\n for chunk in bookContent:\r\n chunk += \"Part \" + str(count)\r\n hasShortPart = False\r\n audioURLs = []\r\n pollyClient = boto3.client('polly')\r\n for chunk in bookContent:\r\n try:\r\n chunk = convert_text_to_ssml(chunk)\r\n print(\"Asking Polly to record the current chunk\")\r\n response = pollyClient.start_speech_synthesis_task(\r\n Engine='standard',\r\n LanguageCode='en-GB',\r\n OutputFormat='mp3',\r\n OutputS3BucketName=environ['AUDIO_S3_BUCKET'],\r\n Text=chunk,\r\n TextType='ssml',\r\n VoiceId='Brian',\r\n SnsTopicArn=environ[\"SNS_TOPIC\"],\r\n )\r\n\r\n audioURLs.append(response[\"SynthesisTask\"][\"OutputUri\"].split(\"amazonaws.com/\")[-1])\r\n if len(chunk) <= 2000:\r\n hasShortPart = True\r\n print(response)\r\n print(\"Polly was successfully asked to to record the current chunk\")\r\n except Exception as e:\r\n print(\"Error parsing chunk or requesting Polly to say it\")\r\n raise\r\n try:\r\n randomString = ''.join([random.choice(string.ascii_letters \r\n + string.digits) for n in range(32)]) \r\n audiobook = {\r\n \"id\": randomString,\r\n \"bookName\": metadata[\"bookName\"],\r\n \"imageURL\": metadata[\"imageURL\"],\r\n \"authorName\":metadata[\"authorName\"],\r\n \"genres\": metadata[\"genres\"],\r\n \"audioURLs\": audioURLs,\r\n \"description\": metadata[\"description\"],\r\n \"hidden\": False,\r\n \"hasShortPart\": hasShortPart,\r\n \"addedAt\": Decimal(datetime.now().timestamp())\r\n }\r\n response = table.put_item(\r\n Item=audiobook\r\n )\r\n except Exception as e:\r\n print(\"Exception inserting into database\")\r\n print(audiobook)\r\n print(response)\r\n raise\r\n return {\r\n \"statusCode\": 200,\r\n \"body\": json.dumps({\r\n \"message\": audioURLs\r\n }),\r\n }", "def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n 
lambdautils.utils.send_to_kinesis_stream(search_events, \"dummy_stream\")\n boto3_client(\"kinesis\").put_records.call_count == 1", "def run(self, event, context):\n logger.debug('Number of Records: %d', len(event.get('Records', [])))\n\n config = load_config()\n env = load_env(context)\n\n for record in event.get('Records', []):\n payload = StreamPayload(raw_record=record)\n classifier = StreamClassifier(config=config)\n classifier.map_source(payload)\n\n # If the kinesis stream or s3 bucket is not in our config,\n # go onto the next record\n if not payload.valid_source:\n continue\n\n if payload.service == 's3':\n self.s3_process(payload, classifier)\n elif payload.service == 'kinesis':\n self.kinesis_process(payload, classifier)\n elif payload.service == 'sns':\n self.sns_process(payload, classifier)\n else:\n logger.info('Unsupported service: %s', payload.service)\n\n # returns the list of generated alerts\n if self.return_alerts:\n return self.alerts\n # send alerts to SNS\n self.send_alerts(env, payload)", "def runs_on_aws_lambda():\n return 'AWS_SAM_LOCAL' not in os.environ and 'LAMBDA_TASK_ROOT' in os.environ", "def lambda_handler(event, context):\r\n if 'session' in event:\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n if ('session' in event and (event['session']['application']['applicationId'] !=\r\n \"amzn1.ask.skill.57119d91-fb3c-487f-be53-4e7fac12fb83\")):\r\n raise ValueError(\"Invalid Application ID\")\r\n\r\n \"\"\"if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\"\"\"\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])\r\n elif event['request']['type'] == 'UPDATE':\r\n return saveCoffeeMachineStatus(event['request'])\r\n elif event['request']['type'] == \"GLASS\":\r\n return glassStatus(event['request'])\r\n elif event['request']['type'] == \"WATER\":\r\n return waterStatus(event['request'])\r\n elif event['request']['type'] == \"COFFEE\":\r\n return coffeeStatus(event['request'])\r\n elif event['request']['type'] == \"ON_OFF\":\r\n return on_off_status(event['request'])\r\n elif event['request']['type'] == \"ONLINE\":\r\n return online_status_f(event['request'])\r\n elif event['request']['type'] == 'BUSY':\r\n return busyStatus(event['request'])", "def lambda_handler(event, context):\n # Environmental Variables\n CATALOG_BRANCHES_TABLE = anejocommon.set_env_var('CATALOG_BRANCHES_TABLE')\n PRODUCT_INFO_TABLE = anejocommon.set_env_var('PRODUCT_INFO_TABLE')\n S3_BUCKET = anejocommon.set_env_var('S3_BUCKET')\n\n # Loop through event records\n try:\n event_records = event['Records']\n except KeyError:\n event_records = [{'body': event}]\n\n for record in event_records:\n try:\n catalog_sync_info = json.loads(record['body'])\n except TypeError:\n catalog_sync_info = record['body']\n\n # Event Variables\n catalog_url = catalog_sync_info['catalog_url']\n\n apple_bucket_catalog_path = anejocommon.get_path_from_url(\n catalog_url,\n 'html',\n 
append_to_path='.apple'\n )\n \n catalog = anejocommon.retrieve_url(catalog_url)\n try:\n catalog_plist = plistlib.readPlistFromBytes(catalog.data)\n except plistlib.InvalidFileException:\n print(\"ERROR: Cannot read catalog plist\")\n return\n\n # Write our local (filtered) catalogs\n anejocommon.write_local_catalogs(\n apple_bucket_catalog_path,\n catalog_plist,\n S3_BUCKET,\n CATALOG_BRANCHES_TABLE,\n PRODUCT_INFO_TABLE\n )", "def test_kinesis_consumer(sdc_builder, sdc_executor, aws):\n # build consumer pipeline\n application_name = get_random_string(string.ascii_letters, 10)\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n kinesis_consumer = builder.add_stage('Kinesis Consumer')\n kinesis_consumer.set_attributes(application_name=application_name, data_format='TEXT',\n initial_position='TRIM_HORIZON',\n stream_name=stream_name)\n\n trash = builder.add_stage('Trash')\n\n kinesis_consumer >> trash\n\n consumer_origin_pipeline = builder.build(title='Kinesis Consumer pipeline').configure_for_environment(aws)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n # run pipeline and capture snapshot\n client = aws.kinesis\n try:\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n\n expected_messages = set('Message {0}'.format(i) for i in range(10))\n # not using PartitionKey logic and hence assign some temp key\n put_records = [{'Data': exp_msg, 'PartitionKey': '111'} for exp_msg in expected_messages]\n client.put_records(Records=put_records, StreamName=stream_name)\n\n # messages are published, read through the pipeline and assert\n snapshot = sdc_executor.capture_snapshot(consumer_origin_pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n output_records = [record.field['text'].value\n for record in snapshot[kinesis_consumer.instance_name].output]\n\n assert set(output_records) == expected_messages\n finally:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name) # Stream operations are done. 
Delete the stream.\n logger.info('Deleting %s DynamoDB table on AWS ...', application_name)\n aws.dynamodb.delete_table(TableName=application_name)", "def lambda_handler(event, context):\n \n filename = None\n fobj = None\n\n try:\n \n filename = 'dlq' + '-' + datetime.datetime.now().strftime(\"%s\")\n fobj = open('/tmp/'+filename, 'w')\n logger.debug('S3 client set up.')\n\n for record in event['Records']:\n fobj.write(json.dumps(record['body']))\n fobj.write(\"\\n\")\n \n except Exception as ex:\n logger.error('Exception in executing ingestion to S3: {}'.format(ex))\n send_sns_alert(str(ex))\n raise\n\n else:\n \n #Saves file to S3\n fobj.close()\n load_data_s3(filename)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Success!')\n }\n\n finally:\n\n # S3 - close temp object\n fobj.close()", "def checkpoint():", "def raid_table_dynamodb_stream_event(event, context):\n try:\n # Log AWS Lambda event\n logger.info('Event: {}'.format(json.dumps(event, indent=4)))\n for record in event['Records']:\n # Convert low-level DynamoDB format to Python dictionary\n deserializer = TypeDeserializer()\n table_keys = {k: deserializer.deserialize(v) for k, v in record['dynamodb']['Keys'].items()}\n table_attributes = {k: deserializer.deserialize(v) for k, v in record['dynamodb']['NewImage'].items()}\n\n if record['eventSourceARN'] == os.environ['DEMO_RAID_STREAM_ARN']:\n ands_url_path = \"{}modifyValueByIndex?handle={}&value={}&index={}\".format(\n os.environ[\"DEMO_ANDS_SERVICE\"],\n table_keys['handle'],\n table_attributes['contentPath'],\n table_attributes['contentIndex']\n )\n\n ands_secret = os.environ[\"ANDS_DEMO_SECRET\"]\n\n elif record['eventSourceARN'] == os.environ['RAID_STREAM_ARN']:\n ands_url_path = \"{}modifyValueByIndex?handle={}&value={}&index={}\".format(\n os.environ[\"ANDS_SERVICE\"],\n table_keys['handle'],\n table_attributes['contentPath'],\n table_attributes['contentIndex']\n )\n\n ands_secret = os.environ[\"ANDS_SECRET\"]\n\n else:\n logger.info('Unknown DynamoDB Stream')\n continue\n\n # Process new records\n if record['eventName'] == 'INSERT':\n # Skip if default Raid\n if table_attributes['contentPath'] == settings.RAID_SITE_URL:\n logger.info('Not updating content path \"{}\" on new RAiD as it is the default: {}'.format(\n table_attributes['contentPath'], table_keys['handle'])\n )\n continue\n\n logger.info('Updating content path \"{}\" on new RAiD: {}'.format(\n table_attributes['contentPath'], table_keys['handle'])\n )\n\n ands_mint = ands_helpers.ands_handle_request(\n ands_url_path,\n os.environ[\"ANDS_APP_ID\"],\n \"raid\",\n \"raid.org.au\",\n ands_secret,\n )\n\n logger.info(json.dumps(ands_mint))\n\n elif record['eventName'] == 'MODIFY':\n old_table_attributes = {\n k: deserializer.deserialize(v) for k, v in record['dynamodb']['OldImage'].items()\n }\n\n # Update handle content Path if it is different\n if old_table_attributes['contentPath'] != table_attributes['contentPath']:\n logger.info('Updating content path \"{}\" on existing RAiD: {}'.format(\n table_attributes['contentPath'], table_keys['handle'])\n )\n\n ands_mint = ands_helpers.ands_handle_request(\n ands_url_path,\n os.environ[\"ANDS_APP_ID\"],\n \"raid\",\n \"raid.org.au\",\n ands_secret,\n )\n\n logger.info(json.dumps(ands_mint))\n\n else:\n logger.info('Existing RAiD has no changes to content path.')\n\n except Exception as e:\n logger.error('Unknown error occurred.')\n logger.error(str(e))\n\n logger.info('DynamoDB Stream Processed...')", "def lambda_handler(event, context):\n\n current_module = 
\"Aggregation_Combiner\"\n error_message = \"\"\n bpm_queue_url = None\n current_step_num = 5\n\n # Define run_id outside of try block\n run_id = 0\n try:\n # Retrieve run_id before input validation\n # Because it is used in exception handling\n run_id = event[\"RuntimeVariables\"][\"run_id\"]\n\n environment_variables = EnvironmentSchema().load(os.environ)\n\n runtime_variables = RuntimeSchema().load(event[\"RuntimeVariables\"])\n\n # Environment Variables\n bucket_name = environment_variables[\"bucket_name\"]\n run_environment = environment_variables[\"run_environment\"]\n\n # Runtime Variables\n additional_aggregated_column = runtime_variables[\"additional_aggregated_column\"]\n aggregated_column = runtime_variables[\"aggregated_column\"]\n aggregation_files = runtime_variables[\"aggregation_files\"]\n bpm_queue_url = runtime_variables[\"bpm_queue_url\"]\n environment = runtime_variables[\"environment\"]\n in_file_name = runtime_variables[\"in_file_name\"]\n out_file_name = runtime_variables[\"out_file_name\"]\n sns_topic_arn = runtime_variables[\"sns_topic_arn\"]\n survey = runtime_variables[\"survey\"]\n total_steps = runtime_variables[\"total_steps\"]\n\n except Exception as e:\n error_message = general_functions.handle_exception(e, current_module, run_id,\n context=context)\n raise exception_classes.LambdaFailure(error_message)\n try:\n logger = general_functions.get_logger(survey, current_module, environment,\n run_id)\n except Exception as e:\n error_message = general_functions.handle_exception(e, current_module,\n run_id, context=context)\n raise exception_classes.LambdaFailure(error_message)\n\n try:\n logger.info(\"Started - Retrieved configuration variables.\")\n # Get file from s3\n imp_df = aws_functions.read_dataframe_from_s3(bucket_name, in_file_name)\n\n logger.info(\"Retrieved data from s3\")\n\n # Receive the 3 aggregation outputs.\n ent_ref_agg = aggregation_files[\"ent_ref_agg\"]\n cell_agg = aggregation_files[\"cell_agg\"]\n top2_agg = aggregation_files[\"top2_agg\"]\n\n # Load file content.\n ent_ref_agg_df = aws_functions.read_dataframe_from_s3(bucket_name, ent_ref_agg)\n cell_agg_df = aws_functions.read_dataframe_from_s3(bucket_name, cell_agg)\n top2_agg_df = aws_functions.read_dataframe_from_s3(bucket_name, top2_agg)\n logger.info(\"Successfully retrievied aggragation data from s3\")\n\n to_aggregate = [aggregated_column]\n if additional_aggregated_column != \"\":\n to_aggregate.append(additional_aggregated_column)\n\n # merge the imputation output from s3 with the 3 aggregation outputs\n first_merge = pd.merge(\n imp_df, ent_ref_agg_df, on=to_aggregate, how=\"left\")\n\n second_merge = pd.merge(\n first_merge, cell_agg_df, on=to_aggregate, how=\"left\")\n\n third_merge = pd.merge(\n second_merge, top2_agg_df, on=to_aggregate, how=\"left\")\n\n logger.info(\"Successfully merged dataframes\")\n\n # convert output to json ready to return\n final_output = third_merge.to_json(orient=\"records\")\n\n # send output onwards\n aws_functions.save_to_s3(bucket_name, out_file_name, final_output)\n logger.info(\"Successfully sent data to s3.\")\n\n if run_environment != \"development\":\n logger.info(aws_functions.delete_data(bucket_name, ent_ref_agg))\n logger.info(aws_functions.delete_data(bucket_name, cell_agg))\n logger.info(aws_functions.delete_data(bucket_name, top2_agg))\n logger.info(\"Successfully deleted input data.\")\n\n aws_functions.send_sns_message(sns_topic_arn, \"Aggregation - Combiner.\")\n logger.info(\"Successfully sent data to sns.\")\n\n except 
Exception as e:\n error_message = general_functions.handle_exception(e,\n current_module,\n run_id,\n context=context,\n bpm_queue_url=bpm_queue_url)\n\n finally:\n if (len(error_message)) > 0:\n logger.error(error_message)\n raise exception_classes.LambdaFailure(error_message)\n\n logger.info(\"Successfully completed module.\")\n\n # Send end of module status to BPM.\n status = \"DONE\"\n aws_functions.send_bpm_status(bpm_queue_url, current_module, status, run_id,\n current_step_num, total_steps)\n\n return {\"success\": True}", "def test_3():\n \n\n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n t = Stream('t')\n filter_then_square(in_streams[0], t,\n filter_threshold=20)\n print_stream(t, name='p1')\n\n def sums(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=' p2')\n\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n }\n },\n 'process_1':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': g,\n 'sources': {}\n },\n 'process_2':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': sums,\n 'sources': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('process_1', 'in'), ('process_2', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'process_1':\n {\n },\n 'process_2':\n {\n }\n }\n\n multicore(processes, connections)", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n event['session']['attributes'] = {\"convoState\" : 1}\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" + event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return 
on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def consumer(state: SharedState):", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'], state)\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'], state)\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n LOGGER.debug(\n \"Processing event: %s\",\n json.dumps(event, indent=2)\n if LOGGER.isEnabledFor(logging.DEBUG)\n else \"--data-hidden--\",\n )\n sfn_client = boto3.client(\"stepfunctions\")\n s3_resource = boto3.resource(\"s3\")\n\n all_accounts = get_all_accounts()\n account_file = get_file_from_s3(get_details_from_event(event), s3_resource)\n\n processed_account_list = process_account_list(\n all_accounts=all_accounts,\n accounts_in_file=account_file.get(\"content\", {}).get(\"accounts\", []),\n )\n\n if processed_account_list:\n start_executions(\n sfn_client,\n processed_account_list,\n codepipeline_execution_id=account_file.get(\"execution_id\"),\n request_id=context.aws_request_id,\n )\n return event", "def test_kinesis_producer(sdc_builder, sdc_executor, aws):\n # build producer pipeline\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n raw_str = 'Hello World!'\n\n # Create Kinesis stream and capture the ShardId\n client = aws.kinesis\n try:\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n desc_response = client.describe_stream(StreamName=stream_name)\n shard_id = desc_response['StreamDescription']['Shards'][0]['ShardId']\n\n producer_dest_pipeline = get_kinesis_producer_pipeline(sdc_builder, aws, stream_name, raw_str)\n\n # add pipeline and capture pipeline messages to assert\n sdc_executor.add_pipeline(producer_dest_pipeline)\n sdc_executor.start_pipeline(producer_dest_pipeline).wait_for_pipeline_batch_count(10)\n sdc_executor.stop_pipeline(producer_dest_pipeline)\n\n history = sdc_executor.get_pipeline_history(producer_dest_pipeline)\n msgs_sent_count = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n logger.debug('Number of messages ingested into the pipeline = %s', msgs_sent_count)\n\n # read data from Kinesis to assert it is what got ingested into the pipeline\n shard_iterator = client.get_shard_iterator(StreamName=stream_name,\n ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')\n response = client.get_records(ShardIterator=shard_iterator['ShardIterator'])\n msgs_received = [response['Records'][i]['Data'].decode().strip()\n for i in 
range(msgs_sent_count)]\n\n logger.debug('Number of messages received from Kinesis = %d', (len(msgs_received)))\n\n assert msgs_received == [raw_str] * msgs_sent_count\n finally:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def inner(fn_inner):\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler", "def test_kinesis_producer_other_region(sdc_builder, sdc_executor, aws):\n endpoint = SERVICE_ENDPOINT_FORMAT.format('kinesis', aws.region)\n\n # build producer pipeline\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n raw_str = 'Hello World!'\n\n # Create Kinesis stream and capture the ShardId\n client = aws.kinesis\n try:\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n desc_response = client.describe_stream(StreamName=stream_name)\n shard_id = desc_response['StreamDescription']['Shards'][0]['ShardId']\n\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='TEXT',\n raw_data=raw_str)\n kinesis_producer = builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(data_format='TEXT', stream_name=stream_name)\n\n dev_raw_data_source >> kinesis_producer\n producer_dest_pipeline = builder.build().configure_for_environment(aws)\n kinesis_producer.set_attributes(region='OTHER', endpoint=endpoint)\n\n # add pipeline and capture pipeline messages to assert\n sdc_executor.add_pipeline(producer_dest_pipeline)\n sdc_executor.start_pipeline(producer_dest_pipeline).wait_for_pipeline_batch_count(10)\n sdc_executor.stop_pipeline(producer_dest_pipeline)\n\n history = sdc_executor.get_pipeline_history(producer_dest_pipeline)\n msgs_sent_count = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n logger.debug('Number of messages ingested into the pipeline = %s', 
msgs_sent_count)\n\n # read data from Kinesis to assert it is what got ingested into the pipeline\n shard_iterator = client.get_shard_iterator(StreamName=stream_name,\n ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')\n response = client.get_records(ShardIterator=shard_iterator['ShardIterator'])\n msgs_received = [response['Records'][i]['Data'].decode().strip()\n for i in range(msgs_sent_count)]\n\n logger.debug('Number of messages received from Kinesis = %d', (len(msgs_received)))\n\n assert msgs_received == [raw_str] * msgs_sent_count\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, producer_dest_pipeline)\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def handleProducersNumberOfEvents(self):\n\n procScript = \"cmssw_handle_nEvents.py\"\n cmd = \"%s --input_pkl %s --output_pkl %s\" % (\n procScript,\n os.path.join(self.stepSpace.location, self.configPickle),\n os.path.join(self.stepSpace.location, self.configPickle))\n self.scramRun(cmd)\n\n return", "def handler(kinesis_records, context):\n data = kinesis_records[0].parse()\n detail = data.get('detail')\n return publish({\n \"eventSource\": data['source'],\n \"awsRegion\": data['region'],\n \"eventTime\": data['time'],\n \"eventName\": detail['eventName'],\n \"userIdentity\": {\n \"principalId\": detail['userIdentity']['principalId']\n },\n \"requestParameters\": {\n \"sourceIPAddress\": detail['sourceIPAddress']\n },\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"bucket\": {\n \"name\": detail['requestParameters']['bucketName'],\n \"arn\": detail['resources'][1]['ARN']\n },\n \"object\": {\n \"key\": detail['requestParameters']['key'],\n \"size\": detail['additionalEventData']['bytesTransferredIn']\n }\n }\n })", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], 
event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n 
on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from 
configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def checkpoint(func, inputs, params, flag):\n return func(*inputs)", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n else:\n print (\"********************** Unknown Request\")", "def main():\n # start Spark application and get Spark session, logger and config\n os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-streaming-kafka-0-10_2.11:2.1.0,org.apache.spark:spark-sql-kafka-0-10_2.11:2.1.0 pyspark-shell'\n spark = SparkSession \\\n .builder \\\n .master(\"local\") \\\n .appName(\"Demo\") \\\n .getOrCreate()\n #upper_udf = udf(lambda z: upper(z), StringType())\n spark.udf.register(\"upper_udf\", upper)\n ds1 = spark\\\n .readStream\\\n .format(\"kafka\")\\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"checkpointLocation\", \"/home/rajesh/Desktop/checkpoint\") \\\n .option(\"subscribe\", \"test\")\\\n .load()\n ds2=ds1\\\n .selectExpr(\"upper_udf(CAST(value AS STRING)) as values\")\\\n .writeStream \\\n .option(\"path\", 
\"/home/rajesh/Desktop/Data\")\\\n .option(\"checkpointLocation\", \"/home/rajesh/Desktop/checkpoint\")\\\n .start()\n # .selectExpr(\"upper_udf(value) as values\")\\\n ds2.awaitTermination()", "def event(event, context):\n# Sample event:\n #\n # _event = { \"Records\":[\n # {\n # \"eventVersion\":\"2.1\",\n # \"eventSource\":\"aws:s3\",\n # \"awsRegion\":\"us-east-1\",\n # \"eventTime\":\"2021-10-14T07:40:55.113Z\",\n # \"eventName\":\"ObjectCreated:Put\",\n # \"userIdentity\":{\n # \"principalId\":\"AWS:AROA6L2YJX2JCJYHEJ4UI:serverless-image-processing-test-create\"\n # },\n # \"requestParameters\":{\n # \"sourceIPAddress\":\"94.140.8.209\"\n # },\n # \"responseElements\":{\n # \"x-amz-request-id\":\"7CJHSGZ9MZF9995F\",\n # \"x-amz-id-2\":\"X5OtpRb+P9CuYKDHvjT8z9prnqqsH1yatZchN2uw8/158mcRUVhQNSW/z5ffXLqkLhu+4Kc163vZiRgVk3XaGd8H1NhZCu8N\"\n # },\n # \"s3\":{\n # \"s3SchemaVersion\":\"1.0\",\n # \"configurationId\":\"9b8f4135-35d4-4e07-b8a5-7d68cc95870b\",\n # \"bucket\":{\n # \"name\":\"serverless-image-processing-test-serverless-image-processing\",\n # \"ownerIdentity\":{\n # \"principalId\":\"A5IHQSLNTJKZN\"\n # },\n # \"arn\":\"arn:aws:s3:::serverless-image-processing-test-serverless-image-processing\"\n # },\n # \"object\":{\n # \"key\":\"test/6e7ef3f0-dcb6-4db6-9518-3bc6ec0ba492\",\n # \"size\":116716,\n # \"eTag\":\"f04e70e100f653a0e67f32f6098dea1c\",\n # \"sequencer\":\"006167DF06C888A626\"\n # }\n # }\n # }\n # ]\n # }\n\n logger.debug('event: {}'.format(event))\n for record in event['Records']:\n processRecord(record)\n\n return {'statusCode': httplib.ACCEPTED}", "def lambda_handler(event, _context):\n print('=====lambda handler started...')\n print(json.dumps(event))\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n # this will trigger if a one shot is used\n if event['request']['type'] == \"IntentRequest\":\n return on_launch(event['request'], event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n if event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def publish(event: dict):\n return kinesis.put_record(\n StreamName=DATA_STREAM,\n Data=json.dumps(event).encode('utf-8'),\n PartitionKey=randomize_arn(INVENTORY_ARN)\n )", "def shiftr_event_listener(event):\n state = event.data.get(\"new_state\")\n topic = state.entity_id.replace(\".\", \"/\")\n\n try:\n _state = state_helper.state_as_number(state)\n except ValueError:\n _state = state.state\n\n try:\n mqttc.publish(topic, _state, qos=0, retain=False)\n\n if state.attributes:\n for attribute, data in state.attributes.items():\n mqttc.publish(\n f\"/{topic}/{attribute}\", str(data), qos=0, retain=False\n )\n except RuntimeError:\n pass", "def test_ale_workflow_function_smoke(tmp_path_factory):\n tmpdir = tmp_path_factory.mktemp(\"test_ale_workflow_function_smoke\")\n sleuth_file = op.join(get_test_data_path(), \"test_sleuth_file.txt\")\n prefix = \"test\"\n\n # The same test is run with both workflow function and CLI\n workflows.ale_sleuth_workflow(\n sleuth_file, output_dir=tmpdir, prefix=prefix, n_iters=10, n_cores=1\n )\n assert op.isfile(op.join(tmpdir, f\"{prefix}_input_coordinates.txt\"))", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n 
event['session']['application']['applicationId'])\r\n\r\n\r\n if event['session']['new']:\r\n #print (\"**** Reached\")\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n #print(\"**** Intent coming is : \" + event['request']['type'])\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def run(self):\n lsh, minhashes = self._new_lsh_index()\n total_num_events = len(minhashes)\n for key, minhash in minhashes.items():\n event_id, event_type, index_name = key\n score = self._calculate_score(lsh, minhash, total_num_events)\n self._update_event(event_id, event_type, index_name, score)\n\n return dict(\n index=self._config.index,\n data_type=self._config.data_type,\n num_events_processed=total_num_events\n )", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def ingest(self):\n datetime_retrieved = datetime.now()\n prefix = self.prefix_template.format(**self.feed, year=datetime_retrieved.strftime('%Y'), month=datetime_retrieved.strftime('%m'))\n fp = self.generate_fp(\n template='{feedname}_{datetime_retrieved}',\n feedname=self.feed['feedname'],\n datetime_retrieved=datetime_retrieved\n )\n\n url_to_request = self.url_dict[(self.feed['state'],self.feed['feedname'])]\n try:\n r = requests.get(url_to_request)\n if r.status_code == 200:\n 
data_to_write = r.content\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('Raw data ingested from {} to {} at {} UTC'.format(url_to_request, prefix+fp, datetime_retrieved))\n else:\n self.print_func('Received status code {} from {} feed.'.format(r.status_code,self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))\n return\n except BaseException as e:\n data_to_write = f'The feed at {datetime_retrieved.isoformat()}.'.encode('utf-8')\n fp += '__FEED_NOT_RETRIEVED'\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('We could not ingest data from {} at {} UTC'.format(url_to_request, datetime_retrieved))\n raise e\n\n # trigger semi-parse ingest\n if self.feed['pipedtosandbox'] == True:\n self.print_func('Trigger {} for {}'.format(self.lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n\n # trigger ingest to socrata\n if self.feed['pipedtosocrata'] == True:\n self.print_func('Trigger {} for {}'.format(self.socrata_lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.socrata_lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))", "def test_bulk_round_trip_with_low_ingestrate(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=10000,\n copy_from_options={'INGESTRATE': 1500})", "def lambda_handler(event, context):\n #print(\"event.session.application.applicationId=\" + event['session']['application']['applicationId'])\n\n #if event['session']['new']:\n # on_session_started({'requestId': event['request']['requestId']},event['session'])\n \n intent = None\n try:\n intent = Intent(**event)\n return handle_intent(intent)\n except Exception as ex:\n err = traceback.format_exc()\n print(err)\n return error_handler_func(intent,msg=str(err))", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n \n if event['session']['new']: # if its a new session, go to on_session_started() funtion\n on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n 
event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def partition_fn(example, num_partitions):\n distribution = [80, 10, 10]\n\n bucket = hash(str(example['id'])) % np.sum(distribution)\n\n if bucket < distribution[0]:\n partition_train.inc()\n return 0\n elif bucket < distribution[0] + distribution[1]:\n partition_validation.inc()\n return 1\n else:\n partition_test.inc()\n return 2", "def lambda_handler(event, context):\n try:\n if not _is_setup:\n _setup()\n\n previous_attempts = event.get(\"Artifact\", dict(ReadAttempts=0))[\"ReadAttempts\"]\n\n manifest = _load_manifest(event[\"ResourceKey\"])\n artifact_location = dict(S3Bucket=_bucket_name, S3Key=manifest[\"ArtifactS3Key\"])\n artifact_exists = _artifact_exists(manifest[\"ArtifactS3Key\"])\n\n return dict(\n Found=artifact_exists,\n ReadAttempts=previous_attempts + 1,\n Location=artifact_location,\n ProjectName=manifest[\"ProjectName\"],\n Runtimes=manifest[\"Runtimes\"],\n )\n except Exception:\n # TODO: Turn these into known-cause state machine failures.\n raise", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def handle_event(event, context):\n\n try:\n print(\"Received event: \" + json.dumps(event, indent=2))\n\n # grab resources section of event, get task execution ids\n task_execution_arns = event['resources']\n\n # now fetch the input filter info from each task_detail, fire off jobs\n new_files_to_process = []\n for task_execution_arn in task_execution_arns:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/datasync.html#DataSync.Client.describe_task_execution\n response = datasync_client.describe_task_execution(TaskExecutionArn=task_execution_arn)\n print(\"Task execution details: \" + str(response))\n # this will be the location of the data in configured s3 bucket:\n # 'Includes': [\n # {\n # 'FilterType': 'SIMPLE_PATTERN',\n # 'Value': 'string'\n # },\n # ]\n if len(response['Includes']) > 0:\n file = response['Includes'][0]['Value']\n # files typically start with leading '/', strip that leading '/'\n print(\"Got filename:\" + file)\n if file.startswith('/', 0):\n new_files_to_process.append(file.lstrip('/'))\n else:\n 
new_files_to_process.append(file)\n else:\n print(\"Response didn't contain Includes files...\")\n\n if len(new_files_to_process) == 0:\n print('No files were parsed from input...exiting')\n return\n\n for new_file_to_process in new_files_to_process:\n state_machine_arn = os.environ['STATE_MACHINE_ARN']\n payload = {\"ObjectName\": new_file_to_process}\n json_payload = json.dumps(payload)\n print('Starting bcl2fastq with payload %s' % json_payload)\n #\n response = step_client.start_execution(stateMachineArn=state_machine_arn, input=json_payload)\n print(response)\n\n except Exception as e:\n print(e)\n print('Error handling event. %s' % e)\n raise e", "def lambda_handler(event, context):\n set_logging(level=logging.DEBUG)\n\n try:\n payload = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n account_id = payload['account_id']\n account_name = payload['account_name']\n # get the last region from the list to process\n region = payload['regions'].pop()\n # region = payload['region']\n # if request_id is present in payload, it means this lambda was called from the API\n request_id = payload.get('request_id', None)\n except Exception:\n logging.exception(f\"Failed to parse event\\n{event}\")\n return\n\n try:\n config = Config()\n\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(config.sqspolicy.ddb_table_name)\n\n account = Account(id=account_id,\n name=account_name,\n region=region,\n role_name=config.aws.role_name_identification)\n if account.session is None:\n return\n\n logging.debug(f\"Checking for public SQS policies in {account}\")\n\n # existing open issues for account to check if resolved\n open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)\n # make dictionary for fast search by id\n # and filter by current region\n open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n\n checker = SQSPolicyChecker(account=account)\n if checker.check():\n for queue in checker.queues:\n logging.debug(f\"Checking {queue.name}\")\n if queue.public:\n issue = SQSPolicyIssue(account_id, queue.url)\n issue.issue_details.tags = queue.tags\n issue.issue_details.name = queue.name\n issue.issue_details.region = queue.account.region\n issue.issue_details.policy = queue.policy\n if config.sqspolicy.in_whitelist(account_id, queue.url):\n issue.status = IssueStatus.Whitelisted\n else:\n issue.status = IssueStatus.Open\n logging.debug(f\"Setting {queue.name} status {issue.status}\")\n IssueOperations.update(ddb_table, issue)\n # remove issue id from issues_list_from_db (if exists)\n # as we already checked it\n open_issues.pop(queue.url, None)\n\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n # all other unresolved issues in DDB are for removed/remediated queues\n for issue in open_issues.values():\n IssueOperations.set_status_resolved(ddb_table, issue)\n if request_id:\n api_table = main_account.resource(\"dynamodb\").Table(config.api.ddb_table_name)\n DDB.track_progress(api_table, request_id)\n except Exception:\n logging.exception(f\"Failed to check SQS policies for '{account_id} ({account_name})'\")\n return\n\n # push SNS messages until the list with regions to check is empty\n if len(payload['regions']) > 0:\n try:\n Sns.publish(payload[\"sns_arn\"], payload)\n except Exception:\n logging.exception(\"Failed to chain insecure services checking\")\n\n logging.debug(f\"Checked SQS policies 
for '{account_id} ({account_name})'\")", "def test_kinesis_producer(sdc_builder, sdc_executor, aws):\n # build producer pipeline\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n raw_str = 'Hello World!'\n\n # Create Kinesis stream and capture the ShardId\n client = aws.kinesis\n try:\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n desc_response = client.describe_stream(StreamName=stream_name)\n shard_id = desc_response['StreamDescription']['Shards'][0]['ShardId']\n\n producer_dest_pipeline = get_kinesis_producer_pipeline(sdc_builder, aws, stream_name, raw_str)\n\n # add pipeline and capture pipeline messages to assert\n sdc_executor.add_pipeline(producer_dest_pipeline)\n sdc_executor.start_pipeline(producer_dest_pipeline).wait_for_pipeline_batch_count(10)\n sdc_executor.stop_pipeline(producer_dest_pipeline)\n\n history = sdc_executor.get_pipeline_history(producer_dest_pipeline)\n msgs_sent_count = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n logger.debug('Number of messages ingested into the pipeline = %s', msgs_sent_count)\n\n # read data from Kinesis to assert it is what got ingested into the pipeline\n shard_iterator = client.get_shard_iterator(StreamName=stream_name,\n ShardId=shard_id, ShardIteratorType='TRIM_HORIZON')\n response = client.get_records(ShardIterator=shard_iterator['ShardIterator'])\n msgs_received = [response['Records'][i]['Data'].decode().strip()\n for i in range(msgs_sent_count)]\n\n logger.debug('Number of messages received from Kinesis = %d', (len(msgs_received)))\n\n assert msgs_received == [raw_str] * msgs_sent_count\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, producer_dest_pipeline)\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def _process_stream_configuration(self, name, resource_id, resource_config):\n\n function_name = resource_config['Properties']['FunctionName']\n # From CloudFormation\n if self.cloudformation_template:\n function_id_pattern = re.compile(AwsApi.FUNCTION_ID_PATTERN.format(function_name, resource_id), re.IGNORECASE)\n for other_resource_id, other_resource_config in self.cloudformation_template.get('Resources', {}).items():\n if other_resource_config.get('Type') == 'AWS::Lambda::EventSourceMapping':\n # either ARN, function name, or broken intrinsic function\n target = other_resource_config.get('Properties', {}).get('FunctionName')\n if target and function_id_pattern.search(target):\n arn = other_resource_config['Properties'].get('EventSourceArn')\n if not arn:\n eprint(\"warn: event source mapping for `{}` missing `EventSourceArn`\", name)\n continue\n\n service_match = AwsApi.STREAM_ARN_SERVICE_PATTERN.match(arn)\n if not service_match:\n continue\n service = service_match.group(1)\n\n self._function_permissions. \\\n setdefault(name, {}). \\\n setdefault(arn, set()). 
\\\n update(\"{}:{}\".format(service, action) for action in AwsApi.STREAM_ACTIONS)\n\n # From production environment\n event_source_mappings = self.get_cached_api_result('lambda', region=self.default_region, account=self.default_account, api_method='list_event_source_mappings', api_kwargs={'FunctionName': function_name})\n for event_source_mapping in event_source_mappings['EventSourceMappings']:\n service_match = AwsApi.STREAM_ARN_SERVICE_PATTERN.match(event_source_mapping['EventSourceArn'])\n if not service_match:\n continue\n service = service_match.group(1)\n\n self._function_permissions. \\\n setdefault(name, {}). \\\n setdefault(event_source_mapping['EventSourceArn'], set()). \\\n update(\"{}:{}\".format(service, action) for action in AwsApi.STREAM_ACTIONS)", "def step_lambda_wrapper(func):\n\n @functools.wraps(func)\n def _lambda_wrapper(*args, **kwargs):\n \"\"\"\n Generic Step Function wrapper\n \"\"\"\n cold_start_duration = time.time() - constants.COLD_START_TIME\n trace = epsagon.trace.trace_factory.get_or_create_trace()\n trace.prepare()\n\n try:\n event, context = args\n except ValueError:\n # This can happen when someone manually calls handler without\n # parameters / sends kwargs. In such case we ignore this trace.\n return func(*args, **kwargs)\n\n try:\n runner = epsagon.runners.aws_lambda.StepLambdaRunner(\n time.time(),\n context\n )\n trace.set_runner(runner)\n # pylint: disable=W0703\n except Exception as exception:\n # Regress to python runner.\n warnings.warn(\n 'Lambda context is invalid, using simple python wrapper',\n EpsagonWarning\n )\n trace.add_exception(\n exception,\n traceback.format_exc()\n )\n return epsagon.wrappers.python_function.wrap_python_function(\n func,\n args,\n kwargs\n )\n\n if constants.COLD_START:\n runner.resource['metadata'][\n 'aws.lambda.cold_start_duration'\n ] = cold_start_duration\n constants.COLD_START = False\n\n try:\n trace.add_event(\n epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory(\n time.time(),\n event,\n context\n )\n )\n # pylint: disable=W0703\n except Exception as exception:\n trace.add_exception(\n exception,\n traceback.format_exc(),\n additional_data={'event': event}\n )\n\n trace.set_timeout_handler(context)\n\n result = None\n try:\n result = func(*args, **kwargs)\n steps_data = epsagon.utils.find_in_object(\n event,\n STEP_DICT_NAME\n )\n\n if isinstance(result, dict):\n epsagon.utils.print_debug(\n 'Step function result type is dict, steps_data={}'.format(\n steps_data\n )\n )\n # If the step functions data is not present, then this is the\n # First step.\n if steps_data is None:\n epsagon.utils.print_debug(\n 'Could not find existing steps data'\n )\n steps_dict = {'id': str(uuid4()), 'step_num': 0}\n path = []\n # Otherwise, just advance the steps number by one.\n else:\n # don't change trigger data\n steps_dict, path = steps_data\n steps_dict = copy.deepcopy(steps_dict)\n if 'step_num' in steps_dict:\n steps_dict['step_num'] += 1\n epsagon.utils.print_debug(\n 'Steps data found, new dict={}'.format(steps_dict)\n )\n else:\n steps_dict = {'id': str(uuid4()), 'step_num': 0}\n epsagon.utils.print_debug(\n 'Steps data not found, new dict={}'.format(\n steps_dict\n )\n )\n\n result_path = result\n # Tries to inject the steps data in the configured\n # or same path where it was found\n if isinstance(trace.step_dict_output_path, list):\n path = trace.step_dict_output_path\n try:\n for sub_path in path:\n result_path = result_path.get(sub_path)\n except Exception as exception: # pylint: disable=broad-except\n 
epsagon.utils.print_debug(\n 'Could not put steps in path={}'.format(path)\n )\n if result_path:\n epsagon.utils.print_debug(\n 'Adding steps dict to result_path={}'.format(\n result_path\n )\n )\n result_path[STEP_DICT_NAME] = steps_dict\n else:\n epsagon.utils.print_debug(\n 'Adding steps dict to root result'\n )\n result[STEP_DICT_NAME] = steps_dict\n\n runner.add_step_data(steps_dict)\n return result\n # pylint: disable=W0703\n except Exception as exception:\n runner.set_exception(\n exception,\n traceback.format_exc(),\n handled=False\n )\n raise\n finally:\n try:\n _add_status_code(runner, result)\n if not trace.metadata_only:\n runner.resource['metadata']['return_value'] = (\n copy.deepcopy(result)\n )\n # pylint: disable=W0703\n except Exception as exception:\n trace.add_exception(\n exception,\n traceback.format_exc(),\n )\n try:\n epsagon.trace.Trace.reset_timeout_handler()\n # pylint: disable=W0703\n except Exception:\n pass\n try:\n epsagon.trace.trace_factory.send_traces()\n # pylint: disable=W0703\n except Exception:\n pass\n\n return _lambda_wrapper", "def test_cbbackupmgr_with_eventing(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This eventing test is only for cb version 5.5 and later. \")\n from pytests.eventing.eventing_constants import HANDLER_CODE\n from lib.testconstants import STANDARD_BUCKET_PORT\n\n self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')\n self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')\n self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')\n self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')\n self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')\n self.create_functions_buckets = self.input.param('create_functions_buckets', True)\n self.docs_per_day = self.input.param(\"doc-per-day\", 1)\n self.use_memory_manager = self.input.param('use_memory_manager', True)\n self.backup_before_eventing = self.input.param('backup_before_eventing', False)\n bucket_params = self._create_bucket_params(server=self.master, size=256,\n replicas=self.num_replicas)\n self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.buckets = RestConnection(self.master).get_buckets()\n self.src_bucket = RestConnection(self.master).get_buckets()\n self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.backup_create()\n if (self.backup_before_eventing):\n self.backup_cluster()\n self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.buckets = RestConnection(self.master).get_buckets()\n self.gens_load = self.generate_docs(self.docs_per_day)\n self.expiry = 3\n\n self.restServer = self.get_nodes_from_services_map(service_type=\"eventing\")\n self.rest = RestConnection(self.restServer)\n\n\n self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,\n batch_size=self.batch_size)\n function_name = \"Function_{0}_{1}\".format(randint(1, 1000000000), self._testMethodName)\n self.function_name = function_name[0:90]\n body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)\n bk_events_created = False\n rs_events_created = False\n try:\n self.deploy_function(body)\n bk_events_created = True\n self.backup_cluster()\n rest_bk = 
RestConnection(self.backupset.cluster_host)\n bk_fxn = rest_bk.get_all_functions()\n\n backup_index = 0\n\n if self.backup_before_eventing:\n backup_index = 1\n self.backupset.start = 1\n self.backupset.end = 2\n\n if bk_fxn != \"\":\n self._verify_backup_events_definition(json.loads(bk_fxn), body, backup_index = backup_index)\n\n self.backup_restore()\n\n rest_rs = RestConnection(self.backupset.restore_cluster_host)\n\n if self.backup_before_eventing:\n self.assertTrue('metadata' in [bucket.name for bucket in rest_rs.get_buckets()])\n\n self.bkrs_resume_function(body, rest_rs)\n rs_events_created = True\n self._verify_restore_events_definition(bk_fxn)\n except Exception as e:\n self.fail(e)\n finally:\n master_nodes = [self.backupset.cluster_host,\n self.backupset.restore_cluster_host]\n for node in master_nodes:\n rest = RestConnection(node)\n self.bkrs_undeploy_and_delete_function(body, rest, node)\n self.rest = RestConnection(self.master)", "def main():\n\n config = configparser.ConfigParser()\n config.read('aws/dl.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n load_parquet_tables(cur, conn)\n record_count(cur, conn)\n duplicate_record_check(cur)\n\n conn.close()", "def generate_s3_events(cluster_name, cluster_dict, config):\n modules = config['clusters'][cluster_name]['modules']\n s3_bucket_id = modules['s3_events'].get('s3_bucket_id')\n\n if not s3_bucket_id:\n LOGGER_CLI.error(\n 'Config Error: Missing S3 bucket in %s s3_events module',\n cluster_name)\n return False\n\n cluster_dict['module']['s3_events_{}'.format(cluster_name)] = {\n 'source': 'modules/tf_stream_alert_s3_events',\n 'lambda_function_arn': '${{module.stream_alert_{}.lambda_arn}}'.format(cluster_name),\n 'lambda_function_name': '{}_{}_stream_alert_processor'.format(\n config['global']['account']['prefix'],\n cluster_name),\n 's3_bucket_id': s3_bucket_id,\n 's3_bucket_arn': 'arn:aws:s3:::{}'.format(s3_bucket_id),\n 'lambda_role_id': '${{module.stream_alert_{}.lambda_role_id}}'.format(cluster_name),\n 'lambda_role_arn': '${{module.stream_alert_{}.lambda_role_arn}}'.format(cluster_name)}\n\n return True", "def testCheckpointContinuationValidity(self):\n\n # Train once, get checkpoint via callback returns\n res_1 = {}\n bst_1 = train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[\n _checkpoint_callback(frequency=1, before_iteration_=False)\n ],\n num_boost_round=2,\n ray_params=RayParams(num_actors=2),\n additional_results=res_1)\n last_checkpoint_1 = res_1[\"callback_returns\"][0][-1]\n last_checkpoint_other_rank_1 = res_1[\"callback_returns\"][1][-1]\n\n # Sanity check\n lc1 = xgb.Booster()\n lc1.load_model(last_checkpoint_1)\n self.assertEqual(last_checkpoint_1, last_checkpoint_other_rank_1)\n self.assertEqual(last_checkpoint_1, lc1.save_raw())\n self.assertEqual(bst_1.get_dump(), lc1.get_dump())\n\n # Start new training run, starting from existing model\n res_2 = {}\n bst_2 = train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[\n _checkpoint_callback(frequency=1, before_iteration_=True),\n _checkpoint_callback(frequency=1, before_iteration_=False)\n ],\n num_boost_round=4,\n ray_params=RayParams(num_actors=2),\n additional_results=res_2,\n xgb_model=lc1)\n first_checkpoint_2 = res_2[\"callback_returns\"][0][0]\n first_checkpoint_other_actor_2 = res_2[\"callback_returns\"][1][0]\n last_checkpoint_2 = res_2[\"callback_returns\"][0][-1]\n last_checkpoint_other_actor_2 = 
res_2[\"callback_returns\"][1][-1]\n\n fcp_bst = xgb.Booster()\n fcp_bst.load_model(first_checkpoint_2)\n\n lcp_bst = xgb.Booster()\n lcp_bst.load_model(last_checkpoint_2)\n\n # Sanity check\n self.assertEqual(first_checkpoint_2, first_checkpoint_other_actor_2)\n self.assertEqual(last_checkpoint_2, last_checkpoint_other_actor_2)\n self.assertEqual(bst_2.get_dump(), lcp_bst.get_dump())\n\n # Training should not have proceeded for the first checkpoint,\n # so trees should be equal\n self.assertEqual(lc1.get_dump(), fcp_bst.get_dump())\n\n # Training should have proceeded for the last checkpoint,\n # so trees should not be equal\n self.assertNotEqual(fcp_bst.get_dump(), lcp_bst.get_dump())", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.xxxx\"):\n #Set Alexa Skill ID\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def handler(event, context):\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def pre_run_hook(self, instance, private_data_dir):\n instance.log_lifecycle(\"pre_run\")\n\n # Before task is started, ensure that job_event partitions exist\n create_partition(instance.event_class._meta.db_table, start=instance.created)", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n league = brasileirao.get()\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return 
on_intent(event['request'], event['session'], league)\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context) -> Dict:\n\n destination: Optional[str] = os.environ.get(\"DESTINATION\")\n assert destination, \"failed to get destination\"\n\n ec2_client = boto3.client(\"ec2\")\n try:\n vpcs_res: Dict = ec2_client.describe_vpcs()\n except ClientError as error:\n raise error\n\n for vpc in vpcs_res.get(\"Vpcs\", []):\n try:\n flow_logs_res: Dict = ec2_client.describe_flow_logs(\n Filters=[{\"Name\": \"resource-id\", \"Values\": [vpc.get(\"VpcId\")]}],\n )\n except ClientError as error:\n raise error\n\n flow_logs = flow_logs_res.get(\"FlowLogs\", [])\n\n if len(flow_logs) == 0:\n try:\n response = ec2_client.create_flow_logs(\n ResourceIds=[vpc.get(\"VpcId\")],\n ResourceType=\"VPC\",\n TrafficType=\"ALL\",\n LogDestinationType=\"s3\",\n LogDestination=destination,\n )\n except ClientError as error:\n raise error\n\n if response.get(\"Unsuccessful\", []) != []:\n raise Exception(f\"failed to create_flow_logs {response}\")\n\n print(f\"Created flow logs for {vpc.get('VpcId')}\")\n\n elif len(flow_logs) >= 1:\n exists = False\n for flow_log in flow_logs:\n if flow_log.get(\"LogDestination\", \"\") == destination:\n exists = True\n\n if not exists:\n try:\n ec2_client.create_flow_logs(\n ResourceIds=[vpc.get(\"VpcId\")],\n ResourceType=\"VPC\",\n TrafficType=\"ALL\",\n LogDestinationType=\"s3\",\n LogDestination=destination,\n )\n except ClientError as error:\n raise error\n print(f\"Created flow logs for {vpc.get('VpcId')}\")\n\n return {\"statusCode\": 200, \"body\": \"success\"}", "def test_generic(key,bucket):\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": bucket\n },\n \"object\": {\n \"key\": key\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n print(out)\n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n print('HANDLING EVENT')\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def test_inputmode_spark(self):\n def _map_fun(args, ctx):\n import tensorflow as tf\n cluster, server = TFNode.start_cluster_server(ctx)\n if ctx.job_name == \"ps\":\n server.join()\n elif ctx.job_name == \"worker\":\n with tf.device(tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d\" % ctx.task_index,\n cluster=cluster)):\n x = tf.placeholder(tf.int32, [None, 1])\n sq = tf.square(x)\n init_op = tf.global_variables_initializer()\n with 
tf.train.MonitoredTrainingSession(is_chief=(ctx.task_index == 0)) as sess:\n tf_feed = TFNode.DataFeed(ctx.mgr, False)\n while not sess.should_stop() and not tf_feed.should_stop():\n outputs = sess.run([sq], feed_dict={x: tf_feed.next_batch(10)})\n tf_feed.batch_results(outputs[0])\n\n input = [[x] for x in range(1000)] # set up input as tensors of shape [1] to match placeholder\n rdd = self.sc.parallelize(input, 10)\n cluster = TFCluster.run(self.sc, _map_fun, tf_args={}, num_executors=self.num_workers, num_ps=0, input_mode=TFCluster.InputMode.SPARK)\n rdd_out = cluster.inference(rdd)\n rdd_sum = rdd_out.sum()\n self.assertEqual(rdd_sum, sum([x * x for x in range(1000)]))\n cluster.shutdown()", "def lambda_handler(event, context):\n\n \"\"\"\n This statement prevents someone else from configuring a skill that sends \n requests to this function.\n \"\"\"\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\r\n print(\"Incoming request...\")\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def runEventCreation():\r\n config = CONFIG['steps']['EventCreation']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n\r\n min_window_size = ci['min_window_size']\r\n change_speed_by = ci['change_speed_by']\r\n speed_ratio = ci['train_zero_speed_ratio']\r\n datetime_limit = ci['datetime_limit']\r\n csv_name_prefix = ci['csv_name_prefix']\r\n input_bucket = ci['bucket']\r\n window_event_bucket = ci['window_event_bucket']\r\n window_events_file = ci['window_events_file']\r\n\r\n output_bucket = co['bucket']\r\n event_dir = co['event_dir']\r\n filename_include = co['filename_include']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n csv_files = get_files(input_bucket, boto_client,\r\n file_type='csv', prefix='filtered')\r\n csv_files = ['filtered/7016_2020-09-09.csv']\r\n create_window_event(files=csv_files,\r\n 
input_bucket=input_bucket,\r\n output_bucket=output_bucket,\r\n minio_client=minioClient,\r\n min_window_size=min_window_size,\r\n ouput_dir=event_dir,\r\n window_event_bucket=window_event_bucket,\r\n window_events_file=window_events_file,\r\n csv_name_prefix=csv_name_prefix,\r\n change_speed_by=change_speed_by,\r\n train_zero_speed_ratio=speed_ratio,\r\n datetime_limit=datetime_limit,\r\n filename_include=filename_include)", "def lambda_handler(event, context):\n client = boto3.client('sns')\n topic_arn = os.environ['topic_arn']\n try:\n quarantine_instance_id = event['quarantine_instance_id']\n message = \"Quarantine Instance : {quarantine_instance_id} has been launched.\".format(quarantine_instance_id=quarantine_instance_id)\n subject = \"Quarantine Instance had been launched.\"\n client.publish(\n TopicArn=topic_arn,\n Message=message,\n Subject=subject\n )\n return message\n except KeyError as ke:\n print(\"Key not found : {ke}\".format(ke=ke))\n raise\n except:\n print(\"Error encountered while sending SNS notification about instance launch.\")\n raise", "def lambda_handler(event, context):\n\n REGION = 'eu-central-1' # region to launch instance.\n INSTANCE_ID = 'i-02ad64e2ab12d8719'\n INSTANCE_TYPE = 't2.medium' # instance type to launch.\n BUCKET_NAME = 'bluecaparticles'\n\n ec2 = boto3.resource('ec2', REGION)\n instance = ec2.Instance(INSTANCE_ID)\n\n crawl_date = datetime.datetime.now()\n print(\"\\n--> Crawling date %s\" % crawl_date.strftime(\"%Y-%m-%d\"))\n\n # try:\n try:\n trigger_file = event['Records'][0]['s3']['object']['key']\n except:\n trigger_file = ''\n\n if trigger_file:\n if 'cincodias' in trigger_file:\n print(\"\\n--> Crawling Elconfidencial\")\n parse_elconfidencial(crawl_date, BUCKET_NAME)\n elif 'elconfidencial' in trigger_file:\n print(\"\\n--> Crawling Eleconomista\")\n parse_eleconomista(crawl_date, BUCKET_NAME)\n elif 'eleconomista' in trigger_file:\n print(\"\\n--> Crawling Expansion\")\n parse_expansion(crawl_date, BUCKET_NAME)\n elif 'expansion' in trigger_file:\n print(\"\\n--> Launching EC2...\")\n response = instance.start()\n else:\n print(\"\\n--> Crawling cincodias\")\n parse_cincodias(crawl_date, BUCKET_NAME)", "async def predict(events):\n global model, model_version\n\n async for event in events:\n live_version = model_table['live_version']\n\n # check stream processors model version against live version in shared table\n if model_version != live_version:\n # load in new model if out of sync\n print(f\"Loading new model {live_version}\")\n # model is locally saved pickled python code\n # but more realistically this would be s3 and a much smarter rehydrating strategy\n model_location = model_table['model_location']\n model = pickle.load(open(model_location, \"rb\"))\n model_version = live_version\n\n result = model(event)\n print(f\"\\nEvent: {event}\\nPrediction: {result}\")", "def test_2():\n \n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n filter_then_square(in_streams[0], out_streams[0],\n filter_threshold=20)\n\n def h(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=s.name)\n \n\n # Specify processes and connections.\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': 
source_thread_target\n },\n },\n 'actuators': {}\n },\n 'filter_and_square_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('filtered', 'i')],\n 'compute_func': g,\n 'sources': {},\n 'actuators': {}\n },\n 'aggregate_and_output_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': h,\n 'sources': {},\n 'actuators': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('filter_and_square_process', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'filter_and_square_process' :\n {\n 'filtered' : [('aggregate_and_output_process', 'in')],\n },\n 'aggregate_and_output_process':\n {}\n }\n\n multicore(processes, connections)" ]
[ "0.6036465", "0.5968453", "0.5582644", "0.54831797", "0.5460034", "0.54361176", "0.5364517", "0.53256035", "0.52649206", "0.5256149", "0.5254377", "0.5188891", "0.5165099", "0.5165099", "0.5149605", "0.51353747", "0.51282066", "0.51216805", "0.5100538", "0.50952226", "0.50934696", "0.50637984", "0.5053903", "0.50325537", "0.5010805", "0.49944043", "0.4984336", "0.49797618", "0.49396428", "0.49276087", "0.49101356", "0.48882574", "0.48787558", "0.48528978", "0.4846506", "0.48409584", "0.4837236", "0.48267227", "0.48246104", "0.48214078", "0.48185223", "0.48170894", "0.4813803", "0.48028073", "0.48026073", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.4798868", "0.47932315", "0.47924957", "0.47863367", "0.47849458", "0.47837308", "0.478153", "0.47727555", "0.47726095", "0.47645804", "0.47631675", "0.4762575", "0.4762575", "0.47569874", "0.47496727", "0.47447497", "0.4742914", "0.47407475", "0.47406065", "0.47230798", "0.47219014", "0.4708105", "0.47052044", "0.47043332", "0.46965727", "0.46682277", "0.46658275", "0.46625045", "0.46586448", "0.46563107", "0.4644354", "0.46424505", "0.46418428", "0.46324745", "0.46305808", "0.4630348", "0.4626402", "0.46183953", "0.46161577", "0.46148372", "0.46147394", "0.46015748", "0.45999876", "0.45983216", "0.45909747" ]
0.61084247
0
Create a Render object.
def __init__(self, *args, **kwargs): super(ShotRenderCrawler, self).__init__(*args, **kwargs) parts = self.var("name").split("_") locationParts = parts[0].split("-") # Add the job var once job names on disk match job code names in shotgun self.setVar('seq', locationParts[1], True) self.setVar('shot', parts[0], True) self.setVar('step', parts[1], True) self.setVar('pass', parts[2], True) self.setVar('renderName', '{}-{}'.format( self.var('step'), self.var('pass') ), True )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(self):\n raise NotImplementedError(\"Renderer is an abstract class\")", "def make(self):\n\t\tif RENDER_VIEWS > 1:\n\t\t\tself._make()", "def render(self):\n return self", "def render(self):\n raise RenderNotImplemented('Render function is not implemented.')", "def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)", "def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)", "def render(self,obj,external=True):\n\n # check some prerequisites...\n if not obj.Renderer:\n return\n if not obj.Template:\n return\n if not os.path.exists(obj.Template):\n return\n\n # get the renderer module\n renderer = importRenderer(obj.Renderer)\n if not renderer:\n return\n\n # get the rendering template\n template = None\n with open(obj.Template,\"r\") as f:\n template = f.read()\n if sys.version_info.major < 3:\n template = template.decode(\"utf8\")\n if not template:\n return\n\n # get a camera string\n cam = self.writeCamera(obj,renderer)\n\n # get objects rendering strings (including lights objects)\n # and add a ground plane if required\n if obj.DelayedBuild:\n objstrings = [self.writeObject(view,renderer) for view in obj.Group]\n else:\n objstrings = [view.ViewResult for view in obj.Group]\n\n if hasattr(obj,\"GroundPlane\") and obj.GroundPlane:\n objstrings.append(self.writeGroundPlane(obj,renderer))\n\n renderobjs = ''.join(objstrings)\n\n # merge all strings (cam, objects, ground plane...) into rendering template\n if \"RaytracingCamera\" in template:\n template = re.sub(\"(.*RaytracingCamera.*)\",cam,template)\n template = re.sub(\"(.*RaytracingContent.*)\",renderobjs,template)\n else:\n template = re.sub(\"(.*RaytracingContent.*)\",cam+\"\\n\"+renderobjs,template)\n if sys.version_info.major < 3:\n template = template.encode(\"utf8\")\n\n # write merger result into a temporary file\n fh, fp = tempfile.mkstemp( prefix=obj.Name,\n suffix=os.path.splitext(obj.Template)[-1])\n with open(fp,\"w\") as f:\n f.write(template)\n os.close(fh)\n obj.PageResult = fp\n os.remove(fp)\n if not obj.PageResult:\n FreeCAD.Console.PrintError(translate(\"Render\",\"Error: No page result\"))\n return\n\n FreeCAD.ActiveDocument.recompute()\n\n # fetch the rendering parameters\n p = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/Render\")\n prefix = p.GetString(\"Prefix\",\"\")\n if prefix:\n prefix += \" \"\n output = os.path.splitext(obj.PageResult)[0]+\".png\"\n if hasattr(obj,\"OutputImage\") and obj.OutputImage:\n output = obj.OutputImage\n width = 800\n if hasattr(obj,\"RenderWidth\") and obj.RenderWidth:\n width = obj.RenderWidth\n height = 600\n if hasattr(obj,\"RenderHeight\") and obj.RenderHeight:\n height = obj.RenderHeight\n\n # run the renderer on the temp file\n return renderer.render(obj,prefix,external,output,width,height)\n\n FreeCAD.Console.PrintError(translate(\"Render\",\"Error while executing renderer\")+\" \"+str(obj.Renderer) + \": \" + traceback.format_exc()+\"\\n\")", "def createBasicRenderSetup():\n\n pass", "def render(self, **kw):\r\n style = kw.get('style', c.render_style or 'html')\r\n return Wrapped.part_render(self, self.style, style = style, **kw)", "def render(self):\n raise NotImplementedError", "def _render(self) -> None:\n pass", "def SyncRenderer(self, 
*args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return self.__class__(*args, **kw)", "def render(self):\n raise NotImplementedError()", "def render(self):\n if not self.renderer:\n raise NoRendererError('Field %s has no renderer assigned.' %\n self.id)\n return self.renderer.render(self)", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def createRenderLayer(*args, empty: bool=True, g: bool=True, makeCurrent: bool=True, name:\n AnyStr=\"\", noRecurse: bool=True, number: int=0, **kwargs)->AnyStr:\n pass", "def render(self, *args, **kwargs):\r\n raise NotImplementedError", "def render(self, r):\n raise NotImplementedError", "def _make_buffer(self, width, height):\n fb_prop = p3d.FrameBufferProperties(p3d.FrameBufferProperties.get_default())\n fb_prop.set_multisamples(self._multisamples)\n fb_prop.set_srgb_color(self._srgb_color)\n\n self._buffer = self._engine.make_output(\n self._pipe, name=\"offscreen\", sort=0,\n fb_prop=p3d.FrameBufferProperties.get_default(),\n win_prop=p3d.WindowProperties(size=(width, height)),\n flags=p3d.GraphicsPipe.BFRefuseWindow)\n\n self._region = self._buffer.make_display_region()\n\n self._depth_tex = p3d.Texture()\n self._depth_tex.setFormat(p3d.Texture.FDepthComponent)\n self._buffer.addRenderTexture(\n self._depth_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPDepth)\n\n self._color_tex = p3d.Texture()\n self._color_tex.setFormat(p3d.Texture.FRgba8)\n self._buffer.addRenderTexture(\n self._color_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPColor)", "def render(self, screen):\n pass", "def render(self, screen):\n pass", "def AsyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return self.__class__(*args, **kw)", "def render(self):\n try:\n if self.permit():\n return self.renderer.render(self)\n except AttributeError:\n if self.renderer is None:\n raise NotImplementedError(\"Should have implemented a renderer for {0}\".format(self.name))\n else:\n raise\n return ''", "def render(self, renderer, right=False):\n pass # pragma: no cover", "def AsyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return AsyncRenderer(*args, **kw)", "def AsyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return AsyncRenderer(*args, **kw)", "def render(self, surface):\n raise _InheritanceError('Function not defined')", "def render(self, data, accepted_media_type=None, renderer_context=None):\n renderer_context = renderer_context or {}\n form = data.serializer\n\n style = renderer_context.get('style', {})\n if 'template_pack' not in style:\n style['template_pack'] = self.template_pack\n style['renderer'] = self\n\n template_pack = style['template_pack'].strip('/')\n template_name = template_pack + '/' + self.base_template\n template = loader.get_template(template_name)\n context = {\n 'form': form,\n 'style': style\n }\n return template.render(context)", "def render(strategy, io, conf, options, template):\n\n renderer = Render(strategy, conf, options, 
template)\n renderer.render(io)", "def __init__(self, render_options: RenderOptions,\n verbosity: int, **kwargs) -> None:\n self.render_options = render_options\n self.verbosity = verbosity\n\n # this deferred is fired with the render result when\n # the result is ready\n self.deferred = defer.Deferred()", "def make(self, width=1500.0, height=1000.0):\n return self._meta.template1(width, height)", "def create_screen(self, width, height):", "def dspyRender(self):\n pass", "def render(self, mode=\"human\", height=None, width=None, camera_name=\"agentview\"):\n if mode == \"human\":\n cam_id = self.env.sim.model.camera_name2id(camera_name)\n self.env.viewer.set_camera(cam_id)\n return self.env.render()\n elif mode == \"rgb_array\":\n return self.env.sim.render(height=height, width=width, camera_name=camera_name)[::-1]\n else:\n raise NotImplementedError(\"mode={} is not implemented\".format(mode))", "def render(self, **kwargs) -> str:\n return self.renderable(**kwargs).render()", "def __init__(self, renderSurf):\n self.surf = renderSurf", "def generateRender(self, **options):\r\n\r\n path = options.get('path', '')\r\n resolution = options.get(\r\n 'resolution', QSize(mxs.renderWidth, mxs.renderHeight))\r\n pixelAspect = options.get('pixelAspect', 1.0)\r\n step = options.get('step', 1)\r\n frameRange = options.get('frameRange', [])\r\n missingFramesOnly = options.get('missingFramesOnly', False)\r\n\r\n if path:\r\n basePath = os.path.split(path)[0]\r\n if not os.path.exists(basePath):\r\n os.makedirs(basePath)\r\n\r\n if frameRange:\r\n bitmap = mxs.render(outputFile=path, fromFrame=frameRange[0], toFrame=frameRange[\r\n 1], camera=self._nativePointer, nthFrame=step, outputWidth=resolution.width(), outputHeight=resolution.height(), pixelAspect=pixelAspect)\r\n mxs.undisplay(bitmap)\r\n else:\r\n bitmap = mxs.render(outputFile=path, frame=mxs.pyHelper.namify(\r\n 'current'), camera=self._nativePointer, outputWidth=resolution.width(), outputHeight=resolution.height(), pixelAspect=pixelAspect)", "def do_render(parser, token):\n argv = token.contents.split()\n argc = len(argv)\n\n if argc != 2:\n raise TemplateSyntaxError('Tag %s takes one argument.' 
% argv[0])\n\n return RenderNode(obj=argv[1])", "def render( *args, **kwargs ):", "def render(self):\n self.run()\n return [{'dest' : self.dest,\n 'text' : self.tmpl.render(**self.data)}]", "def render(self, renderer: RenderingManager):\n self.grader.render(renderer)", "def load_render(views):\n render = render_jinja(\n views, encoding='utf-8',\n extensions=['jinja2.ext.do', AssetsExtension])\n render._lookup.assets_environment = env\n render._lookup.globals.update(dict(DEV=config.DEV,\n VERSION=get_version()))\n def inner():\n web.ctx.render = render;\n return inner", "def __call__(self, req, res):\n if not hasattr(res, 'render'):\n res.render = Renderer(res)\n res.locals = {}\n res.render.add_engine(self)", "def render(self, mode='human'):\n pass # no use in this situation", "def render(self) -> None:\n if self.native_rendering:\n self._render()\n else:\n self.renderer.render_image(self.get_rendered_image())", "def create_widget(self):\n pass", "def part_render(self, attr, *a, **kw):\r\n style = kw.get('style', 'html')\r\n template = self.template(style)\r\n dt = template.get_def(attr)\r\n return unsafe(dt.render(thing = self, *a, **kw))", "def render(self, template_name, **kwargs):\n raise NotImplementedError()", "def render(self):\n\n context = {\n 'model': self,\n 'hidden_fields': self.hidden_fields,\n 'css_prefix': self.css_prefix,\n }\n rendered = loader.render_to_string(self.template_path,\n dictionary=context)\n return rendered", "def render(self):\r\n super().render()", "def setup_render(\n self, options: Dict[str, Any], env: MutableMapping[str, Any]\n ) -> None:\n self.md_env = env\n self.config: Dict[str, Any] = options\n self.document: nodes.document = self.config.get(\"document\", make_document())\n self.current_node: nodes.Element = self.config.get(\n \"current_node\", self.document\n )\n self.reporter: Reporter = self.document.reporter\n # note there are actually two possible language modules:\n # one from docutils.languages, and one from docutils.parsers.rst.languages\n self.language_module_rst: ModuleType = get_language_rst(\n self.document.settings.language_code\n )\n self._level_to_elem: Dict[int, nodes.Element] = {0: self.document}", "def render(self, mode='human'):", "def render(self):\n\n self.desert_image.render()\n self.cannon_image.render()\n self.play_button.render()\n self.escape_label.render()", "def render(self, mode='human'):\n\n pass", "def __init__( self , objList = [] ):\r\n super( OGL_App , self ).__init__( resizable = True, caption = 'Render a Cuboid' )\r\n glClearColor( 0.7 , 0.7 , 0.8 , 1 ) # Set the BG color for the OGL window\r\n \r\n # URL: https://www.opengl.org/discussion_boards/showthread.php/165839-Use-gluLookAt-to-navigate-around-the-world\r\n self.camera = [ 4 , 4 , 4 , # eyex , eyey , eyez : Camera location , point (world) , XYZ\r\n 0 , 0 , 0 , # centerx , centery , centerz : Center of the camera focus , point (world) , XYZ\r\n 0 , 0 , 1 ] # upx , upy , upz : Direction of \"up\" in the world frame , vector , XYZ\r\n \r\n self.renderlist = objList", "def _render(self):\n self.dirty = False\n self.image = self.font.render(self._text, self.aa, self.color_fg)\n self.rect = self.image.get_rect()", "def create(\n cls,\n window, name, *,\n force_writes=False,\n follow_cursor=False,\n unlisted=False,\n **kwargs\n ):\n validate_view_options(kwargs)\n\n window.destroy_output_panel(name)\n view = window.create_output_panel(name, unlisted)\n set_view_options(view, **kwargs)\n\n return cls(window, name, force_writes=force_writes, 
follow_cursor=follow_cursor)", "def render(*args, **kwargs):\n if args:\n assert len(args) == 1, \\\n 'Expected exactly one argument, but got %r' % (args,)\n template = loader.load(args[0])\n else:\n template = cherrypy.thread_data.template\n ctxt = Context(url=cherrypy.url)\n ctxt.push(kwargs)\n return template.generate(ctxt)", "def _render(self, f_target, data):\n \n evt = self.plugin.dispatch(\n jink.plugin.Event('onBeforeRender', self, data=data, target=f_target),\n permit_cancel=True)\n if evt.isCancelled(): return\n data = evt.extra.data\n f_target = evt.extra.target\n \n # check if inheritance is specified\n refs = jinja2.meta.find_referenced_templates(self.engine.parse(data))\n try:\n refs.next()\n except StopIteration, e:\n # no, so insert default template\n t = self._filter(f_target, self.templates)\n if t: data = ( '{%% extends \"%s\" %%}\\n' % t ) + data\n \n # render\n data = self.engine.from_string(data).render({ 'config' : self.config.clone({ 'jink.path' : f_target}).get })\n self.log(2, '------------------------------')\n self.log(2, data)\n self.log(2, '------------------------------')\n return data", "def create(cls):\n pass\n return cls()", "def create(self, obj, include_link=False):\n return self._create(obj, preview=False, include_link=include_link)", "def render(self):\n self.env.render()", "def render(self):\n canvas_id = 'zdog_{}'.format(self.CANVAS_INDEX)\n illo_id = 'illo_{}'.format(self.CANVAS_INDEX)\n Scene.CANVAS_INDEX += 1\n\n html_lines = []\n\n js_lines = []\n\n euler = -rowan.to_euler(\n self.rotation, convention='xyz', axis_type='intrinsic')\n translation = self.translation*(1, -1, 1)\n\n pan_cfg = self.get_feature_config('pan')\n pan = pan_cfg.get('value', True) if pan_cfg is not None else False\n\n js_lines.append(\"\"\"\n let {illo_id} = new Zdog.Illustration({{\n element: '#{canvas_id}',\n zoom: {zoom},\n dragRotate: {rotation_enabled},\n rotate: {{x: {angle[0]}, y: {angle[1]}, z: {angle[2]}}},\n translate: {{x: {pos[0]}, y: {pos[1]}, z: {pos[2]}}},\n }});\n \"\"\".format(\n illo_id=illo_id, canvas_id=canvas_id, zoom=self.zoom*self.pixel_scale,\n angle=euler, pos=translation,\n rotation_enabled=('false' if pan else 'true')))\n\n config = self.get_feature_config('ambient_light')\n ambient_light = 0 if config is None else config.get('value', .4)\n\n config = self.get_feature_config('directional_light')\n directional_light = ([(0, 0, 0)] if config is None else\n config.get('value', [(0, 0, 0)]))\n directional_light = np.atleast_2d(directional_light)\n\n shapeIndex = 0\n for i, prim in enumerate(self._primitives):\n js_lines.extend(prim.render(\n rotation=self.rotation, illo_id=illo_id,\n name_suffix=i, ambient_light=ambient_light,\n directional_light=directional_light))\n\n (width, height) = map(int, self.size_pixels)\n html_lines.append(\"\"\"\n <canvas id=\"{canvas_id}\" width=\"{width}\" height=\"{height}\"></canvas>\n \"\"\".format(canvas_id=canvas_id, width=width, height=height))\n\n html_lines.append(\"\"\"<script>\n var fill_{canvas_id} = function() {{\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(LOCAL_HELPER_SCRIPT)\n html_lines.extend(js_lines)\n\n pan_snippet = \"\"\"\n new Zdog.Dragger({{\n startElement: {illo_id}.element,\n onDragStart: function( pointer, moveX, moveY) {{\n this.lastX = 0;\n this.lastY = 0;\n }},\n onDragMove: function( pointer, moveX, moveY ) {{\n let deltax = moveX - this.lastX;\n let deltay = moveY - this.lastY;\n let scale = 1.0/{illo_id}.zoom;\n {illo_id}.translate.x += deltax*scale;\n {illo_id}.translate.y 
+= deltay*scale;\n this.lastX = moveX;\n this.lastY = moveY;\n }}\n }});\"\"\".format(illo_id=illo_id)\n if pan:\n html_lines.append(pan_snippet)\n\n html_lines.append(\"\"\"\n let this_canvas = document.querySelector(\"#{canvas_id}\");\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(\"\"\"\n let animate_{canvas_id} = function() {{\n if(is_in_view(this_canvas))\n {{\n {illo_id}.updateRenderGraph();\n }}\n if(document.contains(this_canvas))\n {{\n requestAnimationFrame(animate_{canvas_id});\n }}\n }};\n animate_{canvas_id}();\"\"\".format(canvas_id=canvas_id, illo_id=illo_id))\n # remove the global reference to this function after using it\n html_lines.append('fill_{canvas_id} = null;'.format(canvas_id=canvas_id))\n html_lines.append('};') # end of fill_{canvas_id}\n # now call fill_{canvas_id}, possibly after loading zdog\n html_lines.append(\"\"\"\n if (typeof Zdog == 'undefined')\n {{\n var script = document.createElement('script');\n script.addEventListener('load', fill_{canvas_id}, false);\n script.src = 'https://unpkg.com/zdog@1/dist/zdog.dist.min.js';\n document.getElementsByTagName('head')[0].appendChild(script);\n }}\n else\n fill_{canvas_id}();\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append('</script>')\n\n return '\\n'.join(html_lines)", "def render(self):\n self.rendering = True\n self.env.render()", "def __init__(self, *args, **kwargs):\n _gdi_.RendererVersion_swiginit(self,_gdi_.new_RendererVersion(*args, **kwargs))", "def render(self):", "def render(self, style = None):\r\n from pylons import c\r\n style = style or c.render_style or 'html'\r\n template = self.template(style)\r\n if template:\r\n res = template.render(thing = self)\r\n return res if (style and style.startswith('api')) else unsafe(res)\r\n else:\r\n raise NoTemplateFound, repr(self)", "def get_renderer ( self, object ):\n return self.renderer", "def create_renderer(\n n: int, # number of modes\n output_format: Format = Format.TEXT, # rendering method\n skin: ASkin = None, # skin (unused in text rendering)\n **opts\n) -> ICircuitRenderer:\n if output_format == Format.TEXT:\n return TextRenderer(n)\n \n assert skin is not None, \"A skin must be selected for circuit graphical rendering\"\n if output_format == Format.HTML:\n canvas = SvgCanvas(**opts)\n elif output_format == Format.LATEX:\n canvas = LatexCanvas(**opts)\n else:\n canvas = MplotCanvas(**opts)\n return CanvasRenderer(n, canvas, skin)", "def render(self, **kwargs):\n template_file = kwargs.get(\"template_file\", None)\n search_path = kwargs.get(\"search_path\", None)\n template_string = kwargs.get(\"template_string\", None)\n destination_file = kwargs.get(\"destination_file\", None)\n render_obj = kwargs.get(\"render_obj\", None)", "def render(self):\n return render_to_string(\n self.template_name, self.get_context_data(), request=self.request\n )", "def start_render_window(self):\n\n # Initialize interactor\n self.__render_window_interactor.Initialize()\n\n # Start render window with interactor\n self.__render_window.Render()\n self.__render_window_interactor.Start()", "def render(self, *html, **opt):\n context = self.context\n # add request, response to context implicitly.\n context['request'] = self.request\n context['response'] = self.response\n if 'context' in opt:\n context.update(opt['context'])\n opt['context'] = context.dicts[0]\n cnt = self.get_controller()\n cnt.render(*html, **opt)", "def render_view(self, h, *args):\n return self.view(h)", "def create_instance(self, **kwargs):\r\n create_options = 
self._generate_create_dict(**kwargs)\r\n return self.guest.createObject(create_options)", "def CreateContext(*args):\n return _gdi_.GraphicsRenderer_CreateContext(*args)", "def render(self, mode=\"human\", width=500, height=500):\n if mode == \"human\":\n return self.mujoco_simulation.mujoco_viewer.render()\n elif mode == \"rgb_array\":\n return self.mujoco_simulation.render(width=width, height=height)\n else:\n raise ValueError(\"Unsupported mode %s\" % mode)", "def _render(self) -> str:\n html = self._template.render(self._transient_context)\n self._transient_context = None\n return html", "def render(self, rstate):\n pass", "def __init__(self, topleft, text=\"\", \n font_size=16, color=pygame.Color('white')):\n self.fontSize = font_size\n self.text = text\n self.color = color\n img, size = self._render_as_image(text)\n Drawable.__init__(self, pygame.Rect(topleft, size), img)", "def render(self) -> Any:\n if self.render_mode == \"rgb_array\":\n return self.ale.getScreenRGB()\n elif self.render_mode == \"human\":\n pass\n else:\n raise Error(\n f\"Invalid render mode `{self.render_mode}`. Supported modes: `human`, `rgb_array`.\"\n )", "def render(self) -> Optional[Union[RenderFrame, List[RenderFrame]]]:\n # Set the available rendering modes\n viewer_backend = (self.simulator.viewer or Viewer).backend\n if self.render_mode == 'human' and viewer_backend == \"panda3d-sync\":\n Viewer.close()\n\n # Call base implementation\n return self.simulator.render( # type: ignore[return-value]\n return_rgb_array=self.render_mode == 'rgb_array')", "def create_page(self, data):\n env = Environment(loader=FileSystemLoader(self.template_folder), trim_blocks=True, lstrip_blocks=True)\n template = env.get_template(self.template_file_name)\n template_vars = {'class_name': self.get_class_name(data['name']), 'page': data}\n output = template.render(template_vars)\n formatted_output = output.encode('utf8').strip()\n file_name = data['name'] + self.get_output_file_type()\n result_html = open(os.path.join(self.output_folder, file_name), 'w')\n result_html.write(formatted_output)\n result_html.close()", "def init_renderer(self):\n\n # Initialise render window\n renWin = vtk.vtkRenderWindow()\n if self.FULL_SCREEN:\n renWin.FullScreenOn()\n else:\n renWin.SetSize(\n int(self.WIN_H_SCALE*self.SCREEN_SIZE[0]),\n int(self.WIN_V_SCALE*self.SCREEN_SIZE[1])\n )\n\n class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):\n\n def __init__(self, parent=None):\n return None\n\n # Initialise interactor\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetInteractorStyle(MyInteractorStyle())\n # iren.AutoAdjustCameraClippingRangeOn()\n iren.SetRenderWindow(renWin)\n\n return renWin, iren", "def render(self, render_color=True, front_and_back=False):\n if self._camera is None:\n raise ValueError('scene.camera must be set before calling render()')\n if self._renderer is None:\n self._renderer = OpenGLRenderer(self)\n return self._renderer.render(render_color, front_and_back=front_and_back)", "def render(self):\n if self.frame_pos:\n self.pos = [\n self.frame_pos[0] + self.position[0] - (self.size[0] / 2),\n self.frame_pos[1] + self.position[1] - (self.size[1] / 2),\n ]\n if self.variable_text:\n self.image = self.fontA.render(self.text, 1, self.color)", "def render(self, mode='human', close=False):\n pass", "def render(self) -> Optional[np.ndarray]:\n if self.render_mode is None:\n assert self.spec is not None\n gym.logger.warn(\n \"You are calling render method without specifying any render mode. 
\"\n \"You can specify the render_mode at initialization, \"\n f'e.g. gym.make(\"{self.spec.id}\", render_mode=\"rgb_array\")'\n )\n return\n if self.viewer is None:\n self.viewer = EnvViewer(self)\n\n self.enable_auto_render = True\n\n self.viewer.display()\n\n if not self.viewer.offscreen:\n self.viewer.handle_events()\n if self.render_mode == 'rgb_array':\n image = self.viewer.get_image()\n return image", "def render(self, request=None):\n widget_cls = self.get_widget()\n\n if widget_cls:\n widget = widget_cls(self)\n\n render = widget.render(request=request)\n return render or ''\n elif DEBUG:\n logger.debug(\"No widget defined for %s.\", self.uid)", "def render(self, proj):\n if self.text == '' or not self.mesh:\n return\n\n model = self.model.getTransformation()\n mvp = proj * self.transform.getTransformation() * model\n\n gl.glEnable(gl.GL_FRAMEBUFFER_SRGB)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n self.shader.bind()\n if self.color:\n self.shader.setUniform('u_color', self.color)\n self.font.bindAtlas()\n self.shader.setUniform('T_MVP', mvp)\n self.mesh.draw()\n gl.glDisable(gl.GL_BLEND)\n self.shader.unbind()\n self.font.unbindAtlas()\n gl.glDisable(gl.GL_FRAMEBUFFER_SRGB)", "def render_to_template_mock(*_args):", "def create(self):\n self.panel = pg.rect.Rect(self.position, self.dimensions)", "def render(self):\n\n context = {\n 'record': self,\n 'hidden_fields': self.hidden_fields,\n 'css_prefix': self.css_prefix,\n }\n\n meta = getattr(self, 'Meta', None)\n context['survey_name'] = getattr(meta, 'survey_name', '')\n\n rendered = loader.render_to_string(self.template_path,\n dictionary=context)\n return rendered", "def renderInNewInstance(settings):\n if not pythonProper:\n print \"Python is not installed. 
You cannot call renderInNewInstance()\"\n return -1\n settings.showUI=False\n if settings.showUI:\n backgroundFlag=''\n else:\n backgroundFlag=' -W -p 0 0 0 0 -noaudio -nojoystick -noglsl'\n def fixup(longpath):\n try:\n import win32api\n return win32api.GetShortPathName(longpath)\n except ImportError:\n return '\"'+longpath+'\"'\n if settings.filename[-6:]==\".blend\":\n blenderFile='\"'+settings.filename+'\"'\n else:\n blenderFile=\"\"\n cmdline=fixup(settings.blender)+backgroundFlag+' '+blenderFile+' -P \"'+settings.renderThis+'\" --filename=\"'+settings.filename+'\"'\n cmdline=addParam(cmdline,\"--frame=\",settings,\"frame\")\n cmdline=addParam(cmdline,\"--w=\",settings,\"w\")\n cmdline=addParam(cmdline,\"--h=\",settings,\"h\")\n cmdline=addParam(cmdline,'--out=\"',settings,\"out\",'\"')\n cmdline=addParam(cmdline,\"--osa=\",settings,\"osa\")\n cmdline=addParam(cmdline,\"--ao=\",settings,\"ao\")\n cmdline=addParam(cmdline,\"--gi=\",settings,\"gi\")\n \n print \"Launching Blender:\\n\"+cmdline\n print \"--------------------------------\\n\\n\"\n result=k_runner.Application().run(cmdline,hideWindows=(settings.showUI==False),wDogLifetime=settings.killRenderAfter)\n print str(result)+\"\\n--------------------------------\\n\\n\"\n return result", "def render(self, camera=None):\r\n glPushMatrix()\r\n x,y,z = self.pos\r\n glTranslatef(x,y,-z)\r\n a, b, c = self.rotation\r\n glRotatef(a, 1, 0, 0)\r\n glRotatef(b, 0, 1, 0)\r\n glRotatef(c, 0, 0, 1)\r\n try:\r\n glScalef(*self.scale)\r\n except:\r\n glScalef(self.scale, self.scale, self.scale)\r\n glColor(*self.colorize)\r\n\r\n if self.outline:\r\n misc.outline(misc.OutlineGroup([i[0] for i in self.gl_lists]),\r\n self.outline_color, self.outline_size)\r\n\r\n for i in self.gl_lists:\r\n i[1].bind()\r\n i[0].render()\r\n glPopMatrix()", "def create(self, **kwargs):\n return Create(self, metadata=kwargs)" ]
[ "0.6394629", "0.621124", "0.61963266", "0.6114999", "0.5978222", "0.5978222", "0.5938133", "0.593024", "0.5887697", "0.56696737", "0.5659791", "0.5651973", "0.5642207", "0.5623252", "0.55697376", "0.55697376", "0.55697376", "0.55697376", "0.55697376", "0.55697376", "0.553905", "0.5533639", "0.5518394", "0.5509096", "0.5488539", "0.5488539", "0.54753447", "0.54635465", "0.5456709", "0.54479545", "0.54479545", "0.54475623", "0.54163444", "0.54123217", "0.5406335", "0.5371942", "0.53496444", "0.5340956", "0.5340323", "0.5339119", "0.532605", "0.5322108", "0.52906674", "0.52808267", "0.52544296", "0.524815", "0.5243213", "0.52348715", "0.5229121", "0.5226363", "0.52213424", "0.52190953", "0.52174133", "0.5202632", "0.5188441", "0.51818055", "0.5149506", "0.51454574", "0.5138044", "0.5137593", "0.5129652", "0.5126663", "0.51223224", "0.51217395", "0.5120446", "0.5117906", "0.5117083", "0.5115174", "0.5107935", "0.51056916", "0.51024586", "0.50876033", "0.50824344", "0.507743", "0.5065534", "0.5063339", "0.5059827", "0.5050913", "0.5047678", "0.50433064", "0.5039873", "0.5039846", "0.5039333", "0.5035139", "0.5032935", "0.5031781", "0.50286317", "0.5023964", "0.5018293", "0.5018168", "0.5016493", "0.5010034", "0.5008379", "0.5007601", "0.50062025", "0.5003303", "0.50024337", "0.49997696", "0.4987985", "0.49798375", "0.49750757" ]
0.0
-1
Test if the path holder contains a shot render.
def test(cls, pathHolder, parentCrawler): if not super(ShotRenderCrawler, cls).test(pathHolder, parentCrawler): return False renderType = pathHolder.baseName().split(".")[0].split("_")[-1] return renderType == "sr"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def point_is_shot(self, point: Point):\n return point in self.shot_locations", "def test(cls, pathHolder, parentCrawler):\n if not super(TurntableCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n renderType = pathHolder.baseName().split(\".\")[0].split(\"_\")[-1]\n\n return renderType == \"tt\"", "def test(cls, pathHolder, parentCrawler):\n if not super(Jpg, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() == 'jpg'", "def is_shot(event):\n event_id = event['eventId']\n return event_id == 10", "def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()", "def is_shot_related_version(self, version):\n return self.get_shot(version) is not None", "def is_shot_valid(self, shot):\n a = self.check_position(shot.opponent)\n b = self.check_shot_direction(shot)\n c = self.check_shot_on_target(shot)\n return a and b and c", "def _is_repeatedshot_type(cls, object_):\n return (type(object_).__name__ in ['RepeatedShot'])", "def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # 
Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)", "def hasPng(self):\n\t\tif self.isPng:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.png' ) ).exists", "def can_grab(self, thing):\n return False", "def can_grab(self, thing):\n return False", "def hasScreenshot(self, timeout=20.0, commandId=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n return self.isActionAccepted(timeout=timeout, commandName=Command.SCREENSHOT, \n commandId=commandId)", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def _setJob_checkShot(shotPath):\n\tvalid = True\n\n\tjobPath = os.path.split(shotPath)[0]\n\t#jobDataDir = os.path.join(jobPath, os.environ['IC_METADATA'])\n\tshotDataDir = os.path.join(shotPath, os.environ['IC_METADATA'])\n\n\t# if not os.path.isdir(jobDataDir):\n\t# \tvalid = False\n\n\tif not os.path.isdir(shotDataDir):\n\t\tvalid = False\n\n\treturn valid", "def has_screenshots(miscobj):\n\n imagedir = misctools.get_screenshots_dir(miscobj)\n return imagedir", "def _checkPath(self):\r\n if(not self._isStraightLine()):\r\n raise IllegalMoveException(\"Move is not a straight line\")\r\n path = self._getPath()\r\n if(any(cell.isOccupied() for cell in path)):\r\n raise IllegalMoveException(\"There are pawns on the path\")\r\n return True", "def has_guardian(self):\n return self.tiles.count(3) > 0", "def test(cls, pathHolder, parentCrawler):\n if not super(AsciiCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in ['json']", "def check_shot_direction(self, shot):\n return Vector.v_from_a(shot.angle) * self.dir < 0", "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def is_visible(self, path):\n return True", "def exists(self):\n\t\tif self.hasUdim:\n\t\t\treturn len( self.udimPaths ) != 0\n\t\treturn super( textureFile, self ).exists", "def check_path_tile(self):\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n if self.return_path and self.tile == self.return_path[0]:\n del self.return_path[0]\n if not len(self.return_path) > 0:\n return '*' # signal that the path is complete\n return None", "def hasImage(self):\n if self.getImage():\n return True\n return False", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def onGoal(self):\n return self.index == len(self.path)", "def is_path_available(self, y_pos, x_pos):\n if 15 > y_pos >= 0 and 0 <= x_pos < 15:\n return self.map[y_pos][x_pos] in [' ', 'G', 'X', 'W']\n return False", "def requires_safe_render(self) -> bool:\n return True\n # return any(is_reserved(child.name) for child in self.children)", "def canTile(self):\n raise RuntimeError('Not implemented')\n \n return False", "def shooting(self):\r\n return not self.stopped", "def is_mountpoint(path):\r\n return path in [m['dest'] for m in mounts()]", "def exists(self):\r\n return os.path.exists(self.full_path)", "def mounted(self):\n return os.path.ismount(self.get(\"~mountpoint\", \"/\"))", "def check_shot_nodes(progress_controller=None):\n if 
progress_controller is None:\n progress_controller = ProgressControllerBase()\n shot_nodes = pm.ls(type=\"shot\")\n progress_controller.complete()\n if len(shot_nodes) == 0:\n raise PublishError(\"There is no <b>Shot</b> node in the scene\")", "def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None", "def maybe_shoot(self):\n res = self.space.segment_query_first((self.tank.body.position[0] - \\\n 0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\\\n 0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\\\n 10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \\\n 10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())\n if res is not None:\n try:\n if hasattr(res, 'shape'):\n if isinstance(res.shape.parent, gameobjects.Tank):\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n elif isinstance(res.shape.parent, gameobjects.Box):\n if res.shape.parent.boxmodel.destructable is True:\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n except:\n pass", "def has_picture(self):\n try:\n first = self.picture_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def check_path(self, path):\n if path in self.app_path:\n return True\n else:\n return False", "def __nonzero__(self):\n return any(self.path)", "def has_run(self, path):\n\n try:\n data = [d for d in self.calls if path in d][0]\n except IndexError:\n return False\n else:\n return data[path]", "def __hit_bricks(self, g_object):\n return type(g_object) == GRect and g_object != self.__paddle", "def valid_for_shot(fetcher):\n\n def check_to_clause(shot, k, dic):\n \"\"\" check for leftover instances of to=[date, shot]\n where shot is 0 - this is very likely to be a mistake\n as there is no shot before 0\n \"\"\"\n if ('_to' in k and isinstance(shot,(list, tuple, ndarray)) \n and dic[k][1] == 0):\n print('******** Warning - valid shot of 0 in to clause?')\n\n\n if hasattr(fetcher,'valid_shots'):\n valid_shots = fetcher.valid_shots\n elif hasattr(fetcher.acq, 'valid_shots'):\n valid_shots = fetcher.acq.valid_shots\n else:\n valid_shots = None\n # another example of inheritance via pyfusion.cfg \n # - need to formalise this, extract the code to a function?\n is_valid = True\n\n # check for an MDSplus W7M test shot 18... - normal W7M is 2018.... 
- need to find command lines from ipp\n # Starting with 18 instead of 2018 allows pyfusion to distinguish test shots - on the W7X net,\n # perhaps these are distinguished by using a different tree, because the shot number may be duplicated\n # Examplee: run pyfusion/examples/plot_signals.py dev_name=W7M diag_name=W7M_BRIDGE_V1 shot_number=[180907,9]\n if np.shape(fetcher.shot) != () and fetcher.shot[0] < 990000: \n valid_shots = None # don't check as the check will find no shot at the moment\n pyfusion.utils.warn('ignoring valid_since data assumed for MDS test {sh}'\n .format(sh=str(fetcher.shot)))\n\n if valid_shots is not None:\n shot_or_utc = fetcher.shot\n # this 15 line code block is W7X specific - how to remove to W7X?\n if np.isscalar(shot_or_utc):\n compfun = int\n else:\n compfun = tuple\n valid_dict = eval('dict({ps})'.format(ps=valid_shots))\n for k in valid_dict:\n root = k.replace('_from','').replace('_to','')\n if '_' + root in fetcher.config_name:\n if pyfusion.VERBOSE>1: print('find_valid_for_shot: key={k}, root={r} shot={s} valid={v}'\n .format(k=k, r=root, s=fetcher.shot, v=valid_dict[k]))\n check_to_clause(shot_or_utc, k, valid_dict)\n # need to be both tuples or both lists for comparison to work\n if (('_from' in k and compfun(get_shot_utc(shot_or_utc)) < compfun(get_shot_utc(valid_dict[k])))\n or ('_to' in k and compfun(get_shot_utc(shot_or_utc)) > compfun(get_shot_utc(valid_dict[k])))):\n is_valid = False\n debug_(pyfusion.DEBUG, 2, 'valid_shots')\n return(is_valid)", "def hasImage(self):\n return self._image is not None", "def _mount_point_exists(self, mountpoint):\n cmd = ['dir', mountpoint]\n logger.debug('running command: %s' % (' '.join(cmd)))\n stdout, stderr, retval = self._run_cli_process(cmd)\n\n if not retval:\n logger.debug(\"mountpoint %s ready\" % mountpoint)\n else:\n logger.debug(\"mountpoint %s reported not ready with error '%s'\" %\n (mountpoint, stderr.strip()))\n\n return not retval", "def play_a_shot(self, req):\n game = models.BattleShip.getByUrlKey(req.url_key)\n return game.shoot(str(req.player), (str(req.y) + str(req.x - 1)))", "def scratch(self) -> bool:\n hcell = self._get_hcell2()\n return \"scratch\" in hcell", "def poll(cls, context):\r\n return context.object.animation_data.action is not None", "def _is_placeholder_mapped(self):\n return self.ctx.gwt_path(\n p4gf_const.P4GF_EMPTY_CHANGELIST_PLACEHOLDER).to_depot()", "def _hunting_mode(self):\n grid = self._grid\n width, height = grid.dimensions()\n valid_shot = False\n while not valid_shot:\n pos = (randint(0, width-1), randint(0, height-1))\n hit = grid.shoot(pos)\n shot = hit.cell\n valid_shot = shot not in HITS\n # if shot is valid\n if shot in SHIPS:\n self._stack += self._get_neighbours(pos)\n self._mode = TARGETING\n log(\"[HUNT]: Hit a ship at \" + str(pos) + \", going into targeting.\")\n elif shot == WATER:\n log(\"[HUNT]: Missed at \" + str(pos))\n if valid_shot:\n self.shots.add(pos)\n return shot", "def check(self):\n return self.tile==\"\"", "def shootdown(self, missile: 'games.stardash.projectile.Projectile') -> bool:\n return self._run_on_server('shootdown', {\n 'missile': missile\n })", "def draw_shot(self, dist, stepsize):\r\n if stepsize < 1:\r\n return dist\r\n \r\n self.image = self.base_image.copy()\r\n shoot_to = (self.rect.w/2 + math.cos(self.direction)*dist,\r\n self.rect.h/2 + math.sin(self.direction)*dist)\r\n \r\n pygame.draw.line(self.image, self.shot_color, (self.rect.w/2,self.rect.h/2), shoot_to, 1)\r\n #the third argument is a threshold value. 
Apparently, it doesn't work without it.\r\n self.mask = pygame.mask.from_threshold(self.image, self.shot_color, (1,1,1))\r\n\r\n if(self.world.visible_objects(self, self.world.walls)):\r\n return self.draw_shot(dist-stepsize, stepsize/2)\r\n else:\r\n return self.draw_shot(dist+stepsize, stepsize/2)", "def is_harvestable(self, name_path, item):\n name = name_path[-1]\n if (\n name.startswith(\"_\")\n or id(item) in self._seen\n or name in self.excludes\n or self._join_path_names(*name_path) in self.excludes\n ):\n return False\n\n self._seen.add(id(item))\n\n return (\n (callable(item) or is_regular_class(name, item) or inspect.ismodule(item))\n and (not self.base_modules or inspect.getmodule(item) in self.base_modules)\n and (not self.predicate or self.predicate(item))\n )", "def _IsTimeReplot( self ):\n return True", "def test_takes_shot(self):\n player = TestPlayer()\n self.ai.take_shot(player)\n self.assertEqual(1, player.shots_taken)", "def ismount(self, vPath):\n return vPath[1:] in self.listdir('/')", "def check_already_mounted(devpath, mountpoint):\n mounts = Mounter().read_mounts()\n for m in mounts:\n if devpath == m.device and mountpoint == m.mountpoint:\n return True\n return False", "def isPng(self):\n\t\treturn self.extension == '.png'", "def is_view_loaded(view):\n\n if not G.AGENT:\n return\n if not G.AGENT.joined_workspace:\n return\n if view.is_loading():\n return\n\n buf = get_buf(view)\n if not buf or buf.get('buf') is None:\n return\n\n return buf", "def check_exist(self):\n helper.RbdImageOperator._check_rbd_image(self.real_path)", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def exists(self):\n if self.attributes[AT.GARBAGE]:\n return False\n if get_ticks() < self.attributes[AT.TIME_TO_BE_SHOWN]:\n return False\n return True", "def isRenderable(data):\n return data.find(\"<molecule\") != -1 and data.find(\"<atom\") != -1", "def is_stac_item(path: str) -> bool:\n if isinstance(path, pystac.Item):\n return True\n else:\n try:\n pystac.Item.from_file(str(path))\n return True\n # with .tif as url, pystac/stac_io.py/read_test_from_href() returns Exception, not HTTPError\n except Exception:\n return False", "def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk", "def outOfScreen(self):\n x,y = self.currentLevel.transformToScreenCoordinate(self.position)\n w,h = cblocals.GAME_SCREEN_SIZE\n if x<0 or y<0 or x>x or y>h:\n return True\n return False", "def shots_allowed(self, shots_allowed):\n\n self._shots_allowed = shots_allowed", "def IsTileBlockingView(self, x, y):\r\n index = self.GetTileIndex([x, y])\r\n return self.palette[index].get('noview', 0)", "def is_snapshot(self):\n return self.proto.display_type == DISPLAY_TYPE.Snapshot.value", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n\n # If we're on a diff viewer page, then this should be initially\n # rendered, but might be hidden.\n if match.url_name in diffviewer_url_names:\n return True\n\n review_request = context.get('review_request')\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.has_diffsets)", "def Exists(self, path: str) -> bool:\n ...", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def isGrabando(self):\n return 
self.grabando", "def _is_rage(self):\n\n# dir_list = os.listdir(os.environ['PWD'])\n\n# # Check .../*/pc/data/cdimages dir\n# for data_dir in dir_list:\n# if os.path.exists(os.path.join(os.environ['PWD'], data_dir, 'pc/data/cdimages')):\n# return True\n if os.path.exists(os.path.join(os.environ['PWD'], 'pc/data/cdimages')):\n return True\n\n return False", "def check(self):\n # get the data from shotgun\n app = self.parent.app\n context = app.context\n\n # get step short name\n filters = [[\"id\", \"is\", context.step[\"id\"]]]\n fields = [\"short_name\"]\n stepShortName = app.shotgun.find_one(\n \"Step\", filters=filters, fields=fields)[\"short_name\"]\n\n try:\n shotNode = gNodes.getTopGNode()\n except:\n shotNode = None\n\n if shotNode:\n metadataCode = shotNode.grid_code.get()\n metadataPipeStep = shotNode.grid_pipeStep.get(asString=True)\n if not (stepShortName == metadataPipeStep and\n context.entity[\"name\"] == metadataCode):\n self.status = self.errorMode\n self.addError(\"Context and shot node metadata don't match\")\n self.errorMessage = \"Context and shot node metadata don't match\"\n else:\n self.status = \"OK\"\n else:\n self.status = \"OK\"", "def exists(self) -> bool:\n p = pathlib.Path(self.summary_path)\n return p.exists()", "def passable(self, point):\n return point not in self.obstacles", "def test_should_render_for_owner(self) -> None:\n self.assertTrue(self.action.should_render(\n context=self._create_request_context()))", "def is_visible(self, path):\n st = os.lstat(path)\n minute = 60\n hour = minute * 60\n day = hour * 24\n week = day * 7\n return time.time() - st.st_ctime < week * 2", "def checkAssetPath():\n\tverbose.print_(\"Checking for published assets...\", 4)\n\n\t# import setJob\n\n\t# Get the paths of the job and all shots within the job\n\tpaths = [os.environ['IC_JOBPATH'], ]\n\tshots = _setJob_listShots(os.environ['IC_JOB']) # UPDATE\n\tfor shot in shots:\n\t\tpaths.append( _setJob_getPath(os.environ['IC_JOB'], shot) ) # UPDATE\n\n\tfor path in paths:\n\t\tassetDir = os.path.join(path, '.publish')\n\t\t#print(assetDir)\n\n\t\tif os.path.isdir(assetDir):\n\t\t\tassetTypeDirs = []\n\n\t\t\t# Get subdirectories\n\t\t\tsubdirs = next(os.walk(assetDir))[1]\n\t\t\tif subdirs:\n\t\t\t\tfor subdir in subdirs:\n\t\t\t\t\tif not subdir.startswith('.'): # ignore directories that start with a dot\n\t\t\t\t\t\tassetTypeDirs.append(subdir)\n\n\t\t\tif assetTypeDirs:\n\t\t\t\treturn True\n\n\treturn False", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def hasCurrentFrame(self):\n if self.currentFrame == []:\n return False\n return True", "def check_footage():\n\n ok = True\n\n for clip in bpy.data.movieclips:\n abspath = bpy.path.abspath(clip.filepath, clip.library)\n if not os.path.exists(abspath):\n print(\"Clip {} is not found\" . 
format(abspath))\n ok = False\n\n return ok", "def is_path_registered(path):\n result = db_session.query(MediaFiles).filter_by(path=path).all()\n return True if result else False", "def is_mountpoint(path: str) -> bool:\n mtpt = subprocess.run([\"mountpoint\", path], check=False, capture_output=True)\n return mtpt.returncode == 0", "def process_shot(self):\n if self.has_active_ship():\n self.mark = constants.HIT_SHIP_MARK\n self.hit_count += 1\n if self.hit_count == self.ship.power:\n self.mark = constants.DEAD_SHIP_MARK\n return constants.KILL\n else:\n return constants.HIT\n elif not self.occupied or self.mark == constants.MISS_HIT_MARK:\n self.mark = constants.MISS_HIT_MARK\n return constants.MISS", "def has_asset(self, name):\n return name in self.assets", "def available(self):\n contextPhyPath = self.context.getPhysicalPath()\n portalPhyPath = api.portal.get().getPhysicalPath()\n path = [elem for elem in list(contextPhyPath) if elem not in list(portalPhyPath)] # noqa\n depth = len(path)\n if depth < 2:\n return False\n return True", "def conditionsAreMetForDrawing(self):\n\t\tcurrentController = self.controller.view().window().windowController()\n\t\tif currentController:\n\t\t\ttool = currentController.toolDrawDelegate()\n\t\t\ttextToolIsActive = tool.isKindOfClass_( NSClassFromString(\"GlyphsToolText\") )\n\t\t\thandToolIsActive = tool.isKindOfClass_( NSClassFromString(\"GlyphsToolHand\") )\n\t\t\tif not textToolIsActive and not handToolIsActive: \n\t\t\t\treturn True\n\t\treturn False", "def is_gripping(self):\n return self.gripper_io.get_signal_value(\"is_gripping\")", "def tileOccupied(self, i, j):\n if self.tiles[i][j] == 1 or i == 0 or i == self.size[0] - 1 or j == 0 or j == self.size[1] - 1:\n return True\n for prop in self.props:\n if prop.i == i and prop.j == j:\n return True\n return False", "def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True", "def is_passable(self, tile, pos):\n #Check superclass to see if it's passable first\n if not super().is_passable(tile, pos):\n return False\n\n #This unit can't pass these specific terrains\n ttype = tile.type\n if (tile.type == 'forest'):\n return False\n \n #The tile is passable\n return True", "def pointer_has_grab(self):\n lib.wlr_seat_pointer_has_grab(self._ptr)", "def has_geom(self):\n return bool(self.give_geom())", "def __isTileGoalState(self, point):\n return point == self.goalPoint", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def is_chopped_run(self):\n return self._slicerKey is not None", "def isObscuredBy(self, QGraphicsItem): # real signature unknown; restored from __doc__\n return False" ]
[ "0.66429377", "0.6288864", "0.6145428", "0.6053248", "0.6007993", "0.5842656", "0.5803181", "0.5795196", "0.57627773", "0.5747576", "0.56485415", "0.56485415", "0.5615542", "0.55867237", "0.5571235", "0.55255395", "0.55035883", "0.54886705", "0.5453938", "0.545361", "0.5423278", "0.5396576", "0.53901947", "0.5384317", "0.5359062", "0.532667", "0.53093433", "0.53070503", "0.5258432", "0.52555484", "0.5232192", "0.5228089", "0.51907724", "0.51840115", "0.5181288", "0.51768976", "0.51756436", "0.517221", "0.5130473", "0.5129172", "0.512652", "0.51200604", "0.51143354", "0.51142186", "0.508913", "0.50875556", "0.5086202", "0.50849783", "0.50799537", "0.50750184", "0.5074312", "0.5071816", "0.50643384", "0.5062297", "0.50393003", "0.5038861", "0.50305533", "0.5011673", "0.50110716", "0.5005365", "0.50050884", "0.49980915", "0.4988758", "0.4988033", "0.49692428", "0.4949481", "0.4948119", "0.49431288", "0.49405923", "0.4938224", "0.49376076", "0.4932352", "0.49061778", "0.4905849", "0.49019855", "0.49018234", "0.48997322", "0.4897474", "0.48904142", "0.4889905", "0.48864028", "0.48861563", "0.48840585", "0.4880152", "0.48763382", "0.48738554", "0.48731187", "0.4868063", "0.48660618", "0.48607853", "0.48587984", "0.48569396", "0.4856811", "0.48510385", "0.48506558", "0.48466906", "0.48457876", "0.48447526", "0.48438078", "0.4839193" ]
0.7443225
0
Find links in JSON-compatible data.
def find_links(obj):
    if isinstance(obj, dict):
        for key, value in obj.iteritems():
            for url in find_links(value):
                yield url
    elif isinstance(obj, list):
        for item in obj:
            for url in find_links(item):
                yield url
    else:
        try:
            if is_link(str(obj)):
                yield obj
        except Exception:
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_urls(json_dict):\n url_list = []\n count = 0\n for i in json_dict[\"items\"]:\n if i[\"is_answered\"]:\n url_list.append(i[\"link\"])\n count += 1\n if count == 3 or count == len(i):\n break\n \n for i in url_list:\n wb.open(i)", "def get_urls(self, data):\n data = json.loads(data)\n urls = []\n for article in data['articles']:\n urls.append(article['url'])\n return urls", "def search_link(self):\n return self._json['coredata'].get('link', [])[2].get('@href')", "def links(self):\n links = {}\n data = self.data['links']\n for key in data:\n links[key] = data[key]['url']\n return links", "async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:\n\n content = await res.text()\n soup = BeautifulSoup(content, 'html.parser')\n links = [self._format(res.url, a) for a in soup.find_all('a')]\n return filter(lambda l: l is not None, links)", "def getDiscussionLinks(self, json_info, tag_filter=[]):\n discussion_links = []\n for t in json_info['document']['data']:\n if(t['type'] == 'discussions'):\n id = (t['id'])\n slug = t['attributes']['slug']\n tags = []\n for tag in t['relationships']['tags']['data']:\n tags.append(int(tag['id']))\n \n if(len(tag_filter) == 0 or len(list(set(tag_filter) & set(tags))) > 0):\n discussion_links.append(\"https://fbtag.net/d/{id}-{slug}\".format(id=id, slug=slug))\n else:\n logging.debug(msg=(tags, 'not in filter ', tag_filter, 'link', id, slug))\n pass\n \n return discussion_links", "def parse_json_export(json_file):\n\n json_file.seek(0)\n links = json.load(json_file)\n json_date = lambda s: datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ')\n\n for link in links:\n # example line\n # {\"href\":\"http:\\/\\/www.reddit.com\\/r\\/example\",\"description\":\"title here\",\"extended\":\"\",\"meta\":\"18a973f09c9cc0608c116967b64e0419\",\"hash\":\"910293f019c2f4bb1a749fb937ba58e3\",\"time\":\"2014-06-14T15:51:42Z\",\"shared\":\"no\",\"toread\":\"no\",\"tags\":\"reddit android\"}]\n if link:\n # Parse URL\n url = link.get('href') or link.get('url') or link.get('URL')\n if not url:\n raise Exception('JSON must contain URL in each entry [{\"url\": \"http://...\", ...}, ...]')\n\n # Parse the timestamp\n ts_str = str(datetime.now().timestamp())\n if link.get('timestamp'):\n # chrome/ff histories use a very precise timestamp\n ts_str = str(link['timestamp'] / 10000000) \n elif link.get('time'):\n ts_str = str(json_date(link['time'].split(',', 1)[0]).timestamp())\n elif link.get('created_at'):\n ts_str = str(json_date(link['created_at']).timestamp())\n elif link.get('created'):\n ts_str = str(json_date(link['created']).timestamp())\n elif link.get('date'):\n ts_str = str(json_date(link['date']).timestamp())\n elif link.get('bookmarked'):\n ts_str = str(json_date(link['bookmarked']).timestamp())\n elif link.get('saved'):\n ts_str = str(json_date(link['saved']).timestamp())\n \n # Parse the title\n title = None\n if link.get('title'):\n title = link['title'].strip() or None\n elif link.get('description'):\n title = link['description'].replace(' — Readability', '').strip() or None\n elif link.get('name'):\n title = link['name'].strip() or None\n\n yield {\n 'url': url,\n 'timestamp': ts_str,\n 'title': title,\n 'tags': link.get('tags') or '',\n 'sources': [json_file.name],\n }", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": 
\"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def readLinkoJson(file):\n with open(file, 'r') as jsonFile:\n preLinko = json.load(jsonFile)\n\n linko = Linkograph([], preLinko[0])\n\n for entry in preLinko[1:]:\n linko.append((set(entry[0]), set(entry[1]), set(entry[2])))\n linko.uuids.append(entry[3])\n\n return linko", "def getAllLinks(jsonData, propDict, refDict, prefix='', context=''):\n linkList = OrderedDict()\n # check keys in propertyDictionary\n # if it is a Nav property, check that it exists\n # if it is not a Nav Collection, add it to list\n # otherwise, add everything IN Nav collection\n # if it is a Complex property, check that it exists\n # if it is, recurse on collection or individual item\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['isNav']:\n insideItem = jsonData.get(item)\n if insideItem is not None:\n cType = propDict[key].get('isCollection') \n autoExpand = propDict[key].get('OData.AutoExpand',None) is not None or\\\n propDict[key].get('OData.AutoExpand'.lower(),None) is not None\n if cType is not None:\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n for cnt, listItem in enumerate(insideItem):\n linkList[prefix+str(item)+'.'+getType(propDict[key]['isCollection']) +\n '#' + str(cnt)] = (listItem.get('@odata.id'), autoExpand, cType, cSchema, listItem)\n else:\n cType = propDict[key]['attrs'].get('type')\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n linkList[prefix+str(item)+'.'+getType(propDict[key]['attrs']['name'])] = (\\\n insideItem.get('@odata.id'), autoExpand, cType, cSchema, insideItem)\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['realtype'] == 'complex':\n if jsonData.get(item) is not None:\n if propDict[key].get('isCollection') is not None:\n for listItem in jsonData[item]:\n linkList.update(getAllLinks(\n listItem, propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n else:\n linkList.update(getAllLinks(\n jsonData[item], propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n rsvLogger.debug(str(linkList))\n return linkList", "def get_all_links(self):\n links_url = \"{}/links\".format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)", "def parse_link(self,data,api):\n return REACT_API_DOCS_URL + data.FILE.split('/')[1] + api.find('a',attrs = {'class': 'hash-link'}).attrs['href']", "def iter_links(self):", "def get_links(self):\n return self.__data['links']", "def extract_links(data):\n soup = BeautifulSoup(data)\n for link in soup.findAll(\"a\"):\n for pair in link.attrs:\n if pair[0] == u'href':\n yield pair[1]", "def _parse_links(self, item, start, links_list):\n result_list = []\n target_str_1 = start.strftime(\"%m-%d-%Y\").replace(\" 0\", \" \")\n target_str_2 = 
start.strftime(\"%m-%d-%y\").replace(\" 0\", \" \")\n for item in links_list:\n if item[\"date\"] in target_str_1 or item[\"date\"] in target_str_2:\n new_dict = {}\n new_dict[\"href\"] = item[\"href\"]\n new_dict[\"title\"] = item[\"title\"]\n result_list.append(new_dict)\n return result_list", "def test_get_variant_links(variant_obj):\n # GIVEN a variant object without links\n assert \"thousandg_link\" not in variant_obj\n # WHEN fetching the variant links\n links = get_variant_links(variant_obj)\n # THEN check that links are returned\n assert \"thousandg_link\" in links", "def testDereferenceLinks(self):\n ddict = {\"ext_group\": {\"dataset\": 10}}\n dictdump.dicttonx(ddict, self.h5_ext_fname)\n ddict = {\"links\": {\"group\": {\"dataset\": 10, \">relative_softlink\": \"dataset\"},\n \">relative_softlink\": \"group/dataset\",\n \">absolute_softlink\": \"/links/group/dataset\",\n \">external_link\": \"nx_ext.h5::/ext_group/dataset\"}}\n dictdump.dicttonx(ddict, self.h5_fname)\n\n ddict = dictdump.h5todict(self.h5_fname, dereference_links=True)\n self.assertTrue(ddict[\"links\"][\"absolute_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"relative_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"external_link\"], 10)\n self.assertTrue(ddict[\"links\"][\"group\"][\"relative_softlink\"], 10)", "def _recursive_gh_get(href, items, password=None):\n response = GitHub._request('GET', href, token=password)\n response.raise_for_status()\n items.extend(response.json())\n if \"link\" not in response.headers:\n return\n # links = link_header.parse(response.headers[\"link\"])\n # rels = {link.rel: link.href for link in links.links}\n # if \"next\" in rels:\n # ghRelease._recursive_gh_get(rels[\"next\"], items)", "def get_json_urls(self):\n gi = GetImageURLs(self.json_url)\n self.urls = gi.get_image_url()\n\n # Turn it into a Python set\n self.urls_from_json = Set(self.urls)", "def _parse_links(self, item):\n regex = compile(r\"<a\\s+(?:[^>]*?\\s+)?href=([\\\"\\'])(.*?)\\1.*\\>(.*)<\\/a>\")\n links = [\n {\"href\": href, \"title\": title}\n for (_, href, title) in findall(regex, item[\"Event\"][\"Description\"])\n ]\n for link in links:\n if link[\"href\"][0] == \"/\":\n link[\"href\"] = \"https://www.pghschools.org\" + link[\"href\"]\n return links", "def get_image_links(data):\n painting_links = []\n\n print(data)\n\n for painting in data:\n painting_links.append(painting['image'])\n\n return painting_links", "def fetch_url_information(status_code):\n links = []\n result = {}\n obj = LinksInformation.objects.filter(status=status_code)\n for i in obj:\n links.append(i.link)\n result[\"result\"] = links\n json.dump(result, open(\"airlines/links.json\", \"w\"), indent=4)\n return result", "def links(self):\n\t\treturn self.list_of_links", "def result_urls(self, job_id: str, show_progress: bool = False) -> List:\n data = self.result_json(job_id, show_progress)\n urls = [x['href'] for x in data.get('links', []) if x['rel'] == 'data']\n return urls", "def get_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: link\n #BEGIN get_data_link\n id_ = _get_id_from_object(params, 'linkid', required=True)\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_data_link', ctx.log_info)\n dl = self._samples.get_data_link_admin(id_)\n link = _links_to_dicts([dl])[0]\n #END get_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(link, dict):\n raise 
ValueError('Method get_data_link return value ' +\n 'link is not type dict as required.')\n # return the results\n return [link]", "def fetchJson(url):", "def extract_from_json_ld(self, data, url):\n\n scripts = data.xpath(\"//script[@type='application/ld+json']\")\n records = [ ]\n\n for scr in scripts:\n\n try:\n data = json.loads(scr.text)\n except:\n continue\n\n if not isinstance(data, dict):\n continue\n\n record = dict([ (k, v) for k, v in data.items() if k in self.store_fields ])\n if \"recipeIngredient\" not in record and \"ingredients\" in data:\n record[\"recipeIngredient\"] = data[\"ingredients\"]\n\n record[\"url\"] = url\n record[\"collect_time\"] = datetime.utcnow()\n\n if self.validate(record):\n records.append(record)\n\n return records", "def get_data_links_from_data(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN get_data_links_from_data\n upa = _get_upa_from_object(params)\n dt = _get_datetime_from_epochmillseconds_in_object(params, 'effective_time')\n admin = _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_data_links_from_data', ctx.log_info, skip_check=not params.get('as_admin'))\n links, ts = self._samples.get_links_from_data(\n _UserID(ctx[_CTX_USER]), upa, dt, as_admin=admin)\n results = {'links': _links_to_dicts(links),\n 'effective_time': _datetime_to_epochmilliseconds(ts)\n }\n #END get_data_links_from_data\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method get_data_links_from_data return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]", "def get_links(self, response, domain, port, folder):\n\t\t# find link in tags: a, link, form, button\n\t\t# call to all function in file get_link\n\t\t# for method in get_link:\n\t\tlinks = get_link(response, domain, port, folder)\n\t\tlinks = filter(None, links.getResults())\n\t\treturn links", "def access_jsonlines_urls(json_path=DEFAULT_JSON_PATH):\n token_dict_set = set()\n with open(json_path, 'rb') as f:\n for line in json_lines.reader(f):\n for url in line.keys():\n token_dict_set.add(url)\n return token_dict_set", "def app_links_json(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_links_json\")", "def app_links_json(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_links_json\")", "def collectLinks(self, output):\n pass", "def app_links_json(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_links_json\")", "def test_links(get_good_response):\n response = get_good_response(\"/links\")\n\n assert \"data\" in response", "def nuke_link(word):\n\n for i in json_read_link:\n try:\n return i[word].encode('utf-8')\n except:\n pass", "def test_00_link_object(self):\r\n # For app\r\n res = self.app.get(\"/api/app/1\", follow_redirects=True)\r\n output = json.loads(res.data)\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n\r\n err_msg = \"There should be a Links list with the category URI\"\r\n assert output['links'] is not None, err_msg\r\n assert len(output['links']) == 1, err_msg\r\n app_link = self.hateoas.link(rel='category', title='category',\r\n href='http://localhost/api/category/1')\r\n assert app_link == output['links'][0], err_msg\r\n\r\n app_link = self.hateoas.link(rel='self', title='app',\r\n href='http://localhost/api/app/1')\r\n err_msg 
= \"The object link is wrong: %s\" % output['link']\r\n assert app_link == output['link'], err_msg\r\n\r\n # For task\r\n res = self.app.get(\"/api/task/1\", follow_redirects=True)\r\n output = json.loads(res.data)\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n task_link = self.hateoas.link(rel='self', title='task',\r\n href='http://localhost/api/task/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert task_link == output['link'], err_msg\r\n err_msg = \"There should be one parent link: app\"\r\n assert output.get('links') is not None, err_msg\r\n assert len(output.get('links')) == 1, err_msg\r\n err_msg = \"The parent link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='app',\r\n href='http://localhost/api/app/1')\r\n assert output.get('links')[0] == app_link, err_msg\r\n\r\n # For taskrun\r\n res = self.app.get(\"/api/taskrun/1\", follow_redirects=True)\r\n output = json.loads(res.data)\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n task_link = self.hateoas.link(rel='self', title='taskrun',\r\n href='http://localhost/api/taskrun/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert task_link == output['link'], err_msg\r\n err_msg = \"There should be two parent links: app and task\"\r\n assert output.get('links') is not None, err_msg\r\n assert len(output.get('links')) == 2, err_msg\r\n err_msg = \"The parent app link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='app',\r\n href='http://localhost/api/app/1')\r\n assert output.get('links')[0] == app_link, err_msg\r\n\r\n err_msg = \"The parent task link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='task',\r\n href='http://localhost/api/task/1')\r\n assert output.get('links')[1] == app_link, err_msg\r\n res = self.app.post(\"/api/taskrun\")\r\n\r\n # For category\r\n res = self.app.get(\"/api/category/1\", follow_redirects=True)\r\n output = json.loads(res.data)\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n category_link = self.hateoas.link(rel='self', title='category',\r\n href='http://localhost/api/category/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert category_link == output['link'], err_msg\r\n err_msg = \"There should be no other links\"\r\n assert output.get('links') is None, err_msg\r\n err_msg = \"The object links should are wrong\"\r\n\r\n # For user\r\n # Pending define what user fields will be visible through the API\r\n # Issue #626. 
For now let's suppose link and links are not visible\r\n # res = self.app.get(\"/api/user/1?api_key=\" + self.root_api_key, follow_redirects=True)\r\n # output = json.loads(res.data)\r\n # err_msg = \"There should be a Link with the object URI\"\r\n # assert output['link'] is not None, err_msg\r\n # user_link = self.hateoas.link(rel='self', title='user',\r\n # href='http://localhost/api/user/1')\r\n # err_msg = \"The object link ir wrong: %s\" % output['link']\r\n # assert user_link == output['link'], err_msg\r\n # # when the links specification of a user will be set, modify the following\r\n # err_msg = \"The list of links should be empty for now\"\r\n # assert output.get('links') == None, err_msg\r", "def get_links(self, node): # pragma: no cover\n\t\traise NotImplementedError", "def parse_links_from_HTML():\n\n file_content = open(BANK_LIST_HTML_FILE, 'r').read()\n\n # Parsing html files to get list of all anchor tags \n soup = BeautifulSoup(file_content)\n table_content = soup.find('table', class_='tablebg')\n anchor_links = table_content.find_all('a')\n \n abbr_map = load_from_a_file(BANK_NAME_JSON_FILE)\n bank_links, urls_list = {}, {}\n for anchor_link in anchor_links:\n bank_links[str(anchor_link.text)] = anchor_link.get('href')\n for abbr, bank_name in abbr_map.items():\n if bank_name not in bank_links:\n print \"{0} bank from RBI list\".format(bank_name)\n else:\n urls_list[abbr] = bank_links[bank_name]\n dump_to_file(bank_links, BANK_NAME_FILE_URL_JOSN)\n dump_to_file(urls_list, ABBR_BANK_NAME_FILE_URL)", "def _complete_href_links(self, parent_collection, current):\n if isinstance(current, HyperLink) or \\\n (isinstance(current, dict) and \"href\" in current):\n if isinstance(current[\"href\"], (bytes, str)):\n resource = None\n if current[\"href\"] in self._cache:\n resource = self._cache[current[\"href\"]]\n elif current[\"href\"].startswith(\"#\"):\n resource = jsonpointer.resolve_pointer(parent_collection,\n current[\"href\"][1:])\n if not resource:\n resource = \"Unresolved\"\n elif current[\"href\"].startswith(\"$\"):\n path = jsonpath(parent_collection,\n current[\"href\"], result_type=\"PATH\")\n if path:\n resource = eval(\"parent_collection%s\" % path[0].lstrip(\"$\"))\n else:\n resource = \"Unresolved\"\n self._cache[current[\"href\"]] = resource\n if resource and resource != \"Unresolved\":\n if \"selfRef\" not in resource:\n ret = self.set_self_ref(resource)\n if ret < 0:\n return ret\n current[\"href\"] = resource[\"selfRef\"]\n return 0\n elif isinstance(current, list):\n keys = range(len(current))\n elif isinstance(current, dict):\n keys = current.keys()\n else:\n return 0\n \n for key in keys:\n value = current[key]\n if isinstance(value, (NetworkResource, Topology)) and \\\n \"selfRef\" not in value:\n ret = self.set_self_ref(value)\n if ret < 0:\n return ret\n if isinstance(value, list) or isinstance(value, dict):\n ret = self._complete_href_links(parent_collection, value)\n if ret < 0:\n return ret\n return 0", "def external_search_engines_links(search, deep=0, debug=0, links=[]):\n s = Subseek()\n for search_engine in SEARCH_ENGINES:\n for subtitle_search_engine in SUBTITLE_SEARCH_ENGINES:\n if debug == 1:\n print \"Searching '%s' in '%s'\" % (search,\n search_engine['name'])\n links_aux = s.get_links(search_engine, search, deep,\n subtitle_search_engine[\"name\"])\n if not links_aux or len(links_aux) == 0:\n if debug == 1:\n print \"No match found in '%s'\" % search_engine['name']\n else:\n if debug == 1:\n print \"%s matches found in '%s'\" % 
(len(links_aux),\n search_engine['name'])\n links = links_aux + links\n\n return links", "def test_01_link_object(self):\r\n # For app\r\n res = self.app.get(\"/api/app\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n app_link = self.hateoas.link(rel='self', title='app',\r\n href='http://localhost/api/app/1')\r\n\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert app_link == output['link'], err_msg\r\n\r\n err_msg = \"There should be a Links list with the category URI\"\r\n assert output['links'] is not None, err_msg\r\n assert len(output['links']) == 1, err_msg\r\n app_link = self.hateoas.link(rel='category', title='category',\r\n href='http://localhost/api/category/1')\r\n assert app_link == output['links'][0], err_msg\r\n\r\n # For task\r\n res = self.app.get(\"/api/task\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n task_link = self.hateoas.link(rel='self', title='task',\r\n href='http://localhost/api/task/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert task_link == output['link'], err_msg\r\n err_msg = \"There should be one parent link: app\"\r\n assert output.get('links') is not None, err_msg\r\n assert len(output.get('links')) == 1, err_msg\r\n err_msg = \"The parent link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='app',\r\n href='http://localhost/api/app/1')\r\n assert output.get('links')[0] == app_link, err_msg\r\n\r\n # For taskrun\r\n res = self.app.get(\"/api/taskrun\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n task_link = self.hateoas.link(rel='self', title='taskrun',\r\n href='http://localhost/api/taskrun/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert task_link == output['link'], err_msg\r\n err_msg = \"There should be two parent links: app and task\"\r\n assert output.get('links') is not None, err_msg\r\n assert len(output.get('links')) == 2, err_msg\r\n err_msg = \"The parent app link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='app',\r\n href='http://localhost/api/app/1')\r\n assert output.get('links')[0] == app_link, err_msg\r\n\r\n err_msg = \"The parent task link is wrong\"\r\n app_link = self.hateoas.link(rel='parent', title='task',\r\n href='http://localhost/api/task/1')\r\n assert output.get('links')[1] == app_link, err_msg\r\n\r\n # Check that hateoas removes all link and links from item\r\n without_links = self.hateoas.remove_links(output)\r\n err_msg = \"There should not be any link or links keys\"\r\n assert without_links.get('link') is None, err_msg\r\n assert without_links.get('links') is None, err_msg\r\n\r\n # For category\r\n res = self.app.get(\"/api/category\", follow_redirects=True)\r\n output = json.loads(res.data)[0]\r\n err_msg = \"There should be a Link with the object URI\"\r\n assert output['link'] is not None, err_msg\r\n category_link = self.hateoas.link(rel='self', title='category',\r\n href='http://localhost/api/category/1')\r\n err_msg = \"The object link is wrong: %s\" % output['link']\r\n assert category_link == output['link'], err_msg\r\n err_msg = \"There should be no other links\"\r\n assert output.get('links') is None, err_msg\r\n 
err_msg = \"The object links should are wrong\"\r\n\r\n # For user\r\n # Pending define what user fields will be visible through the API\r\n # Issue #626. For now let's suppose link and links are not visible\r\n # res = self.app.get(\"/api/user?api_key=\" + self.root_api_key, follow_redirects=True)\r\n # output = json.loads(res.data)[0]\r\n # err_msg = \"There should be a Link with the object URI\"\r\n # assert output['link'] is not None, err_msg\r\n # user_link = self.hateoas.link(rel='self', title='user',\r\n # href='http://localhost/api/user/1')\r\n # err_msg = \"The object link ir wrong: %s\" % output['link']\r\n # assert user_link == output['link'], err_msg\r\n # # when the links specification of a user will be set, modify the following\r\n # err_msg = \"The list of links should be empty for now\"\r\n # assert output.get('links') == None, err_msg\r", "def _parse_links(self, item) -> list:\n # TODO This would be a \"nice to have\" but is not necessary right now.\n return [{\"href\": \"\", \"title\": \"\"}]", "def test_json_format(self):\n response = self.client.get(reverse('search'), {\n 'q': 'bookmarks',\n 'a': '1',\n 'format': 'json',\n }, follow=True)\n eq_(response.status_code, 200)\n eq_(response['Content-Type'], 'application/json')", "def getLink(self):", "def getLinks(link):\n source = requests.get(link).text\n soup = BeautifulSoup(source, 'lxml')\n rows = soup.find_all(class_ = 'column-1') #select which column \n list_of_links = []\n \n for row in rows[1:]: #rows[1:] is used in case first row is a title row (ie there is no useful data here)\n name = row.find('a')\n link = name.attrs['href'] #the data I'm trying to extract\n list_of_links.append(link)\n return list_of_links", "def in_collections(self):\n links = []\n for link in self.link:\n if link.rel == PARENT_LINK_REL and link.href:\n links.append(link)\n return links", "def get_links(self):\r\n return self.links", "def links(self):\n return self.container['links']", "def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links", "def test_read_json1():\n s = JsonSource()\n g = s.parse(os.path.join(RESOURCE_DIR, 'valid.json'))\n nodes = {}\n edges = {}\n for rec in g:\n if rec:\n if len(rec) == 4:\n edges[(rec[0], rec[1])] = rec[3]\n else:\n nodes[rec[0]] = rec[1]\n\n assert len(nodes.keys()) == 6\n assert len(edges.keys()) == 5\n\n n = nodes['MONDO:0017148']\n assert 'id' in n and n['id'] == 'MONDO:0017148'\n assert n['name'] == 'heritable pulmonary arterial hypertension'\n assert n['category'][0] == 'biolink:Disease'\n\n e = edges[('HGNC:11603', 'MONDO:0017148')]\n assert e['subject'] == 'HGNC:11603'\n assert e['object'] == 'MONDO:0017148'\n assert e['predicate'] == 'biolink:related_to'\n assert e['relation'] == 'RO:0004013'", "def get_showreel_item_urls(self):\n\n links = []\n rel_path = \"../\"\n if self.display:\n rel_path = rel_path * 2\n for item in self.showreel_document[\"reels\"]:\n if item[\"item_type\"] == 'dashboard':\n link = 
\"../%sdisplay/dashboard/%s\" % (rel_path, item[\"title\"])\n links.append(json.dumps(link))\n elif item[\"item_type\"] == 'graph':\n link = \"../%sdisplay/graph/%s\" % (rel_path, item[\"title\"])\n links.append(json.dumps(link))\n\n return links", "def _parse_links(self, response, start):\n links = self.document_date_map[start.date()]\n for link in response.css(\".agenda-min-pres .field a\"):\n link_url = response.urljoin(link.xpath(\"@href\").extract_first())\n title = link.xpath(\"./text()\").extract_first()\n if title.strip().startswith(\"Agenda\"):\n title = \"Agenda\"\n links.append(\n {\"title\": re.sub(r\"\\s+\", \" \", title).strip(), \"href\": link_url}\n )\n return links", "def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError", "def get_all_build_links(url, auth=None, netloc_force=False):\n all_build_links = []\n if 'api/json' not in url:\n # if the api endpoint isnt appended, then append it:\n url += '/api/json/'\n def recurse_to_build(url):\n orig_url = urllib.parse.urlparse(url)\n try:\n json_reply = json.loads(requests.get(url, verify=False, auth=auth).text)\n except JSONDecodeError:\n return\n if 'builds' in json_reply:\n if len(json_reply['builds']) > 0:\n url_link = json_reply['builds'][0]['url']\n if netloc_force:\n url_link = urllib.parse.urlparse(url_link)\n url_link = url_link._replace(netloc=orig_url.netloc)\n url_link = url_link.geturl()\n print(\"{}\".format(url_link))\n all_build_links.append(url_link)\n if 'jobs' in json_reply:\n for job in json_reply['jobs']:\n url_link = job['url'] + 'api/json/'\n if netloc_force:\n url_link = urllib.parse.urlparse(url_link)\n url_link = url_link._replace(netloc=orig_url.netloc)\n url_link = url_link.geturl()\n recurse_to_build(url_link)\n if 'endpoint' in json_reply:\n url_link = json_reply['endpoint'] + 'api/json/'\n if netloc_force:\n url_link = urllib.parse.urlparse(url_link)\n url_link = url_link._replace(netloc=orig_url.netloc)\n url_link = url_link.geturl()\n recurse_to_build(url_link)\n recurse_to_build(url)\n return all_build_links", "def getArticleURLS(base_url, headers):\n \n url_links = []\n for url in base_url:\n try:\n #retrieve webpage from the url\n page = requests.get(url, headers=headers).text\n\n #use beautifulSoup to scrap the page\n soup = BeautifulSoup(page, 'lxml')\n\n links = []\n #loop through the page to collect anchor tags and retrieve the urls\n for a in soup.find_all(href=True):\n links.append(a['href'])\n # titles.append(a.text.encode('ascii',errors='replace').replace(b'?', b' ').decode('utf8'))\n\n #clean collected urls\n final_links = [link for link in links if '/News/' in link]\n clean_links = [link for link in final_links if not 'News/688334-688334' in link]\n clean_urls = ['https://www.monitor.co.ug' + link for link in clean_links if not 'https://www.monitor.co.ug' in link]\n cleaned_links = list(OrderedDict.fromkeys(clean_urls))\n url_links += cleaned_links\n except requests.exceptions.ConnectionError as error:\n return error\n\n #patterns to filter base urls with headlines only\n patterns = ['/News/688324-','/News/National/688334-','/News/Education/688336-',\n '/News/Insight/688338-','/News/World/688340-','/News/photos/3286528-']\n result_list = [row for row in url_links if not any(p in row for p in patterns)]\n\n return json.dumps(result_list)", "def get_links(self, url):\n page_content = self.previous_results['page_content'][url]\n assert 'content' in page_content\n\n if page_content['content'] is None:\n return\n\n result = {\n 'links': [],\n 'exception': None,\n 
}\n\n soup = BeautifulSoup(page_content['content'], 'html.parser')\n\n for link in soup.find_all(\"a\"):\n result['links'].append({\n 'href': link.get('href'),\n 'text': link.text.strip(),\n })\n\n return result", "def find_details_json(self, url):\n response = self.get_response(url)\n if response:\n html_soup = BeautifulSoup(response.text, 'html.parser')\n listings_json = html_soup.find('script', id='__NEXT_DATA__')\n if listings_json:\n listings_json = str(listings_json)\n listings_json = listings_json.replace(\"<script id=\\\"__NEXT_DATA__\\\" type=\\\"application/json\\\">\", \"\").replace(\"</script>\", \"\")\n listings = json.loads(listings_json)\n return listings\n else:\n skip_scraper(self.college, 'Trulia')", "def _parse_links(self, response):\n links = []\n for link in response.css(\".row.mt-4 .list-unstyled a\"):\n links.append(\n {\n \"title\": \" \".join(link.css(\"*::text\").extract()).strip(),\n \"href\": response.urljoin(link.attrib[\"href\"]),\n }\n )\n return links", "def test_read_json2():\n s = JsonSource()\n g = s.parse(os.path.join(RESOURCE_DIR, 'valid.json'), provided_by='Test JSON')\n nodes = {}\n edges = {}\n for rec in g:\n if rec:\n if len(rec) == 4:\n edges[(rec[0], rec[1])] = rec[3]\n else:\n nodes[rec[0]] = rec[1]\n\n assert len(nodes.keys()) == 6\n assert len(edges.keys()) == 5\n\n n = nodes['MONDO:0017148']\n assert 'id' in n and n['id'] == 'MONDO:0017148'\n assert n['name'] == 'heritable pulmonary arterial hypertension'\n assert n['category'][0] == 'biolink:Disease'\n assert 'Test JSON' in n['provided_by']\n\n e = edges[('HGNC:11603', 'MONDO:0017148')]\n assert e['subject'] == 'HGNC:11603'\n assert e['object'] == 'MONDO:0017148'\n assert e['predicate'] == 'biolink:related_to'\n assert e['relation'] == 'RO:0004013'\n assert 'Test JSON' in e['provided_by']", "def json_externals(json):\n return set([ x[\"name\"] for x in json.get(\"externals\",[]) ])", "def _get_img_urls(self, jdict):\n\n # find photos inside the JSON file\n pics = jdict['props']['homeDetails']['media']['photos']\n urls = [pic['url']['mediumSrc'] for pic in pics]\n return urls", "def read(data):\n return Link(**data)", "def references(md5):\n u = Upload.objects.filter(md5=md5).first()\n if not u:\n abort(404)\n # first, is this searchable?\n is_searchable = False\n count = elastic.count('page', filter={'md5': md5})\n if count > 0:\n is_searchable = True\n #annotations = Reference.objects.filter(upload=u, ref_url__exists=True)\n annotations = Reference.objects.filter(upload=u).order_by('ref_pos')\n # create a list of referenced things\n references = {'references':[], 'searchable': is_searchable}\n for a in annotations:\n try:\n references['references'].append({\n 'pos_x': a.pos_x, \n 'pos': a.pos, \n 'ref': a.ref_upload.md5, \n 'ref_pos': a.ref_pos\n })\n except:\n pass\n return jsonify(references)", "def _find_impl(url, query, count, auto_complete):\n try:\n res = requests.get(\n url,\n params={\"q\": query, \"count\": count, \"autoCorrect\": (\"true\" if auto_complete else \"false\")},\n )\n except (requests.ConnectionError, requests.ConnectTimeout):\n return \"`connection error`\"\n\n try:\n data = json.loads(res.content.decode(\"utf-8\"))\n except ValueError:\n return \"`no valid json`\"\n #print(data)\n\n if not data.get(\"value\"):\n return \"Nix\"\n\n return [v[\"url\"] for v in data[\"value\"]]", "def findLinksByText(page, searchRe):\n urls = []\n page = parseHtmlLinks(page)\n for linkUrl, linkText in page['links'].iteritems():\n dbgStr = 'Checking linkText %s (url %s) against %s' % 
(repr(unidecode.unidecode(linkText)), linkUrl, searchRe.pattern)\n logging.log(5, dbgStr)\n if searchRe.match(linkText):\n urls.append(linkUrl)\n logging.debug('Found link: %s -> %s' % (linkText, linkUrl))\n\n logging.debug('Found links with %s in label: %s' % (repr(searchRe.pattern), urls))\n return urls", "def schema_links(section, sec_key=None):\n NESTED_FORMAT = '%s > %s' # this format is used in docs/js/api.js:normalizeKeys\n links = section.links\n if section.data:\n data = section.data.items()\n for sub_section_key, sub_section in data:\n new_links = schema_links(sub_section, sec_key=sub_section_key)\n links.update(new_links)\n\n if sec_key is not None:\n new_links = OrderedDict()\n for link_key, link in links.items():\n new_key = NESTED_FORMAT % (sec_key, link_key)\n new_links.update({new_key: link})\n return new_links\n\n return links", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def get_external_links(parsed_drug_doc):\n\n external_link_info = list(parsed_drug_doc.find(id='external-links').next_sibling.dl.children)\n external_links = {}\n for i in range(0, len(external_link_info), 2):\n source = external_link_info[i].text\n value = external_link_info[i+1].text\n # Ignoring a few sources for this MVP that don't give obvious alternate IDs.\n if source not in [\"RxList\", \"Drugs.com\", \"PDRhealth\"]:\n external_links[source] = value\n\n return external_links", "def links(self):\n return self.dom.findall(\".//a\")", "def parse_urls(data):\n testing = [0] * len(data[\"Commit_URL\"])\n build = [0] * len(data[\"Commit_URL\"])\n maintenance = [0] * len(data[\"Commit_URL\"])\n for ii in range(len(data[\"Commit_URL\"])):\n try:\n html = urlopen(data[\"Commit_URL\"].iloc[ii])\n bsObj = BeautifulSoup(html, \"html.parser\")\n paths = bsObj.findAll(\"a\", {\"href\": re.compile(r\"#diff-[a-z0-9]+\")})\n for path in paths:\n if len(path.attrs) == 1:\n if re.match(r\".*(build|pom).*\", str(path)):\n build[ii] = 1\n if re.match(r\".*(test|tests|tester).*\", str(path)):\n testing[ii] = 1\n if re.match(r\".*(u|U)til.*\", str(path)) or re.match(r\".*(h|H)elper.*\", str(path)):\n maintenance[ii] = 1\n except HTTPError as e:\n print(data[\"Commit_ID\"].iloc[ii])\n except URLError as e:\n print(\"The server could not be found!\")\n data[\"Testing\"] = testing\n data[\"Build\"] = build\n data[\"Maintenance\"] = maintenance\n return data", "def getLinks(content):\n soup = BeautifulSoup(content, 'lxml')\n links = set([link.get('href') for link in soup.find_all('a')])\n return links", "def get_links(self, data):\n\n count = 0\n for link in data.xpath(\"//*[@href]\"):\n cleaned = re.sub(\"\\?.*\", \"\", link.attrib[\"href\"])\n cleaned = urljoin(self.base_url, cleaned)\n if re.match(self.link_prefix, cleaned, flags = re.I) and \\\n cleaned not in self.link_queue + self.links:\n self.link_queue.append(cleaned)\n self.logger.debug(\"Adding link %s\" % cleaned)\n count += 1\n return count", "def link_scraping(final_links, driver):\n\n for final_link in final_links:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n print('Extracting(classname): ', final_link)\n scrape_data(final_link, final_tags, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)\n else:\n 
print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)", "def getExpandedLinks():", "def get_link_data_from_soup(soup):\n\n link_data = []\n if soup == None:\n return [None, None]\n link = soup.get('href')\n text = soup.text\n if link is not None:\n link = link.strip()\n if text is not None:\n text = text.strip() \n link_data.append(link)\n link_data.append(text)\n return link_data", "def deep_link_scraping(final_links, driver):\n\n import re\n second_links = [] \n for website2 in final_links:\n links2 = extract_all_links(website2, driver)\n final_links1 = find_usefull_links(links2, classmodel, class_count_vect)\n final_links2 = list(set(final_links1) - set(final_links))\n second_links += final_links2\n\n \n second_links = list(dict.fromkeys(second_links))\n second_links1 = find_usefull_links(second_links, classmodel, class_count_vect)\n second_links2 = []\n for link in second_links1:\n if re.search('#', link):\n x = re.search('#', link)\n link = link[:int(x.span()[0])]\n second_links2.append(link)\n else:\n second_links2.append(link)\n\n second_links2 = list(dict.fromkeys(second_links2))\n for final_link in second_links2:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n scrape_data(final_link, final_tags, driver)\n else:\n scrape_data_tag(final_link, driver)\n else:\n scrape_data_tag(final_link, driver)\n return second_links2", "def search(self, links=False):\n if self.type == \"text\":\n mg = Manager()\n ret = mg.dict()\n jobs = []\n p1 = Process(target=self.google_proc, args=(ret,))\n jobs.append(p1)\n p2 = Process(target=self.yahoo_proc, args=(ret,))\n jobs.append(p2)\n p3 = Process(target=self.bing_proc, args=(ret,))\n jobs.append(p3)\n p1.start()\n p2.start()\n p3.start()\n\n for proc in jobs:\n proc.join()\n\n temp = ret.values()[0] + ret.values()[1] + ret.values()[2]\n print temp\n for i in temp:\n f = 0\n for j in self.uniquelinks:\n if i[1] == j[1]:\n f = 1\n if f == 0:\n self.uniquelinks.append(i)\n if links:\n return self.uniquelinks\n else: # [[title, link, data], [title, link, data] ...]\n mg = Manager()\n ret = mg.dict()\n jobs = []\n n = 0\n for li in self.uniquelinks[0:3]:\n p = Process(target=self.data_collector, args=(n, li[1], ret))\n n += 1\n jobs.append(p)\n p.start()\n\n for proc in jobs:\n proc.join()\n print ret.values()\n print len(ret.values())", "def test_finder_detects_latest_find_links(data: TestData) -> None:\n req = install_req_from_line(\"simple\")\n finder = make_test_finder(find_links=[data.find_links])\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url.endswith(\"simple-3.0.tar.gz\")", "def test_get_link_correct_return_type_dict(self):\n\n result = parse_topo.get_link(kytos_link=helper._kytos_link, oxp_url=helper._oxp_url)\n\n assert result.__class__ == dict", "def brief_json(self, absolutize_url):\n return {\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name\n }", "def test_annotation_api_url_links(_, factories):\n annotation = factories.Annotation()\n annotation_api_url = mock.Mock()\n\n feed = atom.feed_from_annotations(\n [annotation], \"atom_url\", mock.Mock(), annotation_api_url=annotation_api_url\n )\n\n annotation_api_url.assert_called_once_with(annotation)\n assert feed[\"entries\"][0][\"links\"][1] == {\n \"rel\": \"alternate\",\n \"type\": \"application/json\",\n \"href\": annotation_api_url.return_value,\n }", "def 
find_active_links(lat, lon, place, name):\n\tWIKIPEDIA_BASE = 'https://wikipedia.org/wiki/Special:Search/'\n\tlinks = {}\n\tlinks[\"wikipediaUrl\"] = WIKIPEDIA_BASE + name\n\n\ttry:\n\t\tfsqReturn = find_foursquare_url(lat, lon, name)\n\t\tfoursquareVenueId = fsqReturn['venueId']\n\t\tfoursquareUrl = fsqReturn['4sqUrl']\n\t\twebsite = fsqReturn['url']\n\t\tdisplayMetadata = fsqReturn['metadata']\n\n\t\tif foursquareUrl is not None:\n\t\t\tlinks['foursquare'] = {\"foursquareUrl\" : foursquareUrl,\n\t\t\t\t\"foursquareVenueId\" : foursquareVenueId}\n\n\t\tif website is not None:\n\t\t\tlinks['url'] = website\n\n\t\tif displayMetadata is not None:\n\t\t\tlinks['displayMetadata'] = displayMetadata\n\n\texcept:\n\t\tprint \"foursquare failed\"\n\n\ttry:\n\t\topenTableUrl = find_open_table_url(place)\n\t\tif openTableUrl is not None:\n\t\t\tlinks['openTableUrl'] = openTableUrl\n\n\texcept: \n\t\tprint \"opentable failed\"\n\n\treturn links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def getURLs():", "def get_titles_and_links(list_of_urls, main_url):\n target = {url: {} for url in list_of_urls}\n for url in list_of_urls:\n content = urlopen(url).read()\n\n soup = BeautifulSoup(content, \"html.parser\")\n\n title = soup.title.string\n if title:\n target[url] = {'title': title}\n else:\n target[url] = {'title': 'No links here'}\n\n # I have to make list because .json file can't recognise sets and json.dump() doesn't work.\n\n links = list({urljoin(url, link.get('href')) for link in soup.find_all('a') if main_url in urljoin(url, link.get('href'))})\n\n if not links:\n target[url]['links'] = 'set()'\n else:\n target[url]['links'] = links\n\n return target", "def show_refs(config, args):\n for item in lib.input_json_lines():\n yield config.repo.ref(item)", "def get_urls(links):\n\n temp_list=[]\n url_list=[]\n temp_list2=[]\n #Open the file where the url's are saved and copy the tuple values into an empty list\n z=open('dbdocs.txt','r')\n for line in z:\n temp_list.append(line)\n #print temp_list\n for x in temp_list:\n index=x.find(',')\n if index==-1:\n y=x.split(\" \",1)\n key=int(y[0])\n val=str(x[1]).replace('\\n','')\n url_list.append((key,val))\n else:\n #find the tab seperator between the key and the url, and\n #split them, in order to put in a list\n key=x[0:index-1]\n #print key\n value=str(x[index+3:len(x)-1])\n #print value\n temp_list2.append((int(key),value))\n #Find the url's of the links where the word was found\n for k in links:\n for i,j in temp_list2:\n #print j\n if i==k:\n url_list.append((i,j))\n break\n #print len(url_list)\n #print len(links)\n z.close()\n return url_list", "def _get_links(self):\n with open(self.source, \"r\", encoding=\"utf-8\") as link_doc:\n return link_doc.readlines()", "def getLinksToPhonesPerBrands(url):\n urls = {}\n print(\"brand link being scrapped : \", url)\n try:\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.content, \"html.parser\")\n li = sourceCode.select('#review-body div > ul > li > a')\n for link in li:\n title = link.get_text()\n url = processUrl(link['href'])\n if title not in urls.keys():\n urls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return urls", "def get_links(self):\n soup = 
BeautifulSoup(requests.get(self.locations_url).text.strip(), features=\"lxml\")\n for region in soup.select('td[class=\"navbox-list navbox-odd\"]'):\n self.links.extend(region.div.find_all('a'))\n\n soup_prague = BeautifulSoup(requests.get(self.url_prague).text.strip(), features=\"lxml\")\n table_prague = soup_prague.findAll('table', {\"class\": \"wikitable\"})[3]\n for prague_parts in table_prague.select(\"tr > td:nth-child(3)\"):\n self.links.extend(prague_parts.find_all('a'))\n\n self.links = [self.url + i['href'] for i in self.links]\n self.links.append(self.url_prague)\n return None", "def search_urls():\n r = req('GET', SUB_API + 'search/urls', params=apply_search_filters())\n urls = []\n for url in demisto.get(r.json(), 'data.items'):\n urls.append({\n 'Result': demisto.get(url, 'result'),\n 'Details': demisto.get(url, 'details')\n })\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.URLs': urls},\n 'HumanReadable': tableToMarkdown('ThreatGrid - URL Search', urls, ['Result', 'Details']),\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })", "def getLinkstoBrands(url):\n brandUrls = {}\n try:\n print(\"Maker link being crawled : \", url)\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.text, \"html.parser\")\n for td in sourceCode.findAll('td'):\n link = td.find('a', href=True)\n title = td.get_text()\n url = processUrl(link['href'])\n if title not in brandUrls.keys():\n brandUrls[title] = url\n print(title, ' ', url)\n else:\n print('no table or row found ')\n except requests.HTTPError as e:\n print('Unable to open url', e)\n return brandUrls", "async def json_selector(self, url: Url, alias: str,\n json_key: Dict, config_id: int = 0):\n await self.responses[url]\n json_out = loads(self.responses[url])\n\n if isinstance(json_key['filter'], str):\n json_key['filter'] = [json_key['filter']]\n results = json_out\n for fltr in json_key['filter']:\n results = results[fltr]\n\n news_dump = NewsDump(config_id, url, alias)\n\n for story in results:\n story_dict = StoryDict()\n for k, val in json_key['attribute'].items():\n if k in ('H0', 'H1', 'H2'):\n new_val = story[val].encode(\n 'ascii', errors='ignore').decode('utf-8')\n else:\n new_val = story[val]\n story_dict.update(**{k: new_val})\n news_dump.add_story(config_id, **story_dict)" ]
[ "0.64437044", "0.6328608", "0.6288369", "0.62582314", "0.5989727", "0.5896046", "0.58893055", "0.58317786", "0.58317786", "0.5802547", "0.5788355", "0.57476854", "0.5663333", "0.5635389", "0.5627574", "0.5625349", "0.56044173", "0.5590741", "0.55799675", "0.5561591", "0.5554465", "0.5546976", "0.55460906", "0.5545992", "0.5532719", "0.55235773", "0.55177546", "0.551553", "0.54738", "0.54729325", "0.54529375", "0.5445355", "0.5444845", "0.5444845", "0.5415596", "0.5415006", "0.540251", "0.5399804", "0.538643", "0.5385985", "0.537527", "0.5368712", "0.5366991", "0.5361116", "0.5354153", "0.5347412", "0.53428566", "0.5342358", "0.53342724", "0.53214806", "0.53198117", "0.52830887", "0.5279047", "0.5266355", "0.526428", "0.52588797", "0.52465254", "0.5241545", "0.52411216", "0.52339107", "0.5232173", "0.5230028", "0.5229356", "0.52185965", "0.52182096", "0.5216245", "0.5208036", "0.519971", "0.5191663", "0.51890326", "0.5183591", "0.51821357", "0.51691526", "0.5166064", "0.51594007", "0.51569545", "0.51560986", "0.51430506", "0.5137079", "0.5133127", "0.512728", "0.5121356", "0.5119623", "0.5118376", "0.51114786", "0.51090926", "0.51090926", "0.51090926", "0.51090926", "0.51090926", "0.51004565", "0.5098428", "0.509322", "0.5093021", "0.5080927", "0.507522", "0.5074364", "0.50726503", "0.5070578", "0.5067554" ]
0.66384387
0
Load the correct backend driver for data persistence.
def _load_driver(backend, **kargs):
    bk_module = importlib.import_module('backend', __package__)
    driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')
    return driver_cls(**kargs)
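A minimal sketch of the naming convention the loader relies on: _load_driver('sqlite', path='items.db') imports the sibling 'backend' module and resolves str.capitalize('sqlite') + 'Backend', i.e. a class named SqliteBackend. The SqliteBackend class below, its 'path' parameter and its save() method are assumptions made up purely to illustrate the lookup, not taken from the source.

# Hypothetical contents of the sibling `backend` module (illustration only).
class SqliteBackend:
    def __init__(self, path):
        self.path = path  # location of the persistent store (assumed parameter)

    def save(self, table_name, data):
        # Persist `data` under `table_name`; body omitted in this sketch.
        ...

# Assumed usage: driver = _load_driver('sqlite', path='items.db')
# -> getattr(<backend module>, 'SqliteBackend')(path='items.db')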
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_backend(cls) -> IBackend:\n cls.Lock.acquire()\n try:\n return cls._load_backend()\n finally:\n cls.Lock.release()", "def set_backend(self, backend):\n if backend not in AVAILABLE_BACKENDS:\n raise StorageError(f'Unrecognized backend {backend}; use one of {AVAILABLE_BACKENDS}')\n if backend == 'tinydb':\n LOGGER.debug(\"Using TinyDB database as requested for %s\", self.name)\n self._backend = DB_TINYDB\n elif backend == 'sqlite':\n LOGGER.debug(\"Using SQLite database as requested for %s\", self.name)\n self._backend = DB_SQLITE\n elif backend == 'auto':\n if self._sqlite_storage.database_exists():\n LOGGER.debug(\"Using SQLite database in AUTO mode because one already exists for %s\", self.name)\n self._backend = DB_SQLITE\n else:\n LOGGER.debug(\"Using TinyDB (default) in AUTO because no database already exists for %s\", self.name)\n self._backend = DB_TINYDB", "def _load_driver_module(self):\n driver = get_dbapi_module(self.driver_module)\n exceptions.register(driver.DatabaseError)\n return driver", "def get_storage_backend(self):\n return self.client.info()['Driver']", "def _switch_backend(self, model_db):\n if model_db['backend_name'] != self.backend_name:\n backend = switch_backend(model_db['backend_name'])\n self.backend_name = backend.__name__\n self.backend_version = None\n if self.backend_name == 'keras':\n from ..backend import keras_backend\n self.backend = keras_backend\n elif self.backend_name == 'sklearn':\n from ..backend import sklearn_backend\n self.backend = sklearn_backend\n if hasattr(backend, '__version__'):\n check = self.backend_version != backend.__version__\n self.backend_version = backend.__version__\n if check and self.verbose > 0: # pragma: no cover\n sys.stderr.write('Warning: the backend versions'\n 'do not match.\\n') # pragma: no cover", "def load_backend(self):\r\n if self.current_presentation():\r\n presentation = self.current_presentation()\r\n\r\n # If current presentation is no existant (empty talk database)\r\n # use a default recording name.\r\n else:\r\n presentation = Presentation(title=unicode(\"default\"))\r\n\r\n initialized, self.recently_recorded_video = self.controller.load_backend(presentation)\r\n if initialized:\r\n return True\r\n else:\r\n return False # Error something failed while loading the backend\r", "def load_backend(backend: str | Type[Backend]) -> Type[Backend]:\n if isinstance(backend, type) and issubclass(backend, Backend):\n return backend\n elif isinstance(backend, str):\n try:\n backend = BUILTIN_BACKENDS[backend]\n except KeyError:\n raise ValueError(f'No such backend \"{backend}\"')\n p, m = backend.rsplit('.', 1)\n mod = importlib.import_module(p)\n attr = getattr(mod, m)\n if isinstance(attr, type) and issubclass(attr, Backend):\n return attr\n else:\n raise TypeError('Backend must be subclass of Backend class.')\n else:\n raise ValueError('Expecting string or Backend subclass.')", "def get_backend(self):\n return self.analyze_db_task(constants.TRAIN_DB).backend", "def driver_load(self, name):\r\n return AbstractServiceManager.service_load(self, name)", "def _getDriver(self):\n if not hasattr(self, '_driver'):\n with self._getDatasetLock:\n if not self.dataset or not self.dataset.GetDriver():\n self._driver = None\n else:\n self._driver = self.dataset.GetDriver().ShortName\n return self._driver", "def load_backend(name, options=None):\n if name is None:\n assert options is None\n return get_default()\n if options is None:\n options = {}\n if name not in _backends:\n raise UnknownBackend(name)\n try:\n res = 
_backends[name]()(**options)\n except Exception as e:\n raise LoadingError(name) from e\n return res", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)", "def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']", "def _get_backend(args):\n if args.backend == 'gatttool':\n backend = GatttoolBackend\n elif args.backend == 'bluepy':\n backend = BluepyBackend\n elif args.backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(args.backend))\n return backend", "def load(self):\n with self.__lock:\n self._d.update(self.backend.load())\n log.debug(\"load: {}\".format(self.backend.filename))", "def driver(self) -> GraphDatabase.driver:\n raise NotImplementedError\n # if not self._driver:\n # self._driver = GraphDatabase.driver(\n # self.url,\n # auth=(self.username, self.password),\n # )\n #\n # return self._driver", "def get_backend():\n return Connection()", "def _load_backend(backend: str) -> types.ModuleType:\n from importlib.metadata import entry_points\n\n if backend == \"matplotlib\":\n # Because matplotlib is an optional dependency and first-party backend,\n # we need to attempt an import here to raise an ImportError if needed.\n try:\n module = importlib.import_module(\"pandas.plotting._matplotlib\")\n except ImportError:\n raise ImportError(\n \"matplotlib is required for plotting when the \"\n 'default backend \"matplotlib\" is selected.'\n ) from None\n return module\n\n found_backend = False\n\n eps = entry_points()\n key = \"pandas_plotting_backends\"\n # entry_points lost dict API ~ PY 3.10\n # https://github.com/python/importlib_metadata/issues/298\n if hasattr(eps, \"select\"):\n entry = eps.select(group=key) # pyright: ignore[reportGeneralTypeIssues]\n else:\n # Argument 2 to \"get\" of \"dict\" has incompatible type \"Tuple[]\";\n # expected \"EntryPoints\" [arg-type]\n entry = eps.get(key, ()) # type: ignore[arg-type]\n for entry_point in entry:\n found_backend = entry_point.name == backend\n if found_backend:\n module = entry_point.load()\n break\n\n if not found_backend:\n # Fall back to unregistered, module name approach.\n try:\n module = importlib.import_module(backend)\n found_backend = True\n except ImportError:\n # We re-raise later on.\n pass\n\n if found_backend:\n if hasattr(module, \"plot\"):\n # Validate that the interface is implemented when the option is set,\n # rather than at plot time.\n return module\n\n raise ValueError(\n f\"Could not find plotting backend '{backend}'. 
Ensure that you've \"\n f\"installed the package providing the '{backend}' entrypoint, or that \"\n \"the package has a top-level `.plot` method.\"\n )", "def _init_driver(self, shard_id):\n shard = self._shards_ctrl.get(shard_id, detailed=True)\n conf = utils.dynamic_conf(shard['uri'], shard['options'])\n return utils.load_storage_driver(conf, self._cache)", "def load_backend(backend_name):\n try:\n module_bits = backend_name.split(\".\")\n klass = module_bits.pop()\n return getattr(import_module(\".\".join(module_bits)), klass)\n except ImportError as e_user:\n # The nlp backend wasn't found. Display a helpful error message\n # listing all built-in nlp backends.\n backend_dir = str(Path(__file__).parent / 'backends')\n available_backends = [\n name for _, name, ispkg in pkgutil.iter_modules([backend_dir])\n if ispkg and name not in {'base'}\n ]\n if backend_name not in [\n 'poetaster.nlp.backends.%s' % b for b in available_backends\n ]:\n backend_reprs = map(repr, sorted(available_backends))\n raise ImproperlyConfigured(\n \"%r isn't an available nlp backend.\\n\"\n \"Try using 'poetaster.nlp.backends.X', where X is one of:\\n\"\n \" %s\" % (backend_name, \", \".join(backend_reprs))\n ) from e_user\n else:\n # If there's some other error, this must be an error in Django\n raise", "def get_backend_from_coredata(builddir: Path) -> str:\n return coredata.load(str(builddir)).get_builtin_option('backend')", "def get_backend(\n self,\n backend_id: str,\n ) -> Optional[Type[BaseCertificateStorageBackend]]:\n return self.get('backend_id', backend_id)", "def backends():\n return list(loader.backend_dict.keys())\n # return loader._preference", "def get_driver(drv):\n return GenericDriver.get_driver(drv)", "def load_backend(self, presentation=None):\r\n initialized, filename_for_frontend = self.media.load_backend(presentation)\r\n if initialized:\r\n return True, filename_for_frontend\r\n else:\r\n return False # Error something failed while loading the backend\r", "def get_backend():\n return _BACKEND", "def _get_storage_backend(fq_classname):\n LOG.debug('Running _get_storage_backend with fq_classname [%s]'\n % fq_classname)\n\n if not fq_classname:\n return None\n\n (modname, clname) = fq_classname.rsplit('.', 1)\n # A test import of the backend storage class should have been undertaken\n # at app startup in django_drf_filepond.apps.ready so any failure\n # importing the backend should have been picked up then.\n mod = importlib.import_module(modname)\n storage_backend = getattr(mod, clname)()\n LOG.info('Storage backend instance [%s] created...' 
% fq_classname)\n\n return storage_backend", "def backend_name(self) -> str:\n return self._db_data.backend", "def which_backend(self, backend_name, type_name, conf):\n print(\"backend_name is : <{}>\".format(backend_name))\n if backend_name not in self.records.keys():\n print(\"first get object\")\n self.port_obj = PortFactory.backends.get(backend_name)(type_name, conf)\n print(\"get object from factory : {}\".format(self.port_obj))\n self.records[backend_name] = [type_name]\n else:\n print(\"re-init get object\")\n self.port_obj.reinit(type_name,conf)\n self.records[backend_name].append(type_name)\n print(\"factory records: {}\".format(self.records))\n return self.port_obj", "def init_backend(self, *args, **kwargs):\n super(GeppettoBackend, self).init_backend(*args, **kwargs)", "def load_device():", "def _backend(self) -> Backend:\n return self.__backend", "def access_db(self):\n try:\n driver = GraphDatabase.driver(self.url, auth=(self.username, self.password))\n except Exception:\n raise ConnectionError\n return driver", "def _configure_backend(settings: Settings):\n if not settings.main.backend and (not settings.inventory.inventory_class or not settings.adapters.sot_class):\n raise ConfigLoadFatalError(\n \"You must define a valid backend or assign inventory.inventory_class and adapters.sot_class manually.\"\n )\n\n if not settings.main.backend:\n return settings\n\n supported_backends = DEFAULT_BACKENDS.keys()\n if settings.main.backend not in supported_backends:\n raise ConfigLoadFatalError(f\"backend value one of : {', '.join(supported_backends)}\")\n\n if not settings.inventory.inventory_class:\n settings.inventory.inventory_class = DEFAULT_BACKENDS[settings.main.backend][\"inventory\"]\n\n if not settings.adapters.sot_class:\n settings.adapters.sot_class = DEFAULT_BACKENDS[settings.main.backend][\"adapter\"]\n\n return settings", "def backend(self):\n # This never changes (so no read locking needed).\n return self._backend", "def get_backend_class(backend):\n # NOTE(sirp): avoiding circular import\n from glance.store.http import HTTPBackend\n from glance.store.s3 import S3Backend\n from glance.store.swift import SwiftBackend\n from glance.store.filesystem import FilesystemBackend\n\n BACKENDS = {\n \"file\": FilesystemBackend,\n \"http\": HTTPBackend,\n \"https\": HTTPBackend,\n \"swift\": SwiftBackend,\n \"s3\": S3Backend}\n\n try:\n return BACKENDS[backend]\n except KeyError:\n raise UnsupportedBackend(\"No backend found for '%s'\" % backend)", "def set_backend(self, backend):\n self.backend = backend", "def detect_backend():\n try:\n from termpixels.unix import UnixBackend\n return UnixBackend()\n except:\n try:\n from termpixels.win32_vt import Win32VtBackend\n return Win32VtBackend()\n except Exception as e:\n raise e\n from termpixels.win32 import Win32Backend\n return Win32Backend()", "def _get_driver():\n return etcd_driver.get_driver()", "def get_default():\n backend, opts = parse_default()\n assert backend is not None\n return load_backend(backend, opts)", "def defaultDriver(self):\n return Enums.SQLite3", "def get_backend():\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def get_datastore(self, datastore_cls: Type[BaseDatasetLoader], dataset_conf: Dict) -> BaseDatasetLoader:\n pass", "def set_backend(*backend):\n global _BACKEND\n if not backend:\n raise ValueError('Need at least one backend.')\n _BACKEND = backend", "def _init_driver(self, pool_id, pool_conf=None):\n if pool_id is not None:\n pool = self._pools_ctrl.get(pool_id, 
detailed=True)\n else:\n pool = pool_conf\n conf = utils.dynamic_conf(pool['uri'], pool['options'],\n conf=self._conf)\n storage = utils.load_storage_driver(conf,\n self._cache,\n control_driver=self.control)\n return pipeline.DataDriver(conf, storage, self.control)", "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'compras':\n return 'db2'\n return None", "def get_backend():\n\n return sys.modules[__name__]", "def get_backend():\n\n return sys.modules[__name__]", "def get_default_backend():\n return __default_backend", "def _get_active_backend(\n prefer=default_parallel_config[\"prefer\"],\n require=default_parallel_config[\"require\"],\n verbose=default_parallel_config[\"verbose\"],\n):\n\n backend_config = getattr(_backend, \"config\", default_parallel_config)\n\n backend = _get_config_param(\n default_parallel_config['backend'], backend_config, \"backend\"\n )\n prefer = _get_config_param(prefer, backend_config, \"prefer\")\n require = _get_config_param(require, backend_config, \"require\")\n verbose = _get_config_param(verbose, backend_config, \"verbose\")\n\n if prefer not in VALID_BACKEND_HINTS:\n raise ValueError(\n f\"prefer={prefer} is not a valid backend hint, \"\n f\"expected one of {VALID_BACKEND_HINTS}\"\n )\n if require not in VALID_BACKEND_CONSTRAINTS:\n raise ValueError(\n f\"require={require} is not a valid backend constraint, \"\n f\"expected one of {VALID_BACKEND_CONSTRAINTS}\"\n )\n if prefer == 'processes' and require == 'sharedmem':\n raise ValueError(\n \"prefer == 'processes' and require == 'sharedmem'\"\n \" are inconsistent settings\"\n )\n\n explicit_backend = True\n if backend is None:\n\n # We are either outside of the scope of any parallel_(config/backend)\n # context manager or the context manager did not set a backend.\n # create the default backend instance now.\n backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)\n explicit_backend = False\n\n # Try to use the backend set by the user with the context manager.\n\n nesting_level = backend.nesting_level\n uses_threads = getattr(backend, 'uses_threads', False)\n supports_sharedmem = getattr(backend, 'supports_sharedmem', False)\n # Force to use thread-based backend if the provided backend does not\n # match the shared memory constraint or if the backend is not explicitely\n # given and threads are prefered.\n force_threads = (require == 'sharedmem' and not supports_sharedmem)\n force_threads |= (\n not explicit_backend and prefer == 'threads' and not uses_threads\n )\n if force_threads:\n # This backend does not match the shared memory constraint:\n # fallback to the default thead-based backend.\n sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](\n nesting_level=nesting_level\n )\n # Warn the user if we forced the backend to thread-based, while the\n # user explicitely specified a non-thread-based backend.\n if verbose >= 10 and explicit_backend:\n print(\n f\"Using {sharedmem_backend.__class__.__name__} as \"\n f\"joblib backend instead of {backend.__class__.__name__} \"\n \"as the latter does not provide shared memory semantics.\"\n )\n # Force to n_jobs=1 by default\n thread_config = backend_config.copy()\n thread_config['n_jobs'] = 1\n return sharedmem_backend, thread_config\n\n return backend, backend_config", "def _get_plot_backend(backend: str | None = None):\n backend_str: str = backend or get_option(\"plotting.backend\")\n\n if backend_str in _backends:\n return _backends[backend_str]\n\n module = _load_backend(backend_str)\n _backends[backend_str] = module\n return 
module", "def register_backend(name, load_fn):\n assert name not in _backends\n _backends[name] = load_fn", "def ensure_backend(resource, backend, backends, opt, managed=True):\n existing_mount = find_backend(resource.mount, backends)\n if not existing_mount:\n new_mount = backend(resource, opt, managed=managed)\n backends.append(new_mount)\n return new_mount\n\n return existing_mount", "def get_driver(self, pool_id, pool_conf=None):\n\n try:\n return self._drivers[pool_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[pool_id] = self._init_driver(pool_id, pool_conf)\n\n return self._drivers[pool_id]", "def fake_backend_init(obj, *args, **kwargs):\n\n from armstrong.apps.embeds.backends import get_backend\n super(Backend, obj).__init__(*args, **kwargs)\n\n # patching this part\n obj._backend = get_backend(\n 'armstrong.apps.embeds.backends.default.DefaultBackend')\n obj._setup_backend_proxy_methods()", "def get_instance():\n \"\"\"Add more judgement for selecting more database backend\"\"\"\n return IMPL", "def _instantiate_graph_db(self):\n graph_db_name = self.conf_manager.get_graph_db()\n plugin_parameters = [self.conf_manager]\n self.graph_db = self._load_plugins([graph_db_name],\n common.GRAPH_PACKAGE,\n paths.GRAPH_DB_DIR,\n plugin_parameters)[0]", "def find_backend(path, backends):\n for backend in backends:\n if backend.path == path:\n return backend\n\n return None", "def find_backend(path, backends):\n for backend in backends:\n if backend.path == path:\n return backend\n\n return None", "def get_driver(self):\n\t\treturn self.driver", "def _get_infrastructure_engine():\n\n LOG.debug(\"Infrastructure engine {engine} is loading\".format(\n engine=CONF.infrastructure_engine))\n\n return _load_driver('sahara.infrastructure.engine',\n CONF.infrastructure_engine)", "def get_backend():\n return __SETTINGS__._BACKEND", "def _load_global_backends(pytest_config: pytest.Config) -> Dict[str, Any]:\n backend_settings = {}\n\n backends = [\"http\", \"mqtt\"]\n for b in backends:\n backend_settings[b] = get_option_generic(\n pytest_config, \"tavern-{}-backend\".format(b), None\n )\n\n return backend_settings", "def get_driver(self, shard_id):\n\n try:\n return self._drivers[shard_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[shard_id] = self._init_driver(shard_id)\n\n return self._drivers[shard_id]", "def _import_dbo_driver(self, dbo_driver):\n\n return importlib.import_module(dbo_driver)", "def _load_global_backends(pytest_config):\n backend_settings = {}\n\n backends = [\"http\", \"mqtt\"]\n for b in backends:\n # similar logic to above - use ini, then cmdline if present\n ini_opt = pytest_config.getini(\"tavern-{}-backend\".format(b))\n cli_opt = pytest_config.getoption(\"tavern_{}_backend\".format(b))\n\n in_use = ini_opt\n if cli_opt and (cli_opt != ini_opt):\n in_use = cli_opt\n\n backend_settings[b] = in_use\n\n return backend_settings", "def driver_from_file(input_file):\n file_ext = os.path.splitext(input_file)[1].split(\".\")[1]\n try:\n driver = _file_ext_to_driver()[file_ext]\n except KeyError:\n raise errors.MapcheteDriverError(\n \"no driver could be found for file extension %s\" % file_ext)\n if len(driver) == 1:\n return driver[0]\n else:\n raise errors.MapcheteDriverError(\n \"error determining read driver from file %s\" % input_file)", "def _get_driver(self, driver_name):\n driver = lb_const.SERVICE_TYPE + driver_name\n return self.drivers[driver]", "def 
storage_backend_get_by_name(context, name, inactive=False):\n return _find_storage_backend(context, dict(name = name), True, None, inactive=inactive)", "def set_default_backend(new_default_backend):\n global __default_backend\n assert new_default_backend in __SUPPORTED_BACKENDS, (\n \"Backend %s is not supported\" % new_default_backend\n )\n __default_backend = new_default_backend", "def get_profile_backend(self, profile):\n return self._get_attribute(profile, 'backend')", "def test_default_backend_used_when_not_specified():\n money_rates_settings.DEFAULT_BACKEND = CustomBackend\n call_command(\"update_rates\")\n\n assert 1 == RateSource.objects.filter(name=\"custom-backend\").count()\n assert 2 == Rate.objects.filter(source__name=\"custom-backend\").count()", "def get_driver(self, **kwargs) -> Driver:\n from squirrel.framework.plugins.plugin_manager import squirrel_plugin_manager\n\n plugins: list[list[type[Driver]]] = squirrel_plugin_manager.hook.squirrel_drivers()\n for plugin in plugins:\n for driver_cls in plugin:\n if driver_cls.name == self.driver_name:\n # Problem: If users provide \"storage_options\" in the `kwargs` and the `self.driver_kwargs`\n # already defines \"storage_options\", then vanilla dict merging\n # (i.e., {**self.driver_kwargs, **kwargs}) will overwrite the \"storage_options\" in\n # `self.driver_kwargs` entirely. This is undesired, since important information like\n # bucket configurations (e.g., \"requester_pays\") may be stored in the `self.driver_kwargs`\n # \"storage_options\", which users don't want to provide again using `kwargs`.\n # Solution: The below mechanism merges the \"storage_options\" in `kwargs` with the existing\n # \"storage_options\" in `self.driver_kwargs` (while the newly passed \"storage_options\"\n # in `kwargs` take precendence).\n kwargs[\"storage_options\"] = {\n **self.driver_kwargs.get(\"storage_options\", {}),\n **kwargs.get(\"storage_options\", {}),\n }\n return driver_cls(catalog=self._catalog, **{**self.driver_kwargs, **kwargs})\n\n raise ValueError(f\"driver {self.driver_name} not found\")", "def load_backends(backend_name):\n backend_data = _get_backend_data().get(backend_name)\n if not backend_data:\n raise ImproperlyConfigured('The specified backend \"%s\" does not exist. Is NEWAUTH_BACKENDS a correctly defined dict?' % backend_name)\n\n backends = []\n for path in backend_data['backend']:\n try:\n cls = import_string(path)\n except (ImportError, AttributeError) as e:\n raise ImproperlyConfigured('Error importing authentication backend %s: \"%s\"' % (path, e))\n except ValueError:\n raise ImproperlyConfigured('Error importing authentication backends. 
Is NEWAUTH_BACKENDS a correctly defined dict?')\n backends.append(cls(backend_name))\n return backends", "def set_backend(name):\n # perform checks\n if name == 'autograd' and not AG_AVAILABLE:\n raise ValueError(\"Autograd backend is not available, autograd must \\\n be installed.\")\n\n # change backend by monkeypatching\n if name == 'numpy':\n backend.__class__ = NumpyBackend\n elif name == 'autograd':\n backend.__class__ = AutogradBackend\n else:\n raise ValueError(f\"unknown backend '{name}'\")", "def db_for_read(self, model, **hints):\n\t\tif model._meta.app_label == 'product':\n\t\t\treturn 'product_dbs'\n\t\treturn None", "def load_data(ctx, klass=None):\n if klass:\n if klass and not klass.startswith(\"public_data.models\"):\n klass = f\"public_data.models.{klass}\"\n options = {\"class\": klass}\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"load_data\", **options)", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n 'L2GW', self)", "def get_backend_disk(self, disk):\n backend_vm = self.get_backend_vm(disk.vm)\n for device in backend_vm.config.hardware.device:\n if (\n isinstance(device, vim.VirtualDisk)\n and str(device.key) == disk.backend_id\n ):\n return device", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def get_backend():\n global _ACTIVE_BACKEND\n if not _ACTIVE_BACKEND:\n _ACTIVE_BACKEND = locate(settings.SITE_BACKEND)()\n return _ACTIVE_BACKEND", "def storage_backend_get_by_id(context, id, inactive=False):\n return _find_storage_backend(context, dict(id = id), True, None, inactive=inactive)", "def get_backend_by_name(cls_str):\n # type: (str) -> Backend\n try:\n return globals()[cls_str]()\n except KeyError:\n raise InvalidBackendClass('Invalid backend class name: {cls}'.format(cls=cls_str))", "def _set_backend_entity(self, model: TModel) -> None:\n model_backend = get_backend(model)\n if model_backend == BackendType.OPENVINO:\n from nncf.quantization.algorithms.channel_alignment.openvino_backend import OVChannelAlignmentAlgoBackend\n\n self._backend_entity = OVChannelAlignmentAlgoBackend()", "def backend_protocol(self) -> Optional[pulumi.Input[Union[str, 'BackendProtocol']]]:\n return pulumi.get(self, \"backend_protocol\")", "def backend_quotes(threescale, backend_usages):\n\n return threescale.backends.read(backend_usages[1][\"backend_id\"])", "def driver(self):\n\n if not self._driver_cache:\n self._driver_cache = self._driver(self)\n\n return self._driver_cache", "def driver(self):\n \n return self.__driver", "def _instantiate_backend_from_name(name, options):\r\n # Parse backend name\r\n\r\n try:\r\n parts = name.split('.')\r\n module_name = '.'.join(parts[:-1])\r\n class_name = parts[-1]\r\n except IndexError:\r\n raise ValueError('Invalid event track backend %s' % name)\r\n\r\n # Get and verify the backend class\r\n\r\n try:\r\n module = import_module(module_name)\r\n cls = getattr(module, class_name)\r\n if not inspect.isclass(cls) or not issubclass(cls, BaseBackend):\r\n raise TypeError\r\n except (ValueError, AttributeError, TypeError, ImportError):\r\n raise ValueError('Cannot find event track backend %s' % name)\r\n\r\n backend = cls(**options)\r\n\r\n return backend", "def get_boot_driver(self):\n return self._boot_driver", "def _execute_backend_on_spec(self):\n\n api_no_aliases_cache = None\n for attr_key in dir(self.backend_module):\n attr_value = getattr(self.backend_module, attr_key)\n if 
(inspect.isclass(attr_value) and\n issubclass(attr_value, Backend) and\n not inspect.isabstract(attr_value)):\n self._logger.info('Running backend: %s', attr_value.__name__)\n backend = attr_value(self.build_path, self.backend_args)\n\n if backend.preserve_aliases:\n api = self.api\n else:\n if not api_no_aliases_cache:\n api_no_aliases_cache = remove_aliases_from_api(self.api)\n api = api_no_aliases_cache\n\n try:\n backend.generate(api)\n except Exception:\n # Wrap this exception so that it isn't thought of as a bug\n # in the stone parser, but rather a bug in the backend.\n # Remove the last char of the traceback b/c it's a newline.\n raise BackendException(\n attr_value.__name__, traceback.format_exc()[:-1])", "def _load_state(\n self, datapath: str, dpr_model: str, pretrained_path: str, encoder_type: str\n ):\n if dpr_model == 'bert':\n state_dict = BertConversionUtils.load_bert_state(\n datapath,\n self.state_dict(),\n pretrained_dpr_path=pretrained_path,\n encoder_type=encoder_type,\n )\n self.load_state_dict(state_dict)\n elif dpr_model == 'bert_from_parlai_rag':\n state_dict = torch.load(pretrained_path, map_location='cpu')[\"model\"]\n key = f\"{encoder_type}_encoder.\"\n state_dict = {\n k.split(key)[-1]: v for k, v in state_dict.items() if key in k\n }\n self.load_state_dict(state_dict)", "def load_drivers(sc, conf):\n cutils = utils.ConfiguratorUtils(conf)\n drivers = cutils.load_drivers(lb_const.SERVICE_TYPE)\n\n plugin_rpc = LBaaSV2RpcSender(sc)\n\n for service_type, dobj in six.iteritems(drivers):\n '''LB Driver constructor needs plugin_rpc as a param'''\n instantiated_dobj = dobj(plugin_rpc=plugin_rpc, conf=conf)\n drivers[service_type] = instantiated_dobj\n\n return drivers", "def get_persistent_boot_device(self):\n system = self._get_host_details()\n try:\n # Return boot device if it is persistent.\n if system['Boot']['BootSourceOverrideEnabled'] == 'Continuous':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n except KeyError as e:\n msg = \"get_persistent_boot_device failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)\n\n # Check if we are in BIOS boot mode.\n # There is no resource to fetch boot device order for BIOS boot mode\n if not self._is_boot_mode_uefi():\n return None\n\n # Get persistent boot device order for UEFI\n boot_sources, boot_devices = self._get_persistent_boot_devices()\n\n boot_string = \"\"\n try:\n for source in boot_sources:\n if (source[\"StructuredBootString\"] == boot_devices[0]):\n boot_string = source[\"BootString\"]\n break\n except KeyError as e:\n msg = \"get_persistent_boot_device failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)\n\n if 'HP iLO Virtual USB CD' in boot_string:\n return 'CDROM'\n\n elif ('NIC' in boot_string or\n 'PXE' in boot_string or\n \"iSCSI\" in boot_string):\n return 'NETWORK'\n\n elif common.isDisk(boot_string):\n return 'HDD'\n\n else:\n return None", "def backend_specific_model(model: TModel, tmp_dir: str):", "def get_backend():\n backend_path = settings.CALENDAR_BACKEND\n\n try:\n backend_modulename, backend_classname = backend_path.rsplit('.', 1)\n except ValueError:\n raise ImproperlyConfigured('{0} isn\\'t a backend module'.format(backend_path))\n\n # See if the module has already been imported.\n try:\n backend_module = sys.modules[backend_modulename]\n except KeyError:\n # ok, then import it.\n try:\n backend_module = import_module(backend_modulename)\n except ImportError as 
e:\n raise ImproperlyConfigured('Error importing backend {0}: \"{1}\"'.format(backend_modulename, e))\n\n try:\n backend_class = getattr(backend_module, backend_classname)\n except AttributeError:\n raise ImproperlyConfigured(\n 'Backend module \"{0}\" does not define a \"{1}\" class'.format(backend_modulename, backend_classname)\n )\n\n backend_instance = backend_class()\n\n if not isinstance(backend_instance, BaseBackend):\n raise ImproperlyConfigured(\n 'Backend class \"{0}\" is not a subclass of \"django_calendar.backends.BaseBackend\"'.format(backend_classname)\n )\n\n return backend_instance", "def register(dbengine, backendclass):\n backendregistry.register(dbengine, backendclass)", "def _load_disk(self):\r\n pass" ]
[ "0.64767873", "0.6454533", "0.6352563", "0.6303405", "0.6164173", "0.59922636", "0.5975386", "0.58862835", "0.58696896", "0.5819013", "0.58041245", "0.5792222", "0.5769137", "0.575706", "0.5755538", "0.5722598", "0.5693296", "0.5691711", "0.5683557", "0.5676043", "0.5659401", "0.56327343", "0.5607392", "0.5552141", "0.5546877", "0.554518", "0.5541243", "0.55331874", "0.55314845", "0.5504782", "0.54868543", "0.5481116", "0.547085", "0.54628164", "0.54588133", "0.54480296", "0.5441335", "0.54408085", "0.5431234", "0.54010385", "0.5378919", "0.53685045", "0.53675884", "0.53675884", "0.5364221", "0.53420043", "0.5337969", "0.53164417", "0.53038365", "0.53038365", "0.5286081", "0.52752525", "0.5273889", "0.526025", "0.5259654", "0.52541", "0.52401584", "0.5233972", "0.5227432", "0.5217355", "0.5217355", "0.52114516", "0.52041954", "0.5195856", "0.5195115", "0.5191266", "0.5180139", "0.51794547", "0.5172159", "0.51686704", "0.5166398", "0.5161553", "0.51467603", "0.51432854", "0.5143067", "0.51404977", "0.51379627", "0.51359206", "0.51282984", "0.5127995", "0.51101905", "0.51022434", "0.5091625", "0.50800997", "0.504872", "0.5043972", "0.5043173", "0.5033687", "0.5023456", "0.50231993", "0.50189304", "0.5013984", "0.5008678", "0.49933165", "0.49918547", "0.49909696", "0.49824002", "0.49812806", "0.49805224", "0.49762866" ]
0.7439424
0
Get the table name to save data from the url.
def _get_table_name(url):
    try:
        return urlparse(url).path.strip('/').split('/')[1]
    except IndexError:
        return None
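To make the parsing concrete, a short walkthrough of how the table name falls out of the URL; the example URLs are invented for illustration, and urlparse is assumed to come from urllib.parse.

from urllib.parse import urlparse

# Example (made-up URL): the second path segment becomes the table name.
path = urlparse('https://shop.example.com/api/products/42/').path  # '/api/products/42/'
segments = path.strip('/').split('/')                              # ['api', 'products', '42']
table_name = segments[1]                                           # 'products'

# For a bare host such as 'https://shop.example.com/' the stripped path is '',
# so segments == [''] and segments[1] raises IndexError, which the function maps to None.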
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name() -> str:\n pass", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def get_table_name(self):\n return self._table", "def get_tablename(self):\n return self.ds_table", "def getTableByName(self, tablename):\n pass", "def table(self):\n return self._table_name", "def table_name(self):\n return self._new_table.name", "def table_name(self) -> str:\n return self.model._meta.db_table", "def create_table_url(self, table_id):\n return self.base_url + \"/table?table=\" + str(table_id)", "def tablename(entity) -> str:\n return entity.__tablename__", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def construct_bq_table_path(table_name: str) -> str:\n if not re.match(r'^\\w+$', table_name):\n raise ValueError(\n f'{table_name} should contain only letters, numbers and underscore.')\n\n return '{}.{}.{}'.format(\n get_airflow_variable('dest_project'),\n get_airflow_variable('dest_dataset'), table_name)", "def get_tablepath(self, groupname, tablename):\n return '/' + groupname + '/' + tablename", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def tableName():\n return \"people\"", "def save(self, response):\n url = response.url\n if self.item_url(url):\n table_name = self._get_table_name(url)\n if table_name:\n data = response.json()\n self.backend.save(table_name, data)", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def get_table_name(model_id: Text) -> Text:\n return model_id if not cfg.app.db.schema else cfg.app.db.schema + \".\" + model_id", "def get_table_from_dataset_path(ds_path: str):\n return ds_path.split(\".\")[0].split(\"/\")[-1]", "def table_name(class_):\n try:\n return class_.__tablename__\n except AttributeError:\n return class_.__table__.name", "def split_table_name(table):\n\n if 'exch_' not in table:\n return None, None\n\n # table1: exch_bitstamp_btcusd_snapshot_20170908\n # table2: exch_btcc_spot_btccny_snapshot_20170908\n table = table.split('_', 1)[1]\n table = table.rsplit('_', 2)[0]\n tick = table.rsplit('_', 1)[1]\n exchange_name = table.rsplit('_', 1)[0]\n\n return exchange_name, tick", "def get_table_name_from_model(model):\n return \"{0};{1}\".format(model._meta.app_label, model._meta.model_name)", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def test_table_name(self):\n obs = SampleTemplate._table_name(self.test_study.id)\n self.assertEqual(obs, \"sample_1\")", "def get_table(tname, request):\n pyramid_sacrud_models = get_models_from_settings(request)\n try:\n models = dict(pyramid_sacrud_models)\n except ValueError:\n models = dict((pyramid_sacrud_models, ))\n finally:\n models = 
models.values()\n\n tables = itertools.chain(*[model for model in models if model])\n tables = [\n table for table in tables\n if (table.__tablename__).lower() == tname.lower()\n and table\n ]\n if not tables:\n return None\n return tables[0]", "def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"", "def get_url_name(self, url_id):\n return self.con.execute(\"select url from urllist where rowid=%d\"\n % url_id).fetchone()[0]", "def get_context_table_name(self, table):\r\n return self.context_table_name or \"table\"", "def urltable(self):\n return self._urltable", "def test_table_name(self):\n obs = PrepTemplate._table_name(1)\n self.assertEqual(obs, \"prep_1\")", "def table_name(self) -> str:\n return \"OLTP\"", "def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()", "def _get_table(self):\n\t\treturn self._table", "def _table_path(self):\n return self._switch.path_on_odl + \"flow-node-inventory:table/%d/\" % self._table_id", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def tablename(klass):\n if not hasattr(klass, 'TABLENAME'):\n inf = Inflector()\n klass.TABLENAME = inf.tableize(klass.__name__)\n return klass.TABLENAME", "def getTable(self, tablename):\n tablename = self.prefix + tablename\n if not tablename in self.tables:\n self.tables[tablename] = Table( tablename, self.metadata, \\\n autoload=True, autoload_with=self.conn )\n\n return self.tables[tablename]", "def _table_name(cls, suffix, relative=False):\n mname = inspect.getmodule(cls).__name__ + '_' + suffix\n if relative:\n mname = mname.split('.')[-1]\n return mname", "def set_tablename(self, name):\n self.ds_table = name", "def table(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"table\")", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table", "def get_id(self, url):\n return url.split('/')[-1]", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def get_table_byname(self, aTable):\n if aTable in self._tablesObjects.keys():\n oTable = self._tablesObjects[aTable]\n else:\n oTable = None\n return oTable", "def get_table(self, groupname, tablename):\n self.open_db()\n p = self.get_tablepath(groupname, tablename)\n try:\n return self.tablehandles[p]\n except KeyError:\n msg = \"table path \" + p + \" not found among table handles.\"\n raise KeyError(msg)", "def table(cls):\n return cls.__name__", "def getTable(self):\n return self.table", "def _get_object_name(self, object_url):\n infos = str(object_url).split('/')\n return infos[len(infos) - 1]", "def _table_id(project: str, table: FeatureView) -> str:\n return f\"{project}_{table.name}\"", "def get_table_pk_name(table):\n return '_'.join([table._meta.model_name, table ._meta.pk.name])", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def get_sandbox_table_name(dataset_id, rule_name):\n return '{dataset_id}_{rule_name}'.format(dataset_id=dataset_id,\n rule_name=re.sub(\n r'\\W', '_', rule_name))", "def encodeTableName(self, schema, table):\r\n return '\"{}\".\"{}\"'.format(schema, table)", "def _get_name_from_url(self, request):\n\n format = request.GET.get('format', None)\n if not format:\n match = self._format_query_pattern.match(request.path)\n if match and match.group('format'):\n format = match.group('format')\n return format", "def observationsTableName(self):\n return 'observations'", "def getTable(self, name: str):\n query = f\"SELECT * FROM '{name}';\"\n 
result = sql.executeAndReadQuery(self.connection, query)\n return result", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def get_url_name(self, row_id):\n return self.con.execute(\n \"SELECT url FROM urllist WHERE rowid={}\".format(row_id)\n ).fetchone()[0]", "def graph_queries_table_name(self) -> str:\n return pulumi.get(self, \"graph_queries_table_name\")", "def save_table_scraperwiki(uniques,table,name):\n for row in table:\n scraperwiki.sqlite.save(\n unique_keys=uniques\n , data=row\n , table_name=name\n )", "def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = fname.split(\",\")\n _split_fname = _fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname", "def get_model_from_table_name(table_name):\n app_label, model_name = split_table_name(table_name)\n try:\n model = ContentType.objects.get(\n app_label=app_label,\n model=model_name\n ).model_class()\n except ContentType.DoesNotExist:\n raise ModelDoesNotExistException(\n \"Model '{0}' does not exist.\".format(\n table_name,\n\n )\n )\n return model", "def _table_name(self, name: AnyStr) -> bytes:\n name = ensure_bytes(name)\n if self.table_prefix is None:\n return name\n return self.table_prefix + self.table_prefix_separator + name", "def filename_from(url):\n filename = url.split('/')[-1]\n return filename", "def convertTableName(fileName: str):\n if m := pattern.match(fileName):\n return m.group(\"tableName\")", "def get_name(self) -> str:\n return self.dbname", "def __set_name(self):\n table_name = self.get_table_name()\n record, timestamp = self.__get_max_timestamp()\n self.name = \"%s_%s_%s\" % (table_name, record, timestamp)", "def url_filename(url):\n return os.path.basename(urlparse.urlparse(url).path)", "def tab_url(self) -> str:", "def get_filename(url: str) ->str:\n if 'drive.google.com' in url:\n return _extract_google_drive_file_id(url)\n url, filename = os.path.split(url)\n return filename or os.path.basename(url)", "def get_file_name(url: str):\n filename = os.path.basename(url)\n fname, extension = os.path.splitext(filename)\n if extension:\n if \"=\" in filename:\n return filename.split(\"=\")[-1]\n return filename\n header = requests.head(url).headers\n if \"Location\" in header:\n return os.path.basename(header[\"Location\"])\n return filename", "def _path(self):\n return self._table_path+\"flow/%s/\" % self._id", "def _find_table(name):\n tables = Base.metadata.tables\n table = tables.get(name, None)\n if table is not None:\n return table\n else:\n raise NameError('Unable to locate table: %s' % name)", "def get_url(self):\n return self.db_url", "def getTable(self):\n return self.db.table(self.entity)", "def _get_table(self, key):\n table = getattr(self, key)\n if table is None or isinstance(table, int):\n return table\n return table.tid", "def table(self):\n return self.snowflake_options.table", "def get_tilename_cache_file(tablename):\n dir=get_tilename_cache_dir()\n fname='%s-tilenames.fits' % tablename\n return os.path.join(dir, fname)", "def get_table_name(query: str) -> str:\n find_table_name_from_query = r'(FROM `)(\\w+.\\w+)(`)'\n search_result = re.search(find_table_name_from_query, query)\n if search_result:\n return search_result.group(2)\n return \"Unrecognized table name\"", "def _split_table_name(table_name):\n table_name_items = table_name.split(\".\")\n if len(table_name_items) == 1:\n schema_name = None\n elif 
len(table_name_items) == 2:\n schema_name, table_name = table_name_items\n else:\n raise ValueError(\"Cannot determine schema/table name from input {}\".format(table_name))\n return schema_name, table_name", "def db_for_read(self, model, **hints):\n model_name = model._meta.label_lower\n pos = model_name.find('.')\n table_name = model_name[pos+1:]\n if table_name in self.route_encuestas:\n return 'encuestas'\n elif table_name in self.route_uxxienc_resul:\n return 'uxxienc_resul'\n return None", "def url(self) -> str:\n return self.DATASET_URLS[self.name]", "def _UrlBaseName(url):\n return url.rstrip('/').rpartition('/')[-1]", "def rename_table(base, tablename: str, table: Table) -> str:\n return snake_to_camel(tablename, upper=True)", "def table(self):\n return self.reference.table", "def table(self, name):\r\n if name in self._tables:\r\n return _tables[name]\r\n\r\n table = Table(name, self._storage)", "def _get_table_obj(self, mode):\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])", "def _extract_ks_tab(name):\n sp = name.split(\".\")\n if len(sp) == 2:\n ksp = sp[0]\n table = sp[1]\n else:\n ksp = config.execution_name\n table = name\n return ksp.lower().encode('UTF8'), table.lower().encode('UTF8')", "def _download_to_df(url, table_name, year, month):\n # Insert the table_name, year and month into the url.\n url = url.format(table=table_name, year=year, month=str(month).zfill(2))\n # Download the file.\n r = requests.get(url)\n if r.status_code != 200:\n raise _MissingData((\"\"\"Requested data for table: {}, year: {}, month: {} \n not downloaded. Please check your internet connection. Also check\n http://nemweb.com.au/#mms-data-model, to see if your requested\n data is uploaded.\"\"\").format(table_name, year, month))\n # Convert the contents of the response into a zipfile object.\n zf = zipfile.ZipFile(io.BytesIO(r.content))\n # Get the name of the file inside the zip object, assuming only one file is zipped inside.\n file_name = zf.namelist()[0]\n # Read the file into a DataFrame.\n data = pd.read_csv(zf.open(file_name), skiprows=1)\n # Discard last row of DataFrame\n data = data[:-1]\n return data", "def get_schema_url(self):\n return self.NAME_TYPE_SCHEMAS.get(self.name_type, None)", "def url_file_name(url):\r\n return url[url.rfind('/') + 1:]", "def getTable(self):\n\n raise NotImplementedError", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def autoname(self):\n ret = \"%(table)s_%(reftable)s_fkey\"%dict(\n table=self.table.name,\n reftable=self.reftable.name,\n )\n return ret", "def getAllName(table):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table)\n\t\tnames = cur.fetchall()\n\t\tcon.commit()\n\t\tcon.close()\n\t\treturn names\n\texcept:\n\t\tprint('Could not run function getAllName from DbController')", "def name_for(cls, table, id):\n try:\n return cls.lookups[table][id]\n except KeyError:\n cur.execute('SELECT id, name FROM \"{}\"'.format(table))\n cls.lookups[table] = {row[0]: row[1] for row in cur.fetchall()}\n return cls.lookups[table][id]" ]
[ "0.72730315", "0.6656085", "0.6656085", "0.6656085", "0.65448517", "0.64656055", "0.6451091", "0.6374937", "0.6281148", "0.6230515", "0.61168766", "0.60657483", "0.6009189", "0.5981553", "0.5945678", "0.5933731", "0.5901814", "0.5891688", "0.5882904", "0.58783686", "0.58702534", "0.58645785", "0.5856004", "0.58465433", "0.58392376", "0.58331996", "0.5789683", "0.5780634", "0.57715046", "0.57299197", "0.57148635", "0.5708038", "0.5705588", "0.5693963", "0.56700367", "0.56282955", "0.5585705", "0.55402094", "0.55060536", "0.5504383", "0.548953", "0.54872453", "0.54787964", "0.54649836", "0.5453764", "0.54499847", "0.54462504", "0.5440714", "0.5438504", "0.54157454", "0.5414489", "0.54049754", "0.5396597", "0.53620803", "0.5354388", "0.5299659", "0.52861947", "0.5276555", "0.52736425", "0.52636355", "0.523832", "0.5232418", "0.52310365", "0.523097", "0.52288294", "0.5223157", "0.5207908", "0.5198585", "0.51956046", "0.5193589", "0.51931995", "0.51889384", "0.5188436", "0.51836044", "0.5182364", "0.51809204", "0.5179765", "0.5153189", "0.5147809", "0.5147153", "0.5138872", "0.5136548", "0.51198345", "0.511798", "0.5112045", "0.5108522", "0.51045763", "0.5102614", "0.5081463", "0.5078229", "0.5069902", "0.5058426", "0.50564903", "0.5054686", "0.5053304", "0.50519454", "0.5049157", "0.5048586", "0.50427955", "0.5035927" ]
0.7577692
0
Check if this url contains an item detail.
def item_url(url):
    return all(map(lambda x: str.isdigit(x), str(url.strip('/').split('/')[-1])))
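A short illustrative walkthrough (URLs invented): the check treats a URL as an item-detail URL only when its last path segment consists entirely of digits.

# Numeric last segment -> item detail page:
url = 'https://shop.example.com/products/42/'
last_segment = url.strip('/').split('/')[-1]             # '42'
print(all(map(lambda x: str.isdigit(x), last_segment)))  # True

# Non-numeric last segment -> listing page, not an item detail:
url = 'https://shop.example.com/products/'
last_segment = url.strip('/').split('/')[-1]             # 'products'
print(all(map(lambda x: str.isdigit(x), last_segment)))  # False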
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_details(self):\n return hasattr(self, 'details')", "def is_view_price_list_detail_present(self):\n return self.is_element_present(self.view_price_list_detail_locator)", "def is_item_complete(self, item):\n return (item.get('id') and\n item.get('name') and\n 'description' in item and\n 'image' in item)", "def has_item(self, usage_key):\r\n try:\r\n self._find_one(usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n return False", "def has_item(self, item):\n if item in self._reverse_store:\n return True\n else:\n return False", "def has_details(self):\n\n return len(self.attributes) > 1", "def get_item_detail(item_id):\n pass", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def check_item_in(self, url):\n item_hash = tools.url_hash(url)\n if item_hash not in self.__items:\n self.__item_lock.acquire()\n self.__items.add(item_hash)\n self.__item_lock.release()\n return False\n else:\n return True", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True", "def has_item(self, item):\n return item in self.cache", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def _item_exists(self, location):\n \"Does nothing\"", "def __contains__(self, item):\n return item in self._fetch()", "def item_detail(request, pk):\n\n data = request.data\n try:\n item = validations_utils.item_validation(pk) # Validates if user exists or not.\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n if request.method == 'GET':\n item_serializer = ItemSerializer(item)\n return Response(item_serializer.data, status=status.HTTP_200_OK)", "def test_detail_exists(self):\n name_exists = 'detail' in self.views_module_listing\n is_callable = callable(self.views_module.detail)\n \n self.assertTrue(name_exists, f\"{FAILURE_HEADER}detail() view does not exist{FAILURE_FOOTER}\")\n self.assertTrue(is_callable, f\"{FAILURE_HEADER}detail() function does not exist or will not execute{FAILURE_FOOTER}\")", "def GetDetailsItem(self):\r\n if self.details: return self.details.GetDetailsItem()\r\n return None", "def has_details(self):\n\n if self.master:\n return self.master.has_details\n\n return any([level.has_details for level in self._levels.values()])", "def __contains__(self, item: Any) -> bool:\n try:\n self.__getattr__(item)\n return True\n except RuntimeError:\n return False", "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def has_item(self, usage_key):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.has_item(usage_key)", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def existsitem(self,item,listwidgets):\n exists = listwidgets.findItems(item, Qt.MatchExactly)\n if exists:\n return True\n else:\n return False", "def is_satisfied(self, item: Any) -> bool:", "def test_detail(self):\n # Test detail URL using ad_guid.\n url = 
'/api/users/{}/'.format(self.user1.ad_guid)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Test URL using email also.\n url = '/api/users/{}/'.format(self.user1.email.lower())\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def isin(self, item):\n return self.get(item) is not None", "def test_view_url_exists(self):\n response = self.client.get('/details/' + str(self.s.id))\n response2 = self.client.get(reverse('details', args=(self.s.id,)))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response2.status_code, 200)\n self.assertTemplateUsed(response2, 'notifications/details.html')", "def __contains__(self, item):\n if item == self.profile_id:\n return True", "def isItem(self):\n return _libsbml.Unit_isItem(self)", "def __contains__(self, item):\n try:\n hdu = self[item] # noqa\n return True\n except Exception:\n return False", "def item_detail(request, slug):\n\n item = get_object_or_404(Item, slug=slug)\n\n context = {\n 'item': item,\n }\n\n return render(request, 'items/item_detail.html', context)", "def is_allergic_to(self, item):\n if item in self.list:\n return True\n else:\n return False", "def test_details_id_ok(self):\n self.check_response('/attributes/1',\n ('Attribute ID#1 not found',))", "def test_detail_format(self) -> None:\n r = self.perform_request('detail', True)\n self.assert_json_schema(r.json(), self.get_details_schema())", "def __contains__(self, item):\n return item in self._data", "def __contains__(self, item):\n return item in self.contents", "def __contains__(self, item: Any) -> bool:\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def test_issue_detail(self):\n response = self.client.get(url_for(\n 'issues.issuedetailresource',\n issue_number=self.TARGET_ISSUE_NUMBER))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def has_item(self, usage_key):\r\n return usage_key in self.modules[usage_key.course_key]", "def test_book_detail_view(self):\n url = reverse(\"book:book-detail\", kwargs={\"slug\": self.book.slug})\n response = self.client.get(url)\n assert response.status_code == 200\n assert json.loads(response.content)[\"slug\"] == self.book.slug", "def has_items(self):\r\n return self.orderitem_set.exists() # pylint: disable=E1101\r", "def _item_exists(self, item):\n cursor = self.conn.cursor()\n cursor.execute(\n 'SELECT * FROM Members where first_name = ?;',\n (item['first_name'])\n )\n return True if len(cursor.fetchall()) else False", "def get_item_detail(self, identifier):\n\n try:\n return self.get_billing_item(identifier)\n except SoftLayerAPIError as exception:\n if exception.faultCode == 404:\n return self.get_billing_item_from_invoice(identifier)\n raise", "def __contains__(self, item: Any) -> bool:\n try:\n return item in self.contents\n except TypeError:\n try:\n return item is self.contents\n except TypeError:\n return item == self.contents # type: ignore", "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index", "def _validate_item(self, item):\n try:\n self._validate_item_required_attrs(item=item)\n self._validate_item_link(item=item)\n except ValueError as ex:\n logger.info(str(ex))\n return False\n\n return True", "def test_get_product_detail(self):\n\n response = 
self.client.get(reverse('website:product_details', args=(1,)))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Product title appears in HTML response content\n self.assertIn('<h1>Test Product</h1>'.encode(), response.content)\n self.assertNotIn('<h1>Test Product2</h1>'.encode(), response.content)", "def __contains__(self, item: object) -> bool:\n return item in self._used", "def __contains__(self, item):\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def _is_current_page(self, **kwargs):\n if kwargs:\n # do a lookup to get the object i\n object_id = self._get_object(**kwargs)[\"Id\"]\n pattern = r\"/lightning/r/{}/{}/view$\".format(self.object_name, object_id)\n else:\n # no kwargs means we should just verify we are on a detail\n # page without regard to which object\n pattern = r\"/lightning/r/{}/.*/view$\".format(self.object_name)\n\n location = self.selenium.get_location()\n if not re.search(pattern, location):\n raise Exception(\n \"Location '{}' didn't match pattern {}\".format(location, pattern)\n )", "def item_exists(item_id):\n return item_id in all_items", "def __eq__(self, other):\n if not isinstance(other, DetailItem):\n return False\n\n return self.__dict__ == other.__dict__", "def assertContains(self, response, item):\n self.assertTrue(item in response.body)", "def parse_detail_page(self, response):\n self.logger.info('Parse Detail Page function called on %s', response.url)\n item = response.meta.get('item', {})\n item['url'] = response.url\n item['title'] = response.css(TITLE_SELECTOR).extract_first(\"\").strip()\n item['price'] = self.get_price(response)\n return item", "def has_summary(self) -> bool:\n return \"get_summary_items\" not in self.__abstractmethods__", "def __contains__(self, item):\n try:\n self[item]\n return True\n except KeyError:\n return False", "def should_link(self, item):\r\n return item.__class__ in self.class_map.keys()", "def item_exists(self, call_number):\n return call_number in self.item_list.keys()", "def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result", "def has_item(self, usage_key):\r\n if usage_key.block_id is None:\r\n raise InsufficientSpecificationError(usage_key)\r\n try:\r\n course_structure = self._lookup_course(usage_key)['structure']\r\n except ItemNotFoundError:\r\n # this error only occurs if the course does not exist\r\n return False\r\n\r\n return self._get_block_from_structure(course_structure, usage_key.block_id) is not None", "def matcher(item):\n hit = item.get(lookup_key)\n if not isinstance(hit, list):\n return hit == identifier\n return any([el for el in hit if el == identifier])", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))['item']", "def test_product_detail(self):\n # first performing create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performing detail\n self._detail_model(\"product\", self.product_data, id, [\"name\", \"description\", \"image_link\", \"price\"])\n \n self.assertIsNotNone(id)", "def get_item_url(self, item):\n return self.get_absolute_url(item, 'detail')", "def item_detail(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n 
current_user.id == Item.user_id\n ).first()\n if not item:\n flash(\"Couldn't find this item\", category='warning')\n return redirect(url_for('url.index'))\n return render_template('detail.html', item=item)", "def _is_missing(self, item):\n pass", "def __contains__(self, fragment):\n return fragment in self._items", "def item_detail(request, item_id):\n # Select product based on URL param\n item = SELECT('item', where=f'id = {item_id}', _print=False)\n\n context = {\n 'item': item,\n 'photos': [item['photo_primary']] + item['photos']\n }\n return render(request, 'item_detail.html', context)", "def is_store_page(entry):\n pattern = re.compile(\"^/view\\d*/.*$\")\n return entry[\"method\"] == \"GET\" and pattern.match(entry[\"uri\"]) != None", "def check_url(url):\n return 'products.json' in url", "def detail(request, slug):\n\tarticle = get_object_or_404(Article, slug__exact=slug)\n\tcontext = {\n\t\t'article': article\n\t}\n\ttemplate = 'articles/detail.html'\n\treturn render(request, template, context)", "def test_mineral_detail_view(self):\n resp = self.client.get(reverse(\n 'minerals:detail',\n kwargs={'name': self.mineral.url_name}))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(self.mineral, resp.context['mineral'])", "def test_detail(self):\n response = self.client.get('/routines/{}/'.format(self.rout1.id))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['id'], self.rout1.id)", "def test_search_product_detail(self, setup):\n product_id = self.nutella.id\n path = reverse('website:detail', args=(product_id,))\n assert resolve(path).view_name == 'website:detail'", "def show(self, item_id):\n pass", "def test_detail_route_loads_proper_entry(testapp, fill_the_db):\n response = testapp.get('/journal/2', status=200)\n title = response.html.find_all(class_='articleTitle')[0].contents[0]\n assert title == ENTRIES[1][\"title\"]", "def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)", "def test_invoice_item_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [ \"name\", \"description\", \"image_link\", \"price\" ])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n id_itm = self._create_model(\"invoiceitem\", data, [ \"quantity\", \"quote_price\" ])\n if id_itm:\n # then performing detail\n self._detail_model(\"invoiceitem\", self.invoice_item_data, id, [ \"quantity\", \"quote_price\" ])\n self.assertIsNotNone(id_itm)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_detail(self):\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)", "def containItem(self, value):\n\t\tif self._linkHead == None:\n\t\t\treturn False\n\n\t\t_nodeCursor = self._linkHead\n\n\t\twhile _nodeCursor != None and _nodeCursor._itemValue != value:\n\t\t\t_nodeCursor = 
_nodeCursor._itemNext\n\n\t\tif _nodeCursor == None:\n\t\t\treturn False\n\n\t\treturn True", "def __contains__(self, item):\n return item in self.attrs", "def item_details(request, product_id):\n\n item = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': item,\n }\n\n return render(request, 'products/item_details.html', context)", "def Item(self) -> bool:", "def is_satisfied(self, item: Product) -> bool:\n return item.size == self.size", "def test_detail(client, auth):\n response = client.get(\"/100/detail\")\n assert response.status_code == 404\n\n response = client.get(\"/1/detail\")\n assert response.status_code == 200\n assert b\"test title\" in response.data\n assert b\"by test on 2018-01-01\" in response.data\n assert b\"test\\nbody\" in response.data\n assert b'href=\"/1/update\"' not in response.data\n\n auth.login()\n response = client.get(\"/1/detail\")\n assert b'href=\"/1/update\"' in response.data", "def test_detail_task_view(self):\n detail_url = reverse('task_detail', kwargs={'pk': self.task.pk})\n response = self.client.get(detail_url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context_data['task'], self.task)\n self.assertTemplateUsed(response, 'tasks/task_detail.html')", "def test_retrieve_item(self):\n\n\t\titem_id, title, author, location = mock_item()\n\t\titem = models.item(item_id)\n\n\t\tself.assertIsNotNone(item)\n\t\tself.assertEqual(item['id'], item_id)\n\t\tself.assertEqual(item['title'], title)\n\t\tself.assertEqual(item['author'], author)\n\t\tself.assertEqual(item['location'], location)", "def is_inline_action_item_present(self, item_name):\n inline_action_item_locator = (By.XPATH, \"//ul[@id='Actions_listbox']/li[text()='%s']\" % item_name)\n return self.is_element_present(inline_action_item_locator)", "def canDo_url(self, artMeta):\n return False", "def inconclusive_detail(self) -> Optional[pulumi.Input['InconclusiveDetailArgs']]:\n return pulumi.get(self, \"inconclusive_detail\")", "def has(cls, item):\n return item in cls.values()", "def __contains__(self, item):\n return self.contains(item)", "def test_post_detail_content(self):\n url = reverse(\n 'blog:post_detail',\n kwargs={'slug': self.post.slug}\n )\n response = self.client.get(url)\n self.assertEqual(200, response.status_code)\n self.assertTemplateUsed(response, 'blog/blog_detail.html')\n self.assertContains(response, self.post.body)", "def is_final_item(item_id):\n return \"into\" not in items[\"data\"][str(item_id)]", "def contains(self, item):\n # Find a node with the given item, if any\n node = self._find_node(item)\n # Return True if a node was found, or False\n return node is not None", "def has_url(self):\n return hasattr(self, \"_url\") and self._url is not None", "def is_valid_menu_item(self, item_name: str) -> bool:\n return item_name in self._items()" ]
[ "0.6802034", "0.6584066", "0.64352536", "0.62934774", "0.6240942", "0.6176948", "0.612433", "0.61073184", "0.59965223", "0.59712005", "0.5829178", "0.5820094", "0.58197546", "0.58129585", "0.5806847", "0.5757746", "0.57442445", "0.57396597", "0.572949", "0.5726727", "0.57168305", "0.57042515", "0.5703651", "0.56697196", "0.56697196", "0.5660976", "0.56518453", "0.56415296", "0.56321764", "0.55985856", "0.5590979", "0.5585779", "0.55781525", "0.55759645", "0.5551281", "0.5541716", "0.5540225", "0.5532723", "0.5518502", "0.5516154", "0.55110246", "0.5510332", "0.5509915", "0.5509741", "0.5474821", "0.5444169", "0.5438921", "0.5434749", "0.5432228", "0.5419128", "0.5416768", "0.53949153", "0.53850824", "0.5370724", "0.5352381", "0.53449243", "0.53430706", "0.5319778", "0.53180385", "0.5316781", "0.53045446", "0.5287856", "0.5278003", "0.52769583", "0.5264383", "0.52636784", "0.526344", "0.5261573", "0.52614313", "0.5260384", "0.5252535", "0.5249587", "0.52489644", "0.5224703", "0.52227193", "0.5211673", "0.5199542", "0.5198768", "0.5194938", "0.51919794", "0.51827157", "0.51797897", "0.5179212", "0.51673806", "0.5161658", "0.51595587", "0.5149517", "0.5148518", "0.51468956", "0.5144703", "0.5144284", "0.514094", "0.51339066", "0.51271534", "0.51239014", "0.5121219", "0.51135343", "0.51069355", "0.51050854", "0.5104033", "0.50995266" ]
0.0
-1
Save data from response to backend persistent driver. Only save the detail item from a url, filter out the overall items like
def save(self, response):
    url = response.url
    if self.item_url(url):
        table_name = self._get_table_name(url)
        if table_name:
            data = response.json()
            self.backend.save(table_name, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOTALL)\n javascript_containing_images = response.xpath('//script[contains(., \"var mygallery=\")]/text()').extract()[0]\n images = re.match(pattern, javascript_containing_images).group(2)\n image_array = json.loads(images)\n image_urls = [urlparse.urljoin(response.url, itm[1]) for itm in image_array]\n except Exception as e:\n print(\"{} - {}\".format(type(e), str(e)))\n\n tipe_mobil = response.css('#content font.vehicleinfo ~ font.warning::text').extract_first()\n model_mobil = response.css('#content font.vehicleinfo::text').extract_first()\n if tipe_mobil.lower() == model_mobil.lower():\n tipe_mobil = response.meta.get('type', None)\n main_group = response.meta.get('main_group', None)\n assembly_set = response.css('#content font.title b::text').extract_first()\n\n # sparepart items\n for row in response.css('div#content div.content table tr'):\n item = IsuzuSparepartItem()\n\n # source_url\n item['source_url'] = response.url\n\n # car model\n item['merk'] = self.name\n item['tipe_mobil'] = tipe_mobil\n item['model_mobil'] = model_mobil\n\n # images\n item['image_urls'] = image_urls\n\n # grouping/assembly\n item['main_group'] = main_group\n item['assembly_set'] = assembly_set\n\n item['key'] = row.css('td.intable:nth-child(1) .detailcontent::text').extract_first()\n item['part_number'] = row.css('td.intable:nth-child(2) .detailcontent::text').extract_first()\n item['itc'] = row.css('td.intable:nth-child(3) .detailcontent::text').extract_first()\n item['description'] = row.css('td.intable:nth-child(4) .detailcontent::text').extract_first()\n item['qty'] = row.css('td.intable:nth-child(5) .detailcontent::text').extract_first()\n item['app_date'] = row.css('td.intable:nth-child(6) .detailcontent::text').extract_first()\n item['lr'] = row.css('td.intable:nth-child(7) .detailcontent::text').extract_first()\n item['model'] = row.css('td.intable:nth-child(8) .detailcontent::text').extract_first()\n item['remarks'] = row.css('td.intable:nth-child(9) .detailcontent::text').extract_first()\n\n item_list.append(item)\n\n return item_list", "def _crawler_result(item, response, spider):\n output_data.clear()\n output_data.append(dict(item))", "def save_data(self, soup, url):\n # get the web page title\n title = soup.find('title').string\n # get the h1 tag of the page\n h1 = soup.find('h1')\n # checks if there is a h1 tag in the page\n # because is possible that a product url redirects to\n # another page.\n # In this way, only a valid product will be save.\n if h1:\n product_name = h1.contents[0].string\n page_values = PageValues(product_name, title, url, self.__csv_file_name)\n page_values.save_csv()\n else:\n # Shows the web page that have some problem.\n print('It was not possible to open {}'.format(url))", "def parse_detail_page(self, response):\n self.logger.info('Parse Detail Page function called on %s', response.url)\n item = response.meta.get('item', {})\n item['url'] = response.url\n item['title'] = response.css(TITLE_SELECTOR).extract_first(\"\").strip()\n item['price'] = self.get_price(response)\n return item", "def process_item(self, item, spider):\n tmp_dict = {}\n tmp_dict['comments'] = item['comments']\n tmp_dict['referenceName'] = item['referenceName']\n tmp_dict['referenceTime'] = item['referenceTime']\n 
tmp_dict['productColor'] = item['productColor']\n tmp_dict['productSize'] = item['productSize']\n self.savefile.write(u\"{0}\\n\".format(json.dumps(tmp_dict)))\n #raise DropItem()", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def fetch_and_save(cls, url, path):\n content = cls.fetch_with_retry(url)\n if not content:\n return False\n # print(\"Saving {}\".format(os.path.basename(path)))\n with open(path, \"wb\") as file:\n file.write(content)\n return content", "def save(self, url):\n self.database.insert({\n 'url': url,\n 'last_crawled': None,\n 'valid': True,\n 'sub_urls': [],\n })", "def save_data(db, dict_key, url, data_to_store):\n if dict_key not in db:\n db[dict_key] = []\n data = db[dict_key]\n data.append({\n 'url': url,\n 'data': data_to_store,\n })\n db[dict_key] = data", "def process_item_data(self, db, ref, response):\n raise Exception(\"To be implemented\")", "def store_item(self, item_in_json):\n item = item_in_json.copy()\n if pecan.request.headers.get('X-Public-Key'):\n if 'metadata' not in item:\n item['metadata'] = {}\n item['metadata']['public_key'] = \\\n pecan.request.headers.get('X-Public-Key')\n test_id = db.store_results(item)\n LOG.debug(item)\n return {'test_id': test_id,\n 'url': CONF.api.test_results_url % test_id}", "def process_item(self, item, spider):\n item['url'] = spider.config['site_domain'] + item[\"url\"]\n item[\"rating\"] = extract_rating(item[\"rating\"])\n item['price'] = get_price(item['price_integer'], item[\"price_decimal\"])\n item['no_discount_price'] = get_price(item['no_discount_price_integer'], item[\"no_discount_price_decimal\"])\n item[\"brand\"] = get_brand(item[\"brand\"])\n item[\"number_of_ratings\"] = get_number_of_ratings(item[\"number_of_ratings\"])\n del item['price_integer']\n del item['price_decimal']\n del item['no_discount_price_integer']\n del item[\"no_discount_price_decimal\"]\n return item", "def parse_item(self, response):\n item = IphoneSpiderItem()\n\n item['sku'] = response.meta.get('sku')\n item['price'] = response.meta.get('price')\n item['name'] = response.meta.get('name')\n item['seller'] = response.meta.get('seller')\n #pass the data from parse to parse_item\n\n url = response.url\n model = response.xpath('//*[@id=\"crumb-wrap\"]/div/div[1]/div[9]/text()').extract_first()\n color = response.xpath('//div[@data-type=\"颜色\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/i/text()').extract_first()\n memory = response.xpath('//div[@data-type=\"版本\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n memory2 = response.xpath('//div[@data-type=\"内存\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n #memory data can be stored in 版本 or 内存\n\n if memory2:\n memory = memory2.strip()\n elif memory:\n memory = memory.strip()\n\n item['model'] = model\n item['color'] = color\n item['memory'] = memory\n item['url'] = url\n\n return item", "def save(self, scraper):\n entry = HistoryEntry(scraper.url, scraper.response)\n self.load_history_entries(entry)", "def _save_SERP(\n self, response: Union[SplashJsonResponse, ScrapyHttpResponse, ScrapyTextResponse]\n ) -> None:\n\n scraped_page = ScrapedPage(\n timestamp=self.timestamp,\n source=self.source,\n merchant=self.merchant,\n country=self.country,\n url=response.url,\n 
html=response.body.decode(\"utf-8\"),\n page_type=PageType.SERP.value,\n category=response.meta.get(\"category\"),\n gender=response.meta.get(\"gender\"),\n consumer_lifestage=response.meta.get(\"consumer_lifestage\"),\n meta_information=response.meta.get(\"meta_data\"),\n )\n\n self.message_queue.add_scraping(table_name=self.table_name, scraped_page=scraped_page)", "def process_item(self, item, spider):\n if item is None:\n raise DropItem(\"Something went wrong in parsing data...\")\n try:\n self.curr.execute(\n SqlStatements.insert_new_real_estate(),\n (\n item['listing_type'],\n item['property_type'], \n item['price'], \n item['location_city'], \n item['location_city_district'], \n item['area_property'],\n item['area_land'],\n item['construction_type'],\n item['num_floors_building'],\n item['apartment_floor'],\n item['registered'],\n item['heating_type'],\n item['num_rooms'],\n item['num_bathrooms'],\n item['source']\n )\n )\n self.conn.commit()\n except Exception as e:\n print(e)\n self.conn.rollback()\n return item\n self._log_progress()\n return item", "def process_item(self, item, spider):\n\n url = item['url']\n iso_code = item['iso_code']\n result = self.item_data_store.get_item(url, iso_code)\n\n if result.data is not None:\n raise DropItem(\n f'Resource already indexed for language {iso_code}: {url}')\n\n create_result = self.item_data_store.create_item(item)\n\n if create_result.has_error():\n self.logger.error('\\n'.join(create_result.messages))\n\n return item", "def put_response(self, item):\n self.export.put_response(item)", "def store_item(self, item_in_json): # pragma: no cover\n raise NotImplementedError", "def save_item(self):\r\n raise NotImplementedError(\"Function not implemented, please implement in sub class\")", "def process_item(self, item, spider):\n session = self.Session()\n article = Article()\n restaurant = Restaurant()\n\n # populate article\n article.url = item['article_url']\n article.title = item['article_title']\n article.datetime = item['article_datetime']\n \n # populate restaurant\n restaurant.name = item['restaurant_name']\n restaurant.slug = item['restaurant_slug']\n restaurant.address = item['restaurant_address']\n restaurant.googlemaps_url = item['restaurant_googlemaps']\n restaurant.googlemaps_id = parse_googlemaps_id(restaurant.googlemaps_url)\n restaurant.lat = parse_lat(restaurant.googlemaps_url)\n restaurant.lng = parse_lng(restaurant.googlemaps_url)\n\n # determine if new article\n exist_article = session.query(Article).filter_by(url = article.url).first()\n if exist_article: \n article = exist_article\n\n # determine if new restaurant\n exist_restaurant = session.query(Restaurant).filter_by(slug = restaurant.slug).first()\n if exist_restaurant: \n restaurant = exist_restaurant\n if article not in restaurant.articles: \n restaurant.articles.append(article)\n else:\n # geocode for lat lng if necessary\n if restaurant.googlemaps_id: \n restaurant.lat, restaurant.lng, restaurant.address = convert_id(restaurant.googlemaps_id)\n # add article to restaurant.articles\n restaurant.articles.append(article)\n\n try:\n session.add(restaurant)\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item", "def save_data(url, file):\n with open(file, 'w') as f:\n json.dump(get_json_data(url), f)", "def download(self, item, save_dir='./'):\r\n try:\r\n os.makedirs(save_dir)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST and os.path.isdir(save_dir):\r\n # another thread beat us to creating this 
dir\r\n pass\r\n else:\r\n # target dir exists as a file, or a different error\r\n raise\r\n\r\n item['url'] = item[item['type'] + 's']['standard_resolution']['url'].split('?')[0]\r\n # remove dimensions to get largest image\r\n item['url'] = re.sub(r'/s\\d{3,}x\\d{3,}/', '/', item['url']) \r\n\r\n base_name = item['url'].split('/')[-1]\r\n file_path = os.path.join(save_dir, base_name)\r\n\r\n if not os.path.isfile(file_path):\r\n\r\n with open(file_path, 'wb') as file:\r\n try:\r\n bytes = requests.get(item['url']).content\r\n except requests.exceptions.ConnectionError:\r\n\t\t\t\t\tsleep(5)\r\n\t\t\t\t\tbytes = requests.get(item['url']).content\r\n\t\t\t\t\t\r\n file.write(bytes)\r\n\r\n file_time = int(item['created_time'])\r\n os.utime(file_path, (file_time, file_time))", "def parse_product(self, resp):\n loader = ItemLoader(item=EstateProperty(), response=resp)\n loader.add_value(\"url\", resp.request.url)\n\n # for the standard fields, extraction is straight forward\n for field, xpath in list(self.standard_fields.items()):\n loader.add_xpath(field, xpath)\n\n # exclude items where price is blank\n # may correspond to rentals\n price = resp.xpath(self.standard_fields['price']).extract_first()\n if price is None or price.strip()==\"\":\n # mark the item as dirty\n # to avoid sending it\n loader.add_value('is_dirty', True)\n\n # some items' titles are stored in a legacy path\n title = resp.xpath(self.standard_fields['title']).extract_first()\n if title is None or title.strip()==\"\":\n # try another way\n title = resp.xpath(self.special_fields['title_legacy']).extract_first()\n if title is None or title.strip()==\"\":\n # mark it dirty\n loader.add_value('is_dirty', True)\n else:\n loader.add_value('title', title)\n\n # sku is preprended by dirty text\n sku_dirty = resp.xpath(self.special_fields['sku']).extract_first()\n try:\n m = re.search(r'\\s{0,}\\S{3}\\s{1,}(?P<ref>.+)\\s{0,}', sku_dirty)\n loader.add_value('sku', m.group('ref'))\n except Exception as e:\n self.logger.error(e)\n loader.add_value('is_dirty', True)\n\n area_dirty = resp.xpath(self.special_fields['area']).extract_first()\n try:\n m = re.search(r'(?P<area>\\d+)\\sm.+', area_dirty)\n float_area = float(m.group('area'))\n loader.add_value('area', float_area)\n except Exception as e:\n self.logger.error(e)\n # parsing error on area is not a cause of dirty item\n\n yield loader.load_item()", "def process_item(self, item, spider):\n session = self.Session()\n real = Reals(**item)\n\n try:\n session.add(real)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item", "def extract_data():\n books = WebScraper().get_top_100_data()\n time.sleep(2)\n BookDetailsWebScrapper().save_book_details(books)\n _save_extract_state(books)", "def get_detail(self, appid):\n item = {}\n detail = self.details(appid)\n if not detail.docV2.docid:\n raise AppNotFoundError(appid)\n item[\"appid\"] = appid\n item[\"version_code\"] = detail.docV2.details.appDetails.versionCode\n item[\"offer_type\"] = detail.docV2.offer[0].offerType\n category = detail.docV2.details.appDetails.appCategory[0]\n item[\"category_id\"] = CATEGORY_MAP[category]\n item[\"description\"] = detail.docV2.descriptionHtml\n # detect the string language from description, return ISO 639-1 language code.\n item[\"lang\"] = unicode(guess_language(item[\"description\"] or 'en'))\n item[\"developer\"] = detail.docV2.details.appDetails.developerName\n item[\"group\"] = GROUP_MAP.get(detail.docV2.details.appDetails.appType) or 'app'\n 
item[\"icon\"] = [img.imageUrl for img in detail.docV2.image if img.imageType == 4][0]\n item[\"is_deleted\"] = False\n item[\"name\"] = detail.docV2.title\n # for url seo\n name = re.sub(ur\"\"\"\\$|\\%|\\(|\\)|\\[|\\[|\\]|\\*|\\ |\\®|\\#|\\~|\\`|\\@|\\^|\\&|\\{|\\}|\\<|\\>|\\?|\\\"|\\'|\\’|\\–|\\:|\\;|\\||\\/|\\+|\\!|\\•|\\,|\\™|\\_|\\.\"\"\", '-', item['name'])\n name_url = urllib.quote(name.encode('utf-8'))\n if \"%\" not in name_url:\n item['name_url'] = name_url\n item[\"operating_systems\"] = \"\"\n item[\"order\"] = 0\n item[\"rating\"] = detail.docV2.aggregateRating.starRating\n item['rating_user'] = humanize.intcomma(detail.docV2.aggregateRating.ratingsCount)\n\n total_count = detail.docV2.details.appDetails.numDownloads\n item[\"total_count\"] = remove_downloads(total_count)\n item[\"download_count\"] = strCount_to_intCount(total_count)\n\n item[\"release_time\"] = detail.docV2.details.appDetails.uploadDate\n item[\"screenshot\"] = [img.imageUrl for img in detail.docV2.image if img.imageType == 1]\n item[\"update_info\"] = detail.docV2.details.appDetails.recentChangesHtml\n item[\"version\"] = detail.docV2.details.appDetails.versionString\n item[\"offer_type\"] = detail.docV2.offer[0].offerType\n item[\"size\"] = humanize.naturalsize(detail.docV2.details.appDetails.installationSize, gnu=True)\n item[\"source\"] = 'crawler'\n item[\"channel\"] = 'googleplay'\n item[\"price\"] = detail.docV2.offer[0].formattedAmount.lower()\n item[\"paid\"] = 1\n item[\"search_order\"] = 0\n item[\"search_reindex\"] = 1\n item['app_status'] = 0\n\n return item", "def store_feed(e):\n query = WebResource.query().filter(WebResource.url == e[\"link\"])\n if query.count() == 0:\n print \"STORING: \" + e[\"link\"]\n try:\n if 'summary' in e:\n s, t = BeautifulSoup(e['summary'], \"lxml\"), BeautifulSoup(e['title'], \"lxml\")\n e['summary'], e['title'] = s.get_text(), t.get_text()\n else:\n t = BeautifulSoup(e['title'], \"lxml\")\n e['summary'], e['title'] = None , t.get_text()\n k = WebResource.store_feed(e)\n print \"STORED: \" + str(k)\n return k\n except Exception as e:\n print \"Cannot Store: \" + str(e)\n return None\n else:\n print \"Resource already stored\"\n return None", "def extract(self, response):\n\n #grab the BusinessItem passed in from the caller\n i = None\n try:\n i = response.meta['item']\n except Exception:\n i = BusinessItem()\n\n log.msg('passed in item={0}'.format(i), log.DEBUG)\n\n l = BusinessLoader(item=i, response=response)\n\n #Assume url pattern is /<addressLocality>/<category>/<duid>/<name>.html\n data_uid = re.match(pattern=u'.*COMPANYID=(\\d+)$', string=response.url).group(1).lstrip('0')\n\n l.add_xpath('description', '//*[@id=\"ctl00_ctl00_body_maincontentblock_lblProductandServices\"]/ text()')\n\n #List of strings which, when joined, form the address. 
form is <streetAddress>, <optional: streetAddress>, <addressLocality and state and postalCode>\n address_fields = response.xpath('//*[@id=\"ctl00_ctl00_body_maincontentblock_lblcoAddress\"]/ text()').extract()\n m = re.match(pattern=u'^([\\w\\s]*),\\s+([\\w\\s]+)[\\xa0]+(\\S+)$', string=address_fields[-1])\n\n l.add_value('streetAddress', address_fields[0])\n\n if len(address_fields) is 3:\n l.add_value('streetAddress', address_fields[1])\n\n l.add_value('addressLocality', m.group(1))\n l.add_value('addressRegion', m.group(2))\n l.add_value('postalCode', m.group(3))\n\n #Extract any social media links\n social_media_links = response.xpath('//table[@id=\"ctl00_ctl00_body_maincontentblock_gvSocialMedia\"]//a/ @href').extract()\n for link in social_media_links:\n if 'linkedin.com' in link:\n l.add_value('linkedin', unicode(link))\n elif 'twitter.com' in link:\n l.add_value('twitter', unicode(link))\n elif 'facebook.com' in link:\n l.add_value('facebook', unicode(link))\n\n l.add_value(\"data_uid\", unicode(data_uid))\n l.add_value(\"data_url\", unicode(response.url))\n\n return l.load_item()", "def save_response(self, request, response):\n response_dict = self.process_response(request.path, response)\n try:\n self.ser.info(pickle.dumps(response_dict))\n self.ser.info(RESPONSE_UNIQUE_STRING)\n except (TypeError, pickle.PicklingError):\n #Can't pickle wsgi.error objects\n pass", "def _handle_search_results(self, response: TextResponse) -> ScrapyYelpItem:\n\n # get yConfig\n pattern = re.compile(r\"\"\"\\n\\s+yConfig\\s+=\\s+\"\"\", re.MULTILINE | re.DOTALL)\n soup = BeautifulSoup(response.text, \"html.parser\")\n script = soup.find(\"script\", text=pattern)\n myjson = script.get_text()\n # remove start pattern (js assignment)\n s = re.sub(pattern, '', myjson)\n # remove html (parser problems)\n s = re.sub('<[^<]+?>', '', s)\n # remove last semi colon (end-of-data)\n s = s[0:s.rfind(';')]\n json_object = json.loads(s,strict=False)\n\n keys = [x for x in json_object[\"js_display\"][\"hovercard_data\"] if x.isnumeric()]\n # first part is the hovercard data - which contains most of the aggregate biz informative\n # such as total_reviews and summary_score\n df_hovercard_data = pd.DataFrame()\n for x in keys:\n tmpdf = json_normalize(json_object[\"js_display\"][\"hovercard_data\"][x])\n df_hovercard_data = df_hovercard_data.append(tmpdf,ignore_index=True)\n\n df_hovercard_data = df_hovercard_data.set_index(\"result_number\")\n df_hovercard_data.index = df_hovercard_data.index.astype(int)\n # second part is the resourceid which might be useful later on, not sure if this is used at all, but\n # it serves as a good example of how to join to other \"parts\" of the nested json structure and flatten it\n df_markers = json_normalize(json_object[\"js_display\"][\"map_state\"][\"markers\"])\n df_markers = df_markers[df_markers['resourceType'] == 'business'].loc[:, [\"url\",\"resourceId\",\"hovercardId\",\"label\",\"location.latitude\",\"location.longitude\",]]\n df_markers = df_markers.set_index('label')\n df_markers.index = df_markers.index.astype(int)\n\n # combine data into a single dataframe which will eventually be written out by our pipeline\n df = df_hovercard_data.join(df_markers)\n\n # at this point we want to also scrape the indvidual biz listing for the menu, syntax is verbose here\n\n\n ## deubg write to file\n #json_formatted = json.dumps(json_object, indent=2)\n # print(json_formatted)\n # with open(\"files/\"+'blah.json', 'wb') as file:\n # file.write(str.encode(json_formatted))\n\n \"\"\"\n\n 
Here is a smample of what the yConfig object looks like:\n\n json_object.keys() ====>\n ['cookies', 'gaConfig', 'adjustAndroidPaidTrafficUrl', 'webviewFlow', 'enabledSitRepChannels',\n isWebviewRequest', 'js_display', 'isLoggedIn', 'uaInfo', 'isSitRepEnabled', 'comscore', 'isBugsnagEnabled',\n 'support', 'deprecatedEncryptedYUV', 'vendorExternalURLs', 'smartBannerFallbackActive', 'version',\n 'recaptchaV3PublicKey', 'googlePlacesUrl', 'redesignActive', 'currentBaseLang', 'isClientErrorsEnabled',\n 'uniqueRequestId', 'yelpcodeTemplateVersion', 'appInstallDialogEnabled', 'smartBannerPersistent',\n 'imageUrls', 'siteUrl', 'referrer', 'webviewInfo', 'cookieDomain', 'recaptchaPublicKey',\n 'send_user_agent_to_ga', 'pGifUrl']\n\n\n json_object[\"js_display\"].keys() ===>\n ['polyglot_translations', 'raq_links', 'locale', 'hovercard_data', 'is_first_ad_hovercard_opened',\n 'zoom', 'centerLng', 'map_state', 'advertising_business_id_list', 'centerLat', 'pager']\n\n json_object[\"js_display\"][\"hovercard_data\"] ==>\n '1': {'resource_id': None,\n 'result_number': 1,\n 'biz': {'alias': 'lou-malnatis-pizzeria-chicago',\n 'review_count': 5998,\n 'name': \"Lou Malnati's Pizzeria\",\n 'rating': 4.07785928642881,\n 'url': 'https://m.yelp.com/biz/lou-malnatis-pizzeria-chicago',\n 'price': '$$',\n 'categories': 'Pizza, Italian, Sandwiches',\n 'distance': '2.5 mi'},\n 'lat': 41.890357,\n 'lng': -87.633704,\n 'type': 'natural'},\n '2': {'resource_id': None,\n ....\n\n\n json_object[\"js_display\"][\"map_state\"][\"markers\"] ===>\n [{'resourceType': 'business',\n 'url': '/biz/lou-malnatis-pizzeria-chicago',\n 'resourceId': '8vFJH_paXsMocmEO_KAa3w',\n 'label': '1',\n 'shouldOpenInNewTab': False,\n 'location': {'latitude': 41.890357, 'longitude': -87.633704},\n 'key': 1,\n 'hovercardId': 'Q6nXAEw3UuAVFSztE4lPnA',\n 'icon': {'name': 'business',\n 'anchorOffset': [12, 32],\n 'activeOrigin': [24, 0],\n 'scaledSize': [48, 320],\n 'regularUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'size': [24, 32],\n 'activeUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'regularOrigin': [0, 0]}},\n {'resourceType': 'business',\n 'url': '/biz/pequods-pizzeria-chicago',\n 'resourceId': 'DXwSYgiXqIVNdO9dazel6w',\n 'label': '2',\n 'shouldOpenInNew\n ...\n\n \"\"\"\n #print(json_object[\"js_display\"][\"hovercard_data\"])\n\n\n\n return df", "def parse_detail(self, response):\n loader = BeiBeiProductLoader(BeibeiProduct(), response=response)\n match = re.search(r'/detail/p/([0-9]+)\\.html',\n response.url)\n if not match:\n self.logger.warn(\"product id not found from URL: %s\", response.url)\n return\n product_id = int(match.group(1))\n loader.add_value(\"id\", product_id)\n loader.add_css(\"name\", \"h3.over-title::text\")\n loader.add_value(\"category\", response.meta[\"cate_name\"])\n loader.add_css(\"description\", \"p.over-memo::text\")\n img_info = self.parse_images(response.text)\n for v in img_info.values():\n loader.add_value(\"file_urls\", v)\n loader.add_value(\"img_info\", img_info)\n yield loader.load_item()\n yield from self.parse_sku(product_id, response.text, img_info)", "def parse_inquiry_response(self, response):\n inquiry_item = response.meta.get('inquiry_item',None) # allow testing single urls for parsing errors\n source_link = response.url\n parl_id = response.url.split('/')[-2]\n title = INQUIRY.TITLE.xt(response)\n description = INQUIRY.RESPONSEDESCRIPTION.xt(response)\n LLP = inquiry_item.legislative_period if inquiry_item else 
None\n category = INQUIRY.CATEGORY.xt(response)\n\n # Get or create Category object for the inquiry and log to screen if new\n # category is created.\n cat, created = Category.objects.get_or_create(title=category)\n if created:\n log.msg(u\"Created category {}\".format(\n green(u'[{}]'.format(category))),level=log.DEBUG)\n\n try:\n sender_object = Person.objects.get(\n parl_id=INQUIRY.RESPONSESENDER.xt(response))\n except Exception, e:\n self.logger.warning(red(u'Sender \"{}\" was not found in database, skipping Inquiry {} in LLP {}'.format(\n INQUIRY.RESPONSESENDER.xt(response), parl_id, LLP)))\n return\n\n if not inquiry_item:\n print locals()\n return # allow testing single urls for parsing errors\n\n # Create or update Inquiry item\n inquiryresponse_item, inquiryresponse_created = InquiryResponse.objects.update_or_create(\n parl_id=parl_id,\n legislative_period=LLP,\n defaults={\n 'title': title,\n 'source_link': source_link,\n 'description': description,\n 'sender': sender_object\n }\n )\n\n # Attach foreign Keys\n inquiryresponse_item.documents = self.parse_response_docs(response)\n inquiryresponse_item.category = cat\n\n # Save InquiryResponse object\n inquiryresponse_item.save()\n\n if inquiryresponse_created:\n logtext = u\"[{} of {}] Created InquiryResponse {} with ID {}, LLP {} @ {}\"\n else:\n logtext = u\"[{} of {}] Updated InquiryResponse {} with ID {}, LLP {} @ {}\"\n\n logtext = logtext.format(\n self.SCRAPED_COUNTER,\n self.TOTAL_COUNTER,\n cyan(title),\n cyan(u\"{}\".format(parl_id)),\n green(unicode(LLP)),\n blue(response.url)\n )\n log.msg(logtext, level=log.DEBUG if self.SCRAPED_COUNTER!=0 else log.INFO)\n\n inquiry_item.response = inquiryresponse_item\n inquiry_item.status = 'response_received'\n inquiry_item.save()\n\n return", "def _do_upsert(self, conn, item, spider):\n query_check = \"select * from %s where url = %%s\" % spider.name\n conn.execute(query_check, (item['url'], ))\n result = conn.fetchone()\n if result:\n query_udpate = \"UPDATE %s SET price=%ss\" % spider.name\n conn.execute(query_udpate, (item['price']))\n log.msg(\"Item updated in db: %s\" % item, level=log.DEBUG)\n else:\n query_insert = \"INSERT INTO %s (title, company, description, price, status, image, url, category) VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s, %%s)\" % spider.name\n conn.execute(query_insert,\n (item['title'], item['company'], item['description'], item['price'], item['status'], item['image'], item['url'], item['category']))\n log.msg(\"Item stored in db: %s\" % item, level=log.DEBUG)", "def parse_item(self, response):\n self.check_Tor_time()\n print(\"Looking\", response.url)\n # Create the loader using the response\n l = ItemLoader(item=PropertiesItem(), response=response)\n l.default_output_processor = TakeFirst()\n try:\n self.fill_from_Json(l)\n except Exception as e:\n print('exception->', e)\n print('1')\n for node in response.css('div.padding-phone-only > .padding-small-top'):\n try:\n title = node.xpath('div[1]/h6/text()').extract()\n except Exception as e:\n print 1, e\n print('title:', title)\n try:\n val = node.xpath('div[2]/text()').extract()\n except Exception as e:\n print 2, e\n try:\n if \"code\" in title[0]:\n l.add_value('unique_id', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Bedrooms\" in title[0]:\n l.add_value('property_rooms_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Construction\" in title[0]:\n l.add_value('construction_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Modified\" in title[0]:\n 
l.add_value('on_site_date', node.xpath('div[2]/time/text()').extract()[0],\n MapCompose(\n lambda i: parse(i, fuzzy=True)))\n print(node.xpath('div[2]/time/text()').extract())\n except Exception as e:\n print 3, e\n print('2')\n # Housekeeping fields\n l.add_value('url', response.url)\n # l.add_value('spider', self.name)\n l.add_value('source', self.allowed_domains[0])\n l.add_value('imported_date', datetime.now())\n l.add_value('asset_type', 'realestate')\n l.add_value('transaction_type', 'commercial')\n tp = response.xpath(\n '//*[@id=\\\"breadCrumbs\\\"]/a[1]/text()').extract()[0]\n print('3')\n if \"Sales\" in tp:\n l.replace_value('property_buy_or_rent', \"sale\")\n else:\n l.replace_value('property_buy_or_rent', \"rent\")\n if \"residential\" in tp:\n l.add_value('category_major', \"residential\")\n elif \"commercial\" in tp:\n l.add_value('category_major', \"commercial\")\n else:\n l.add_value('category_major', \"land\")\n # a = l.load_item()\n # print(a)\n # return\n print('4')\n\n print(l)\n return l.load_item()", "def get_details(self):\n url_data = self.url.get()\n\n self.scraper.parser.set_buffer(1, url_data)\n self.scraper.parser.set_buffer(2, self.id)\n ep_details = self.scraper.parser.parse(FN_GET_EPISODE_DETAILS,\n self.scraper.settings)\n \n self.extended_details = ep_details\n self.actors = []\n self.credits = []\n\n self.scraper.logger.debug(\"set_details: %s\" % repr(ep_details))\n dom = parseString(ep_details)\n episode = dom.firstChild\n\n self.title = get_child_data(episode, \"title\", self.title)\n self.plot = get_child_data(episode, \"plot\", \"\")\n self.aired = get_child_data(episode, \"aired\")\n self.thumbnail = get_child_data(episode, \"thumb\")\n self.director = get_child_data(episode, \"director\")\n self.rating = try_float(get_child_data(episode, \"rating\"))\n self.episode_number = try_int(get_child_data(episode, \"episode\"))\n self.season_number = try_int(get_child_data(episode, \"season\"))\n\n credit = first_child(episode, \"credits\")\n while credit:\n if credit.firstChild and len(credit.firstChild.data) > 0:\n self.credits.append(credit.firstChild.data)\n credit = next_sibling(credit, \"credits\")\n\n actor = first_child(episode, \"actor\")\n while actor:\n actor_name = get_child_data(actor, \"name\")\n if actor_name is not None:\n self.actors.append(actor_name)\n actor = next_sibling(actor, \"actor\")\n\n dom.unlink()\n dom = None\n return", "def process_item(self, item, spider):\n session = self.Session()\n if 'isAlbum' in item:\n album = Album()\n album.imgSrc = item[\"imgSrc\"]\n album.titleCn = item[\"titleCn\"]\n album.titleEn = item[\"titleEn\"]\n try:\n session.add(album)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n DropItem(\"Album should be input once\")\n else:\n query = session.query(Album).filter(Album.imgSrc == item['imgSrc'])\n song = Song()\n\n song.subTitle = item[\"subTitle\"]\n song.lowUrl = item[\"lowUrl\"]\n song.highUrl = item[\"highUrl\"]\n song.serial = item['serial']\n song.album_id = query.first().id\n try:\n session.add(song)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item", "def save_record(record):\n record. 
save_details()", "def save_master_details(app, item_details):\n print('Saving master details to database...')\n changes_made = False\n errors = set()\n\n for detail in item_details:\n\n item_row = app.db.session.query(app.MasterDetails).filter(app.MasterDetails.sb_id == detail['id']).first()\n\n if item_row is None:\n sb_id = detail['id']\n parentId = detail['parentId']\n projectId = detail['proj_id']\n projectTitle = detail['proj_title']\n projectSize = detail['proj_size']\n num_files = detail['num_files']\n start_date = detail['start_date']\n end_date = detail['end_date']\n pub_date = detail['pub_date']\n casc = detail['casc']\n fy = detail['FY']\n url = detail['url']\n xml_urls = detail['xml_urls']\n relatedItemsUrl = detail['relatedItemsUrl']\n title = detail['title']\n summary = detail['summary']\n item_type = detail['item_type']\n PI = ''\n CI = ''\n for contact in detail['contacts']:\n if contact['type'] == 'Principal Investigator':\n PI = contact['name']\n elif contact['type'] in ['Co-Investigator', 'Cooperator/Partner']:\n CI += contact['name'] + ';'\n CI = CI.strip(';')\n\n detail_row = app.MasterDetails(sb_id = sb_id,\n parentId = parentId,\n projectId = projectId,\n projectTitle = projectTitle,\n projectSize = projectSize,\n num_files = num_files,\n start_date = start_date,\n end_date = end_date,\n pub_date = pub_date,\n casc = casc,\n fy = fy,\n url = url,\n xml_urls = xml_urls,\n relatedItemsUrl = relatedItemsUrl,\n title = title,\n summary = summary,\n PI = PI,\n CI = CI,\n item_type = item_type)\n \n app.db.session.add(detail_row)\n changes_made = True\n else:\n if item_row.parentId != detail['parentId']:\n item_row.parentId = detail['parentId']\n changes_made = True\n if item_row.projectId != detail['proj_id']:\n item_row.projectId = detail['proj_id']\n changes_made = True\n if item_row.projectTitle != detail['proj_title']:\n item_row.projectTitle = detail['proj_title']\n changes_made = True\n if item_row.projectSize != detail['proj_size']:\n item_row.projectSize = detail['proj_size']\n changes_made = True\n if item_row.num_files != detail['num_files']:\n item_row.num_files = detail['num_files']\n changes_made = True\n if item_row.start_date != detail['start_date']:\n item_row.start_date = detail['start_date']\n changes_made = True\n if item_row.end_date != detail['end_date']:\n item_row.end_date = detail['end_date']\n changes_made = True\n if item_row.pub_date != detail['pub_date']:\n item_row.pub_date = detail['pub_date']\n changes_made = True\n if item_row.casc != detail['casc']:\n item_row.casc = detail['casc']\n changes_made = True\n if item_row.fy != detail['FY']:\n item_row.fy = detail['FY']\n changes_made = True\n if item_row.url != detail['url']:\n item_row.url = detail['url']\n changes_made = True\n if item_row.xml_urls != detail['xml_urls']:\n item_row.xml_urls = detail['xml_urls']\n changes_made = True\n if item_row.relatedItemsUrl != detail['relatedItemsUrl']:\n item_row.relatedItemsUrl = detail['relatedItemsUrl']\n changes_made = True\n if item_row.title != detail['title']:\n item_row.title = detail['title']\n changes_made = True\n if item_row.summary != detail['summary']:\n item_row.summary = detail['summary']\n changes_made = True\n if item_row.item_type != detail['item_type']:\n item_row.item_type = detail['item_type']\n changes_made = True\n PI = ''\n CI = ''\n for contact in detail['contacts']:\n if contact['type'] == 'Principal Investigator':\n PI = contact['name']\n elif (contact['type'] in\n ['Co-Investigator', 'Cooperator/Partner']):\n CI += 
contact['name'] + ';'\n CI = CI.strip(';')\n if item_row.PI != PI:\n item_row.PI = PI\n changes_made = True\n if item_row.CI != CI:\n item_row.CI = CI\n changes_made = True\n\n if changes_made:\n app.db.session.commit()\n print('Master table updated and saved to database')\n else:\n print('Master table already up-to-date... no changes made')\n\n print('Testing master table with item id 57d84c15e4b090824ff9ac75:')\n test = app.db.session.query(app.MasterDetails).filter(app.MasterDetails.sb_id == '57d84c15e4b090824ff9ac75').first()\n if test is None:\n print('Not found! Something may be wrong')\n else:\n print('Success! Sample details for item id 57d84c15e4b090824ff9ac75:')\n print('Item type: {}'.format(test.item_type))\n print('Number of files: {}'.format(test.num_files))\n print('Start date: {}'.format(test.start_date))\n print('End date: {}'.format(test.end_date))\n print('Publication date: {}'.format(test.pub_date))\n print('XML url(s): {}'.format(test.xml_urls))\n print('Done!')\n print()", "def parse_details(self, response, item=None):\n \n assert item is not None, \"Provide an item\"\n \n if response:\n # Use individual WARN notice url\n item['url'] = response.url\n\n fields = item['fields']\n \n dt = get_text_of_matching_elements(response, '//dt')\n dd = get_text_of_matching_elements(response, '//dd')\n\n data = dict(zip(dt, dd))\n \n # Update fields with additional data\n fields.update(data)\n item['fields'] = fields\n\n # Generate normalized fields\n norm_fields = get_normalized_fields(self.fields_dict, pd.Series(fields)).to_dict()\n item['normalized_fields'] = norm_fields \n\n yield item", "def parse_abstract(self, response):\n\n if self.is_visited(response.url) == True:\n return None\n \n\n hxs = HtmlXPathSelector(response)\n item = ReportAbstractItem()\n\n url = response.url\n title = hxs.select(\"//td[@class='f20blue tdc']/text()\").extract()[0]\n date = hxs.select(\"//div[@class='f_black f_14']/text()\").extract()[0]\n abstract = hxs.select(\"//table[@class='f_black f_14']//td\").extract()[0]\n link = hxs.select(\"//a[contains(@href,'ShowNotesDocumentFile')]/@href\").extract()[0]\n link = \"http://www.gtja.com\" + link\n \n item[\"url\"] = unquote(response.url)\n item[\"title\"] = title\n item[\"date\"] = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n item[\"abstract\"] = abstract\n item[\"link\"] = link\n item[\"create_date\"] = datetime.datetime.now()\n \n time_delta = datetime.datetime.now() - item[\"date\"]\n if settings[\"EXPIRE_DAYS\"] and time_delta.days >= settings[\"EXPIRE_DAYS\"]:\n self.expired = True\n \n if self.expired == True:\n return\n \n self.visit(response.url)\n\n return item", "def scrape_detail(self,url):\n soup=get_soup(url)\n self.zip=soup.find('p',class_='block-normal hide-for-small-only text-small hero-ranking-data-contact').stripped_strings.__next__()[-5::1]\n if self.zip in zips:\n #print('DUPLICATE!')\n zips.append(self.zip)\n info_tags=soup.find_all('span',class_='heading-small text-black text-tight block-flush display-block-for-large-up')\n self.type=info_tags[0].string.strip()\n self.year_founded=int(info_tags[1].string.strip())\n self.setting=info_tags[4].string.strip()\n self.endowment=info_tags[5].string.strip()", "def _fetch_info(self, items, write, force):\n tags = self.config['tags'].as_str_seq()\n for item in items:\n # If we're not forcing re-downloading for all tracks, check\n # whether the data is already present. 
We use one\n # representative field name to check for previously fetched\n # data.\n if not force:\n mood_str = item.get('mood_acoustic', '')\n if mood_str:\n self._log.info('data already present for: {}', item)\n continue\n\n # We can only fetch data for tracks with MBIDs.\n if not item.mb_trackid:\n continue\n\n self._log.info('getting data for: {}', item)\n data = self._get_data(item.mb_trackid)\n if data:\n for attr, val in self._map_data_to_scheme(data, ABSCHEME):\n if not tags or attr in tags:\n self._log.debug('attribute {} of {} set to {}',\n attr,\n item,\n val)\n setattr(item, attr, val)\n else:\n self._log.debug('skipping attribute {} of {}'\n ' (value {}) due to config',\n attr,\n item,\n val)\n item.store()\n if write:\n item.try_write()", "def process_item(self, item, spider):\n session = self.Session()\n product = Product()\n subcategory = Subcategory()\n category = Category()\n product.name = item[\"title\"]\n product.source = item[\"source\"]\n if 'rate' in item:\n product.rate = item[\"rate\"]\n if 'safety' in item:\n product.safety = item[\"safety\"]\n if 'quality' in item:\n product.quality = item[\"quality\"]\n subcategory.name = item[\"subcategory\"]\n category.name = item[\"category\"]\n\n # Check for product duplicate\n exist_product = session.query(Product).filter_by(name = product.name).first()\n if exist_product is not None:\n exist_product.rate = product.rate\n exist_product.safety = product.safety\n exist_product.quality = product.quality\n exist_product.source = product.source\n else:\n # Check for subcategory duplicate\n exist_subcategory = session.query(Subcategory).filter_by(name = subcategory.name).first()\n if exist_subcategory is not None:\n exist_subcategory.products.append(product)\n else:\n subcategory.products.append(product)\n # Check for category duplicate\n exist_category = session.query(Category).filter_by(name = category.name).first()\n if exist_category is not None:\n exist_category.subcategories.append(subcategory)\n else:\n category.subcategories.append(subcategory)\n \n try:\n session.add(product)\n except:\n session.rollback()\n raise\n\n try:\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item", "def process_item(self, item, spider):\n try:\n self.db[self.collection_name].insert_one(dict(item))\n except Exception as e:\n logger.debug(str(e))\n return item", "def save_book_details(self, book_dict_with_href):\n for i, book_dict in enumerate(book_dict_with_href):\n print(f'Saving data for book {i + 1}')\n self._save_book_detail(book_dict, i)", "def scrape_and_save(elements: t.List):\n for el in elements:\n # Get the urls to all the images\n # print(el.get_attribute(\"src\"))\n \"\"\"\n https://scontent-sjc3-1.cdninstagram.com/v/t51.2885-19/s150x150/94194265_2959226834168352_8521584817458905088_n.\n jpg?_nc_ht=scontent-sjc3-1.cdninstagram.com&_nc_ohc=cIBSnUnifq4AX-hiNAH&oh=695b693aece7e53daa57902cd17c897e&oe=5\n F49F505\n \"\"\"\n img_url: str = el.get_attribute(\"src\")\n base_url: str = urlparse(\n img_url\n ).path # Also .params, .query, .fragment\n # print(f\"base_url: {base_url}\")\n filename: str = os.path.basename(base_url)\n # print(f\"filename: {filename}\")\n filepath: str = os.path.join(data_dir, filename)\n # print(f\"filepath: {filepath}\")\n\n # If file already saved/exists then skip\n if os.path.exists(filepath):\n continue\n\n # Make a GET request to the img_url and extract/save data\n with requests.get(img_url, stream=True) as r:\n try:\n r.raise_for_status()\n except:\n 
continue\n # Save data to the file\n with open(filepath, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=8192):\n if chunk:\n # print(f\"Writing chunk: {chunk}\")\n f.write(chunk)", "def save_items(self):\n raise NotImplementedError()", "def process_item(self, item, spider):\n print item\n try:\n self.cursor.execute(\n \"insert into {0} (city, priceToIncomeRatio, grossRentalYieldCityCentre, grossRentalYieldOutsideOfCentre, priceToRentRatioCityCentre, priceToRentRatioOutsideOfCityCentre, mortgageAsAPercentageOfIncome, affordabilityIndex) values (?, ?, ?, ?, ?, ?, ?, ?)\".format(spider.name),\n (item['city'], item['priceToIncomeRatio'], item['grossRentalYieldCityCentre'], item['grossRentalYieldOutsideOfCentre'], item['priceToRentRatioCityCentre'], item['priceToRentRatioOutsideOfCityCentre'], item['mortgageAsAPercentageOfIncome'], item['affordabilityIndex']))\n self.connection.commit()\n except:\n ''\n #import sys\n #sys.exit()\n\n log.msg(\"Item stored : \" % item, level=log.DEBUG)\n return item", "def process_item(self, item, spider):\n\n # strip non ascii chars\n item['raw_content'] = filter(lambda x : ord(x) < 128, item['raw_content'])\n #item['raw_content'] = ''.join(c for c in item['raw_content'] if ord(c) < 128)\n\n # hash the filename to prevent storing too-long file names\n hash_data = item['filename'] + item['user_agent'].ua_string\n filename = sha1(hash_data).hexdigest()\n\n # Javascript MIME types\n js_mimes = ('text/javascript',\n 'application/x-javascript',\n 'application/javascript')\n\n # Parse each file based on what its MIME specifies\n if 'text/html' == item['content_type']:\n # First save the request contents into a URLContent\n urlcontent,_ = model.URLContent.objects.get_or_create(\n url_scan=item['urlscan'],\n user_agent=item['user_agent'],\n defaults={'redirected_from':item['redirected_from']})\n\n # Store raw markup\n file_content = ContentFile(item['raw_content'])\n urlcontent.raw_markup.save(filename, file_content)\n urlcontent.raw_markup.close()\n\n # Store raw headers\n file_content = ContentFile(item['headers'])\n urlcontent.headers.save(filename, file_content)\n urlcontent.headers.close()\n\n urlcontent.save()\n\n elif any(mime == item['content_type'] for mime in js_mimes):\n urlcontent = model.URLContent.objects.get(\n url_scan=item['urlscan'],\n user_agent=item['user_agent'])\n\n linkedjs, _ = model.LinkedJS.objects.get_or_create(\n batch=spider.batch,\n url_hash=sha256(item['url']).hexdigest(),\n defaults={'url': item['url']},\n )\n\n # Store raw js\n file_content = ContentFile(item['raw_content'])\n linkedjs.raw_js.save(filename, file_content)\n linkedjs.raw_js.close()\n\n linkedjs.save()\n\n # Create relationship with url content\n linkedjs.linked_from.add(urlcontent)\n\n elif 'text/css' == item['content_type']:\n urlcontent = model.URLContent.objects.get(\n url_scan=item['urlscan'],\n user_agent=item['user_agent'])\n\n linkedcss, created = model.LinkedCSS.objects.get_or_create(\n batch = spider.batch,\n url_hash=sha256(item['url']).hexdigest(),\n defaults={\n 'url': item['url'],\n },\n )\n\n # Store raw css\n file_content = ContentFile(item['raw_content'])\n linkedcss.raw_css.save(filename, file_content)\n linkedcss.raw_css.close()\n\n linkedcss.save()\n\n # Create relationship with url content\n linkedcss.linked_from.add(urlcontent)\n\n if created:\n # Parse out rules and properties\n use_celery = getattr(settings, 'USE_CELERY', False)\n if use_celery:\n parse_css.delay(linkedcss)\n else:\n spider.log(\"Parsing css {0}\".format(linkedcss))\n 
self.css_parser.parse(linkedcss)\n spider.log(\"Ended parsing css {0}\".format(linkedcss))\n\n return item", "def save(self):\n with open(\"protocol.csv\", mode=\"w\") as csv_file:\n csv_file.truncate()\n fieldnames = [\"creation_date\", \"creation_time\", \"type\", \"title\", \"desc\", \"given_by\", \"result\", \"owner\",\n \"priority\", \"due\"]\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames, delimiter=\"|\")\n writer.writeheader()\n\n for item in self.items:\n row = {\"creation_date\": item.creation_date,\n \"creation_time\": item.creation_time,\n \"type\": item.type,\n \"title\": item.title,\n \"desc\": item.desc}\n\n if item.type == \"Information\":\n row[\"given_by\"] = item.given_by\n elif item.type == \"Decision\":\n row[\"result\"] = item.result\n elif item.type == \"Task\":\n row[\"owner\"] = item.owner\n row[\"priority\"] = item.priority\n row[\"due\"] = item.due\n\n writer.writerow(row)", "def unique_extract(url, complete=False):\r\n\r\n if 'olx.com' in url:\r\n response = requests.get(url, headers=header)\r\n soup = BeautifulSoup(response.text, \"html.parser\")\r\n\r\n # Information dictionary\r\n product_dict_unique = {}\r\n\r\n # JSON\r\n general_info = json.loads(\r\n soup.find('script', {'id': 'initial-data'})['data-json'])['ad']\r\n\r\n # Complete information\r\n if complete is True:\r\n return general_info\r\n\r\n # Partial information\r\n # Name\r\n product_dict_unique['Name'] = general_info['subject']\r\n # ID\r\n product_dict_unique['ID'] = general_info['listId']\r\n # Image\r\n product_dict_unique['Image'] = general_info['images'][0]['original']\r\n # Price\r\n product_dict_unique['Price'] = re.findall('R\\$ (\\d*,?\\.?\\d*)|$', general_info['priceValue'])[0].replace('.', '')\r\n # Description\r\n product_dict_unique['Description'] = general_info['description']\r\n # Date\r\n product_dict_unique['Datetime (UTC)'] = general_info['listTime']\r\n # Author\r\n product_dict_unique['Author'] = general_info['user']['name']\r\n # Phone\r\n product_dict_unique['Phone'] = general_info['phone']['phone']\r\n # Type\r\n product_dict_unique['Type'] = general_info['parentCategoryName']\r\n # Category\r\n product_dict_unique['Category'] = general_info['categoryName']\r\n # Location\r\n product_dict_unique['Location'] = general_info['location']\r\n\r\n return product_dict_unique", "def save_data(self):\n # Command to get the download data\n pass", "def save_results(self, instagram_results):", "def _save_book_detail(self, book_dict, book_nmb):\n content = self._get_book_content(book_dict['book_page_href'])\n with open(f'{PATH_TO_DATA}/{book_nmb}.html', 'w') as file:\n file.write(content)", "def process_item(self, item, spider):\n writer = csv.writer(self.file, delimiter = '|')\n for apartment in item[\"apartments\"]:\n row = [apartment[\"price\"], apartment[\"size\"], apartment[\"rooms\"], apartment[\"address\"], apartment[\"lat\"],\n apartment[\"lng\"], apartment[\"zone\"], apartment[\"band\"], apartment[\"east\"], apartment[\"north\"],\n apartment[\"date\"]]\n writer.writerow(row)\n self.file.flush()\n print(\"page {} processed.\".format(item[\"page\"]))\n return item", "def response(self, flow: mitmproxy.http.HTTPFlow):\n if \"https://stock.xueqiu.com/v5/stock/batch/quote.json?_t\" in flow.request.url and \"x=\" in flow.request.url:\n base_data = json.loads(flow.response.text)\n new_data = self.recursion(base_data,2)\n flow.response.text = json.dumps(new_data)", "def process_item(self, item, spider):\n session = self.Session()\n # deal = Deals(**item)\n entry = 
Lyrics(item['song'], item['text'])\n\n try:\n session.add(entry)\n session.commit()\n print(f\"\\n\\nInserted {item['song']} into DB!\\n\\n\")\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n ###OLD###\n # # print(\"Pipeline test\" + item['song'])\n # self.conn.execute(f\"\"\"INSERT INTO lyrics VALUES\n # ({item['song']}, {item['text']});\n # \"\"\")\n\n return item", "def _clean_item_data(self, item):\r\n item['location'] = item['_id']\r\n del item['_id']", "def _save_item(request, usage_key, data=None, children=None, metadata=None, nullout=None,\r\n grader_type=None, publish=None):\r\n store = get_modulestore(usage_key)\r\n\r\n try:\r\n existing_item = store.get_item(usage_key)\r\n except ItemNotFoundError:\r\n if usage_key.category in CREATE_IF_NOT_FOUND:\r\n # New module at this location, for pages that are not pre-created.\r\n # Used for course info handouts.\r\n store.create_and_save_xmodule(usage_key)\r\n existing_item = store.get_item(usage_key)\r\n else:\r\n raise\r\n except InvalidLocationError:\r\n log.error(\"Can't find item by location.\")\r\n return JsonResponse({\"error\": \"Can't find item by location: \" + unicode(usage_key)}, 404)\r\n\r\n old_metadata = own_metadata(existing_item)\r\n\r\n if publish:\r\n if publish == 'make_private':\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().unpublish(i.location),\r\n ignore_exception=ItemNotFoundError\r\n )\r\n elif publish == 'create_draft':\r\n # This recursively clones the existing item location to a draft location (the draft is\r\n # implicit, because modulestore is a Draft modulestore)\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().convert_to_draft(i.location),\r\n ignore_exception=DuplicateItemError\r\n )\r\n\r\n if data:\r\n # TODO Allow any scope.content fields not just \"data\" (exactly like the get below this)\r\n existing_item.data = data\r\n else:\r\n data = existing_item.get_explicitly_set_fields_by_scope(Scope.content)\r\n\r\n if children is not None:\r\n children_usage_keys = [\r\n UsageKey.from_string(child)\r\n for child\r\n in children\r\n ]\r\n existing_item.children = children_usage_keys\r\n\r\n # also commit any metadata which might have been passed along\r\n if nullout is not None or metadata is not None:\r\n # the postback is not the complete metadata, as there's system metadata which is\r\n # not presented to the end-user for editing. So let's use the original (existing_item) and\r\n # 'apply' the submitted metadata, so we don't end up deleting system metadata.\r\n if nullout is not None:\r\n for metadata_key in nullout:\r\n setattr(existing_item, metadata_key, None)\r\n\r\n # update existing metadata with submitted metadata (which can be partial)\r\n # IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. 
If\r\n # the intent is to make it None, use the nullout field\r\n if metadata is not None:\r\n for metadata_key, value in metadata.items():\r\n field = existing_item.fields[metadata_key]\r\n\r\n if value is None:\r\n field.delete_from(existing_item)\r\n else:\r\n try:\r\n value = field.from_json(value)\r\n except ValueError:\r\n return JsonResponse({\"error\": \"Invalid data\"}, 400)\r\n field.write_to(existing_item, value)\r\n\r\n if existing_item.category == 'video':\r\n manage_video_subtitles_save(existing_item, request.user, old_metadata, generate_translation=True)\r\n\r\n # commit to datastore\r\n store.update_item(existing_item, request.user.id)\r\n\r\n result = {\r\n 'id': unicode(usage_key),\r\n 'data': data,\r\n 'metadata': own_metadata(existing_item)\r\n }\r\n\r\n if grader_type is not None:\r\n result.update(CourseGradingModel.update_section_grader_type(existing_item, grader_type, request.user))\r\n\r\n # Make public after updating the xblock, in case the caller asked\r\n # for both an update and a publish.\r\n if publish and publish == 'make_public':\r\n def _publish(block):\r\n # This is super gross, but prevents us from publishing something that\r\n # we shouldn't. Ideally, all modulestores would have a consistant\r\n # interface for publishing. However, as of now, only the DraftMongoModulestore\r\n # does, so we have to check for the attribute explicitly.\r\n store = get_modulestore(block.location)\r\n store.publish(block.location, request.user.id)\r\n\r\n _xmodule_recurse(\r\n existing_item,\r\n _publish\r\n )\r\n\r\n # Note that children aren't being returned until we have a use case.\r\n return JsonResponse(result)", "def add_url(p_id, url):\n for product in all_products:\n if product['id'] == p_id:\n product['url'] = url\n product['product_id'] = p_id\n product.move_to_end('product_id', last=False)", "def scraper_data(self):\n self.lock.acquire()\n for item in s.item:\n item_name = item.get(\"item\")\n item_url = item.get(\"url\")\n item_stock, item_cost = self.scraper.ChooseScraper(item_url)\n s.updateStatus(item_name, item_url, item_stock, item_cost)\n time.sleep(1)\n\n self.lock.release()", "def saved_product_details_on_db(lining_pid, details: dict, db_name='felfeli.db'):\n logging.debug('Creating Connection with \"%s\" DataBase...' % db_name)\n conn = sqlite3.connect(db_name)\n c = conn.cursor()\n c.execute(\"\"\" SELECT * FROM details WHERE lining_pid=? \"\"\", (lining_pid,))\n data = c.fetchone()\n\n details_json = json.dumps(details)\n datetime_now = str(arrow.now('Asia/Tehran'))\n if data:\n # update row\n row_id = data[0]\n c.execute(\"\"\" UPDATE details SET json=?, last_update=? WHERE id=? \"\"\", (details_json, datetime_now, row_id))\n conn.commit()\n else:\n # create row\n c.execute(\"\"\" INSERT INTO details (lining_pid, json, last_update) VALUES (?, ?, ?) \"\"\", (lining_pid, details_json, datetime_now))\n conn.commit()\n\n c.execute(\"\"\" SELECT * FROM details WHERE lining_pid=? 
\"\"\", (lining_pid,))\n data = c.fetchone()\n conn.close()\n return data", "def save_shipment_detail(shipment_detail):\n shipment = Shipment.objects.filter(shipment_id=shipment_detail['shipmentId']).first()\n\n # Update shipment\n shipment.pickup_point = shipment_detail.get('pickupPoint')\n shipment.shipment_reference = shipment_detail.get('shipmentReference')\n shipment.billing_details = json.dumps(shipment_detail.get('billingDetails'))\n shipment.customer_details = json.dumps(shipment_detail.get('customerDetails'))\n shipment.detail_fetched = True\n shipment.save()\n\n # Create Transport if not created\n transport_obj, create = Transport.objects.get_or_create(\n shipment=shipment, transport_id=shipment_detail['transport']['transportId']\n )\n transport = shipment_detail['transport']\n\n transport_obj.shipping_label_id = transport.get('shippingLabelId')\n transport_obj.shipping_label_code = transport.get('shippingLabelCode')\n transport_obj.transport_id = transport.get('transportId')\n transport_obj.transporter_code = transport.get('transporterCode')\n transport_obj.track_and_trace = transport.get('trackAndTrace')\n transport_obj.shipment = shipment\n transport_obj.save()\n\n for item in shipment_detail['shipmentItems']:\n # Create order if not created\n order, created = Order.objects.get_or_create(order_id=item.get('orderId'), order_date=item.get('orderDate'))\n\n # Create OrderItem\n order_item_obj = {\n 'order': order,\n 'order_item_id': item.get('orderItemId'),\n 'ean': item.get('ean'),\n 'title': item.get('title'),\n 'quantity': item.get('quantity'),\n 'offer_price': item.get('offerPrice'),\n 'offer_condition': item.get('offerCondition'),\n 'offer_reference': item.get('offerReference')\n }\n order_item = OrderItem.objects.create(**order_item_obj)\n\n # Create ShipmentItem\n shipment_item_obj = {\n 'shipment': shipment,\n 'order': order,\n 'order_item': order_item,\n 'latest_delivery_date': item.get('latestDeliveryDate'),\n 'fulfilment_method': item.get('fulfilmentMethod')\n }\n ShipmentItem.objects.create(**shipment_item_obj)", "def process_item(self, item, spider):\n if item['id'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: {0}\".format(item))\n else:\n self.ids_seen.add(item['id'])\n\n session = Session()\n\n if 'sex' in item:\n friends = item.pop('friends')\n for friend in friends:\n try:\n session.execute(friendship.insert(), params={\"friend_a_id\": item['id'], \"friend_b_id\": friend})\n session.commit()\n except:\n session.rollback()\n continue\n item.pop('image_urls')\n pictures = item.pop('images')\n if pictures:\n item['picture'] = pictures[0]['path']\n data = User(**item)\n else:\n data = City(**item)\n\n try:\n session.add(data)\n session.commit()\n except:\n session.rollback()\n raise Exception(\n \"[ERROR]: {0} - {1}\".format(sys.exc_info()[0], sys.exc_info()[1])\n )\n finally:\n session.close()\n\n return item", "def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "def parse_details(self, 
response):\n items = response.xpath(\"//*[@id='all']//div[@class='prdct-box']\")\n for i in items:\n image_url = response.urljoin(i.xpath(\".//div[@class='prdct-box1']/a[1]/@href\").get())\n description = i.xpath(\".//div[@class='prdct-box2']//a[1]/text()\").get()\n item_no = i.xpath(\".//div[@class='prdct-box2']//text()[3]\").get(default='').strip()\n upc = i.xpath(\".//*[contains(text(),'UPC')]/following-sibling::text()[1]\").extract()[0].strip()\n category = i.xpath(\"//*[@id='all']//*[@class='products']/text()\").get()\n case = i.xpath(\".//*[contains(text(),'Case')]/following-sibling::text()[1]\").extract()[0]\n yield {\n \"VENDORID\":1068,\n \"VENDOR\":'UPD',\n \"ITEMNO\":item_no,\n \"UPC\":upc,\n \"CATEGORY\":category,\n \"DESCRIPTION\":description,\n \"IMAGE_URL\":image_url,\n \"CASEPACK\":case,\n \"PAGE_TITLE\":response.css('title::text').get(),\n \"PAGE_URL\":response.request.url\n }\n\n next_page = response.xpath(\"//p[@class='page-num']//a/@href\").extract()\n if next_page is not None:\n for n in next_page:\n next_page_url = response.urljoin(n)\n yield scrapy.Request(next_page_url, callback=self.parse_details)", "def record_metadata(id, sleep_time=1):\n regex = re.compile('\\W')\n url = \"http://catalog.hathitrust.org/api/volumes/brief/recordnumber/{0}.json\"\n\n url = url.format(id)\n r = requests.get(url)\n data = r.json()\n\n # data = data['items'][id]\n items = []\n if data:\n for item in data['items']:\n enum = regex.sub('', str(item.get('enumcron', '')).lower())\n htid = item.get('htid', '')\n items.append((enum, htid))\n else:\n items = []\n\n sleep(sleep_time)\n return items", "def OLD_get_items_from_category(save_db=False):\n # for i in range(16):\n # name = pd.read_sql_query(\n # f'SELECT name FROM categories LIMIT 1 OFFSET {i}', conn).values[0][0]\n # cat_url = pd.read_sql_query(\n # f'SELECT url FROM categories LIMIT 1 OFFSET {i}', conn).values[0][0]\n # # cat_id = pd.read_sql_query(\n # # f'SELECT id FROM categories LIMIT 1 OFFSET {i}', conn).values[0][0]\n # cat_id = i + 1\n # print(cat_id)\n # \"\"\" pandas return DataFrame, use DataFrame.values to return list of list value in that DataFrame\n # url: https://tiki.vn/dien-thoai-may-tinh-bang/c1789?src=c.1789.hamburger_menu_fly_out_banner&page=2\n # \"\"\"\n # for i in range(100):\n # url = cat_url + f'&page={i+1}'\n # print(url)\n # soup = get_url(url)\n\n # result = []\n # \"\"\" item: div 'product-item' > div 'content'\n # img: img 'product-imgage'\n # title: p 'title'\n # price: span 'price-regular'\n # sale-tag: span 'sale-tag'\n # final-price: span 'final-price'\n # \"\"\"\n # div_container = soup.find_all('div', {'class': 'product-item'})\n # item_path = div_container['data-category']\n # if div_container:\n # for div in div_container:\n # # it = {'item_id':'','name':'', 'brand':'', 'url':'', 'img_url':'', 'price':'', 'sale-tag':'', 'final-price':''}\n # item_id = None\n # item_name = div.a['title']\n # brand = div['data-brand']\n # item_url = div.a['href']\n # img_url = div.img['src']\n # regular_price = div.find('span', {'class': 'price-regular'}).text\n # sale_tag = div.find('span', {'class': 'final-price'}).text[-5:-1]\n # final_price = div.find('span', {'class': 'final-price'}).text[:-5].strip()\n\n # item = Items(item_id, item_path, cat_id, item_name, brand, item_url,\n # img_url, regular_price, sale_tag, final_price)\n # if save_db:\n # item.save_into_db()\n # print(f'SAVE {item_name} INTO DTB')\n # result.append(item)\n # else:\n # break", "def save(self):\n if self.get('_id'):\n return 
self.connection.update({'_id': self.get('_id')}, {'$set': self._export(without_id=True)})\n else:\n return self.connection.insert(self._export())", "def get_saved_result(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n for i in objs:\n data.append({'sentence':i.sentence, 'head':i.head, 'tail':i.tail, 'pred_relation':i.pred_relation, 'pred_sentiment':i.sentiment, 'conf':i.conf})\n \n return HttpResponse(\n json.dumps({'data':data}),\n content_type=\"application/json\"\n )", "def processDataRequest(requestData):\n try:\n data = requestData\n if 'body' in data:\n for video in data['body']['itemListData']:\n #extracting the info we want to save\n dic = {}\n dic['id'] = video['itemInfos']['id']\n dic['musicId'] = video['itemInfos']['musicId']\n dic['timeCreated'] = video['itemInfos']['createTime']\n dic['likeCount'] = video['itemInfos']['diggCount']\n dic['shareCount'] = video['itemInfos']['shareCount']\n dic['playCount'] = video['itemInfos']['playCount']\n dic['commentCount'] = video['itemInfos']['commentCount']\n dic['videoUsed'] = False\n dic['videoUsedDate'] = ''\n print(dic)\n listOfVideoDic.append(dic)\n except Exception as e:\n print(e)", "def old_get_details(self, entity):\n self.logger.debug(\"get_details: entered\")\n\n # For every <url> tag that this entity has, we fetch the details it\n # provides.\n #\n link = first_child(entity, \"url\")\n i = 0\n while link:\n i += 1\n src_url = ScrapeURL(link, cache = self.cache)\n url_data = src_url.get()\n\n # If we get back an object with an iterator then we loop over the\n # elements in our src data, putting successive one in successive\n # buffers.\n #\n if hasattr(url_data, '__iter__'):\n for j,data in enumerate(url_data):\n self.parser.set_buffer(i+j, data)\n i += j\n else:\n self.parser.set_buffer(i, url_data)\n # XXX for debugging purposes again we write out the details\n # we get in uniquely named files that correspond to the\n # param buffer we use for the url data.\n #\n with open(\"details.%d.html\" % i, \"w\") as f:\n f.write(url_data)\n\n\n link = next_sibling(link, \"url\")\n\n # Now we get the url based id used to identify this entity, if we\n # have one. This is passed in to the parser as the next free\n # parameter buffer.\n #\n # XXX NOTE: the xml scraper seems to always expect the id in\n # buffer 2 (and then details html in buffer 1.)\n #\n entity_id = first_child(entity, \"id\")\n if entity_id is not None:\n entity_id = entity_id.firstChild.data\n self.parser.set_buffer(i+1, entity_id)\n self.logger.debug(\"get_details: buffer: %d entity id: %s\" % \\\n (i+1,entity_id))\n\n details = self.parser.parse(FN_GET_DETAILS, self.settings)\n\n # XXX I think we only need this file for debugging. Eventually\n # we will just remove this output statement.\n #\n with open(\"details.%s.xml\" % entity_id, \"w\") as f:\n f.write(details)\n\n self.logger.debug(\"get_details: leaving\")\n return details", "def get_details(self):\n # For every URL in our list of links that we got from the parser's\n # 'lookup()' method we get the data from that URL, set it in our\n # parser's buffer, and then let the parser do the rest of the work.\n #\n for i,link in enumerate(self.links):\n # NOTE: Buffers are 1-based, not 0-based.\n #\n link_data = link.get()\n self.scraper.parser.set_buffer(i+1, link_data)\n\n # And in the final buffer we set the id. 
The scraper we have\n # loaded knows how many bits of url data it expects and in which\n # buffer the id will be in.\n #\n i += 1\n self.scraper.parser.set_buffer(i+1, self.id)\n self.xml_details = self.scraper.parser.parse(FN_GET_DETAILS,\n self.scraper.settings)", "def _add_data_to_response(self, desc_matches, url, page_matches, response):\n\n if desc_matches:\n response.append({\n 'url': url,\n 'matches': desc_matches\n })\n\n if page_matches:\n response.append({\n 'url': url,\n 'matches': page_matches\n })", "def get_items_from_category(save_db=False):\n query_result = pd.read_sql_query(\"\"\"SELECT c1.id, c1.name, c1.parent_id, c1.url\n FROM categories c1 LEFT OUTER JOIN categories c2\n ON c1.id = c2.parent_id\n WHERE c2.parent_id IS NULL\n LIMIT 1400 OFFSET 1300\"\"\", conn)\n for i in query_result.itertuples():\n name = i.name[:-10].strip()\n cat_url = i.url\n cat_id = i.id\n quantity = i.name[-10:].strip()\n \n for i in range(100):\n url = cat_url + f'&page={i+1}'\n print(url)\n soup = get_url(url)\n \n result = []\n \"\"\" item: div 'product-item' > div 'content'\n img: img 'product-imgage'\n title: p 'title'\n price: span 'price-regular'\n sale-tag: span 'sale-tag'\n final-price: span 'final-price'\n \"\"\"\n try:\n div_container = soup.find_all('div', {'class': 'product-item'})\n except Exception as err:\n print('ERROR BY DIV FINDALL: ', err)\n if div_container:\n for div in div_container:\n # it = {'item_id':'','name':'', 'brand':'', 'url':'', 'img_url':'', 'price':'', 'sale-tag':'', 'final-price':''}\n item_id = None\n item_path = div['data-category']\n item_name = div.a['title']\n brand = div['data-brand']\n item_url = div.a['href']\n img_url = div.img['src']\n regular_price = div.find('span', {'class': 'price-regular'}).text\n sale_tag = div.find('span', {'class': 'final-price'}).text[-5:-1]\n final_price = div.find('span', {'class': 'final-price'}).text[:-5].strip()\n\n item = Items(item_id, item_path, cat_id, item_name, brand, item_url,\n img_url, regular_price, sale_tag, final_price)\n if save_db:\n item.save_into_db()\n print(f'SAVE {item_name} INTO DTB')\n result.append(item)\n else:\n break", "def save_response(self, res) -> None:\n file = open(\"response_{}.json\".format(self.num_res), \"w\")\n file.write(str(res))\n file.close()", "def save_info(request):\n user_info_obj = Shop.objects.get(user=request.user)\n user_info_obj.address = request.data[\"address\"]\n user_info_obj.description = request.data[\"description\"]\n user_info_obj.phone_number = request.data[\"phoneNumber\"]\n if request.data[\"logo\"]:\n user_info_obj.logo = request.data[\"logo\"]\n user_info_obj.save()\n data_payload = {\"img\": user_info_obj.logo.url if user_info_obj.logo else \"\"}\n return Response(status=status.HTTP_200_OK, data=data_payload)", "def _data_to_save(self) -> SerializedPipelineStorageCollection:\n base_data = super()._base_data_to_save()\n return {\n \"items\": base_data[\"items\"],\n \"preferred_item\": self._preferred_item,\n }", "def fetch_data(self):", "def save_object(self, data):\n return Item(**data)", "def __save_url_mapping(instance):\n short_to_url = Url.__load_url_mapping()\n short_to_url[instance.short_url] = instance\n pickle.dump(short_to_url, open(\"short_to_url.p\", \"wb\"))", "def api_item_details(item_id):\n if request.method == 'GET':\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return jsonify(item.Item.to_json())\n # TODO - Add a POST method + HTTP Auth to allow a RESTful item modification", "def save_trail_html(title, 
url):\n r = requests.get(url).text\n raw_insert = {'trail': title,\n \"raw_html\": r}\n trail_page_raw_html.insert_one(raw_insert)\n return None", "def only_url_item(upload_items: List[JSONDict]) -> JSONDict:\n altered = upload_items[0]\n del altered[\"guid\"]\n return altered", "def save_data(self, session, exp_id, content):\n from expfactory.database.models import Participant, Result\n\n subid = session.get(\"subid\")\n token = session.get(\"token\")\n\n self.logger.info(\"Saving data for subid %s\" % subid)\n\n # We only attempt save if there is a subject id, set at start\n if subid is not None:\n p = Participant.query.filter(\n Participant.id == subid\n ).first() # better query here\n\n # Does\n if self.headless and p.token != token:\n self.logger.warning(\n \"%s attempting to use mismatched token [%s] skipping save\"\n % (p.id, token)\n )\n elif self.headless and p.token.endswith((\"finished\", \"revoked\")):\n self.logger.warning(\n \"%s attempting to use expired token [%s] skipping save\" % (p.id, token)\n )\n else:\n\n # Preference is to save data under 'data', otherwise do all of it\n if \"data\" in content:\n content = content[\"data\"]\n\n result = Result(\n data=content, exp_id=exp_id, participant_id=p.id\n ) # check if changes from str/int\n\n # Create and save the result\n self.session.add(result)\n p.results.append(result)\n self.session.commit()\n self.logger.info(\"Save [participant] %s [result] %s\" % (p, result))", "def bulk_detail(self, *appids):\n bulk_message = self.bulkDetails(appids)\n for appid, message in zip(appids, bulk_message.entry):\n try:\n if message.ByteSize() > 0:\n item = {}\n item[\"appid\"] = message.doc.docid\n item[\"version_code\"] = message.doc.details.appDetails.versionCode\n item[\"price\"] = message.doc.offer[0].formattedAmount.lower()\n seen = self.redis.hget(\"app_record\", item['appid'])\n if item['price'] != 'free':\n self.redis.sadd(\"paid_appids\", item['appid'])\n continue\n if str(item[\"version_code\"]) != seen:\n if not seen:\n item['tag'] = 'new'\n else:\n item['tag'] = 'updated'\n else:\n #self.log.warning(\"Ignore app %s vc %s local vc %s\" % (item['appid'], item['version_code'], seen))\n continue\n\n share_url = message.doc.shareUrl\n response = self.requests.get(share_url)\n if response.status_code == 404:\n continue\n\n q = _Q(response.content.decode('utf-8'))\n item[\"offer_type\"] = message.doc.offer[0].offerType\n category_url = q(\".document-subtitle.category\").attr('href')\n\n category = ''\n if category_url:\n category = re.search('.*/(.*?)$', category_url).group(1)\n item[\"category_id\"] = CATEGORY_MAP.get(category, 'TOOLS')\n item[\"category_play\"] = category\n item[\"description\"] = q('div[itemprop=description]').html()\n item[\"lang\"] = unicode(guess_language(q('.id-app-orig-desc').text() or 'en'))\n item[\"developer\"] = q(\"a.document-subtitle.primary span\").text()\n item[\"group\"] = GROUP_MAP.get(message.doc.details.appDetails.appType) or 'app'\n item[\"icon\"] = [img.imageUrl for img in message.doc.image if img.imageType == 4][0]\n item[\"is_deleted\"] = False\n item[\"name\"] = message.doc.title\n name = re.sub(ur\"\"\"\\$|\\%|\\(|\\)|\\[|\\[|\\]|\\*|\\ |\\®|\\#|\\~|\\`|\\@|\\^|\\&|\\{|\\}|\\<|\\>|\\?|\\\"|\\'|\\’|\\–|\\:|\\;|\\||\\/|\\+|\\!|\\•|\\,|\\™|\\_\"\"\", '-', item['name'])\n name_url = urllib.quote(name.encode('utf-8'))\n if \"%\" not in name_url:\n item['name_url'] = name_url\n\n item[\"operating_systems\"] = q(\"div[itemprop=operatingSystems]\").text().strip()\n item[\"order\"] = 0\n 
item[\"rating\"] = message.doc.aggregateRating.starRating\n item['rating_user'] = humanize.intcomma(message.doc.aggregateRating.ratingsCount)\n\n total_count = message.doc.details.appDetails.numDownloads\n total_count = remove_downloads(total_count)\n item[\"total_count\"] = total_count\n item[\"download_count\"] = strCount_to_intCount(total_count)\n\n item[\"release_time\"] = message.doc.details.appDetails.uploadDate\n item[\"screenshot\"] = [img.get('src') if img.get('src').startswith('http') else 'http:' + img.get('src') for img in q(\"div.thumbnails img[itemprop=screenshot]\")]\n item[\"update_info\"] = q(\".recent-change\").text().strip()\n item[\"version_name\"] = q(\"div[itemprop=softwareVersion]\").text()\n item[\"size\"] = humanize.naturalsize(message.doc.details.appDetails.installationSize, gnu=True)\n item[\"source\"] = 'crawler'\n item[\"channel\"] = 'googleplay'\n item[\"paid\"] = 1 # 1 for free, 2 for paid\n item[\"search_order\"] = 0\n item[\"search_reindex\"] = 1\n item['app_status'] = 0\n\n yield item\n else:\n yield {\"appid\": appid, 'notfound': True}\n except Exception as e:\n traceback.print_exc()", "def save_data(self):\n with open(self.storage_path, 'w') as cache_file:\n json.dump(self.data, cache_file)", "def save(self, db):\n db.googleResults.insert_one(\n {\n \"searchQuery\": self.search_query,\n \"title\": self.title,\n \"link\": self.link,\n \"subtext\": self.subtext,\n \"searchterms\" : self.searchterms, # array\n \"queryTime\": datetime.datetime.now(),\n \"details\": self.link_scripts\n }\n )", "def _setData(self):\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n data_list = []\n results = self.query.all()\n \n # if no current parliament, no data\n try:\n parliament_id = model_utils.get_current_parliament().parliament_id\n except: \n return data_list\n #\n government_id = self.__parent__.government_id\n for result in results:\n data = {}\n data[\"qid\"] = \"g_%s\" % (result.group_id)\n data[\"subject\"] = result.short_name\n data[\"title\"] = \"%s (%s)\" % (result.short_name, result.type)\n data[\"result_item_class\"] = \"workflow-state-%s\" % (result.status)\n _url = \"/archive/browse/parliaments/obj-%s\" % (parliament_id)\n if type(result) == domain.Parliament:\n data[\"url\"] = url.set_url_context(_url)\n continue\n elif type(result) == domain.Committee:\n #data[\"url\"] = url + \"/committees/obj-\" + str(result.group_id) \n data[\"url\"] = url.set_url_context(\"/groups/%s/%s\" % (\n result.parent_group.group_principal_id,\n result.group_principal_id))\n elif type(result) == domain.PoliticalGroup:\n data[\"url\"] = url.set_url_context(\n \"%s/politicalgroups/obj-%s\" % (_url, result.group_id))\n elif type(result) == domain.Ministry:\n data[\"url\"] = url.set_url_context(\n \"%s/governments/obj-%s/ministries/obj-%s\" % (\n _url, government_id, result.group_id))\n else:\n data[\"url\"] = \"#\"\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"\"\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def save_local_copy(self, adi):\r\n temp = self.from_copy\r\n self.from_copy = False\r\n products = self.get_products(adi)\r\n print(\"Saving products from {}...\".format(adi))\r\n self.rf.dump_json(products, self.products_copy.format(adi))\r\n for product in products[\"data\"][\"products\"]:\r\n print(\"Saving {}...\".format(product[\"name\"].translate({ord(c): None for c in \"\\\\/:*\\\"<>|\"})))\r\n 
product_detail = self.get_product_detail(adi, product[\"productId\"], product[\"name\"])\r\n self.rf.dump_json(product_detail, self.product_detail_copy.format(adi, product[\"name\"].translate({ord(c): None for c in \"\\\\/:*\\\"<>|\"})))\r\n self.from_copy = temp", "def update_url(url):\n url_db = select(u for u in Url if u.id == url.id).get()\n url_db.date_scanned = datetime.now()", "def saveData(self): \n self.spIndex.close()\n output = open(PublicTransit.PICKLE_SAVE_FILE, 'wb') \n # cPickle the list using the highest protocol available.\n cPickle.dump(self.nodesDict, output, -1)\n cPickle.dump(self.linksDict, output, -1)\n cPickle.dump(self.stopsByRoute, output, -1)\n cPickle.dump(self.stopsByNode, output, -1)\n cPickle.dump(self.routeXref, output, -1)\n cPickle.dump(self.transitRoutes, output, -1)\n output.close()\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE)", "def process(self, tup):\n\n input = tup.values[0]\n\n url = input['url']\n\n now = datetime.datetime.now()\n if url in self.seen:\n lastSeen = self.seen[url]\n delta = now - lastSeen\n if delta.total_seconds() < 3600:\n # seen less than an hour ago, don't fetch again\n return\n self.seen[url] = now\n\n delta = now - self.lastfetch\n if delta.total_seconds < 0.25:\n # self.log(\"CrawlerBolt sleeping\")\n time.sleep(.25)\n\n cj = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n headers = [(\"User-Agent\", \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0\")]\n opener.addheaders = headers\n output = None\n try:\n response = opener.open(url, timeout=10)\n content_type = response.info().getheader('Content-Type')\n decode_type = \"ISO-8859-1\"\n if content_type is not None and \"charset\" in content_type and \"pdf\" not in content_type:\n decode_type = content_type.split(\"charset=\")[-1]\n html = response.read().decode(decode_type)\n links = []\n if input['depth'] is not 0:\n links = self.linkExtractor.extract(url, response.getcode(), '', '', html, response.info()['date'], 'datawake-local-crawler')\n links = map(lambda x: x.value, links)\n links = filter(lambda x: x is not None and len(x) > 0, links)\n # self.log(\"CrawlerBolt extracted links: \"+str(links))\n\n output = dict(\n crawlid=input['crawlid'],\n appid=input['appid'],\n url=url,\n status_code=response.getcode(),\n status_msg='Success',\n timestamp=response.info()['date'],\n links_found=links,\n body=html,\n attrs=input['attrs']\n )\n\n\n # self.emit([json.dumps(output)])\n self.producer.send_messages(self.topic, json.dumps(output))\n\n self.lastfetch = datetime.datetime.now()\n except:\n self.log(\"CrawlerBolt \" + traceback.format_exc())\n self.log(\"CrawlerBolt: URL: \" + url)\n # self.fail(tup)\n\n if len(self.seen) > self.MAX_LRU_SIZE:\n self.log(\"CrawlerBolt truncating LRU cache\", level='trace')\n sorted_x = sorted(self.seen.items(), key=operator.itemgetter(1))[0:self.TRUNCATE]\n self.seen = dict(sorted_x)", "def parse(self, response):\n item = Top100ShopsItem()\n item['url'] = response.url\n\n yield scrapy.Request(url=response.url, callback=self.parse_webpage, meta={'item': item})", "def save_info(self):\n json.dump(self.info, open(os.path.join(self.dstore_dir, \"info.json\"), \"w\"),\n sort_keys=True, indent=4, ensure_ascii=False)", "def process_item(self, item, spider):\n try:\n s = self.session()\n if isinstance(item, (PomItem, type(PomItem()), type(PomItem))):\n self.store_pom(item, s)\n elif isinstance(item, (AscItem, type(AscItem()), type(AscItem))):\n self.store_asc(item, s)\n 
elif isinstance(item, (ArtifactItem, type(ArtifactItem()), type(ArtifactItem))):\n self.store_index(item, s)\n elif isinstance(item, LinkItem):\n pass\n else:\n logger.warning('Unknown item: %s type %s' % (item, type(item)))\n return\n\n s.commit()\n s.flush() # writes changes to DB\n s.expunge_all() # removes objects from session\n except Exception as e:\n logger.warning('Exception in storing key %s' % e)\n\n finally:\n utils.silent_close(s)\n s = None\n return item", "def get_item_detail(item_id):\n pass", "def save_results_internal(self, obj: object):\r\n filename = f\"{self.search_internal_path}/results_internal.dill\"\r\n\r\n with open_(filename, \"wb\") as f:\r\n dill.dump(obj, f)" ]
[ "0.5922734", "0.5857825", "0.5833559", "0.5798279", "0.5555864", "0.55542606", "0.5488991", "0.5482613", "0.54276925", "0.5380093", "0.53528607", "0.5344608", "0.53070605", "0.5304953", "0.5301043", "0.5283759", "0.52696395", "0.5248865", "0.52482927", "0.5235908", "0.5224081", "0.51554215", "0.5148963", "0.51228595", "0.5104435", "0.5101055", "0.50889176", "0.50864196", "0.5068238", "0.50658524", "0.5062574", "0.50521046", "0.50464", "0.50397396", "0.5036037", "0.50176847", "0.5014243", "0.5011447", "0.50110996", "0.49890736", "0.49858326", "0.4974638", "0.49745274", "0.49703076", "0.49666065", "0.49633127", "0.4960654", "0.496007", "0.4955574", "0.4954939", "0.49465433", "0.49379462", "0.49354115", "0.49274436", "0.49261716", "0.49147826", "0.49143216", "0.49132496", "0.49107665", "0.48979035", "0.48847118", "0.487932", "0.48789474", "0.4865522", "0.48628706", "0.48626843", "0.48615175", "0.48511988", "0.4847842", "0.48461327", "0.48445448", "0.48439825", "0.4837486", "0.4835438", "0.48330954", "0.48308992", "0.48271307", "0.48195574", "0.48158562", "0.48068103", "0.4790286", "0.47765478", "0.47753718", "0.47681072", "0.4767122", "0.47648773", "0.47611934", "0.4760417", "0.47587976", "0.47560385", "0.47542375", "0.4753683", "0.47442684", "0.4739421", "0.47362185", "0.47341657", "0.47332653", "0.47319987", "0.4730848", "0.47302738" ]
0.690983
0
Tests basic whitelist functionality
def test_whitelist(self):
    p = self.load_policy({
        'name': 'test-key-vault',
        'resource': 'azure.keyvault',
        'filters': [
            {'type': 'value',
             'key': 'name',
             'op': 'glob',
             'value_type': 'normalize',
             'value': 'cckeyvault1*'},
            {'not': [
                {'type': 'whitelist',
                 'key': 'principalName',
                 'users': ['[email protected]']}
            ]}
        ]
    })
    resources = p.run()
    self.assertEqual(len(resources), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_to_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(3 in Feature(\"testing\").whitelist)", "def test_add_to_whitelist_with_string(self):\n email = '[email protected]'\n self.feature_test.add_to_whitelist(email)\n self.assertTrue(email in Feature(\"testing\").whitelist)", "def test_remove_from_whitelist_with_string(self):\n email = '[email protected]'\n self.feature_test.add_to_whitelist(email)\n self.feature_test.remove_from_whitelist(email)\n self.assertFalse(email in Feature(\"testing\").whitelist)", "def test_remove_from_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.feature_test.remove_from_whitelist(3)\n self.assertFalse(3 in Feature(\"testing\").whitelist)", "def __init__(self, *args, **kwargs):\n self.whitelist = set(kwargs.pop('whitelist', []))\n self.blacklist = set(kwargs.pop('blacklist', []))\n\n super(MyTestRunner, self).__init__(*args, **kwargs)", "def test_list_rules(self):\n pass", "def test_add_to_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(3 in Feature(\"testing\").blacklist)", "def test_sanitization_function(self):\n for (broken, clean) in self.needSanitization:\n self.assertEquals(clean, sanitizeFeedback(broken))\n\n for test in self.noSanitizingNeeded:\n self.assertEquals(test, sanitizeFeedback(test))", "def test_visible_whitelisted_with_string(self):\n email = '[email protected]'\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(email)\n self.assertTrue(self.feature_test.is_visible(email))", "def test_add_to_blacklist_with_string(self):\n email = '[email protected]'\n self.feature_test.add_to_blacklist(email)\n self.assertTrue(email in Feature(\"testing\").blacklist)", "def whitelist_ips(self):\r\n if self.whitelist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.whitelist.split(',')]) # pylint: disable=no-member\r", "def validate_whitelist(self):\n\n for addr in self.whitelist:\n try:\n socket.gethostbyname_ex(addr)\n except:\n raise APIException(\n \"'{}' does not look like an ip or domain name\"\n .format(addr), 400)", "def main():\n try:\n (whitelist, strict, lists, addressbooks, folders, exclusions) = parseOptions()\n except Exception, e:\n usage(e)\n sys.exit(1)\n\n try:\n addresses = set()\n for i in lists:\n result = parseHandList(i)\n addresses |= result\n for i in addressbooks:\n result = parseMuttAddressbook(i)\n addresses |= result\n for i in folders:\n result = parseMboxFolder(i)\n addresses |= result\n for i in exclusions:\n result = parseHandList(i)\n addresses -= result\n if strict:\n open(whitelist, \"w\").writelines([\"^\"+key.replace('.', '\\\\.')+'$\\n' for key in sorted(addresses)])\n else:\n open(whitelist, \"w\").writelines([key+'\\n' for key in sorted(addresses)])\n except Exception, e:\n print \"Error generating whitelist: %s\" % e\n sys.exit(2)", "def __init__(self, *args, **kwargs):\n # skip\n self.skip_whitelist = set(kwargs.pop('skip_whitelist', []))\n self.skip_blacklist = set(kwargs.pop('skip_blacklist', []))\n # ignore\n self.ignore_whitelist = set(kwargs.pop('ignore_whitelist', []))\n self.ignore_blacklist = set(kwargs.pop('ignore_blacklist', []))\n\n\n super(MyTestRunner, self).__init__(*args, **kwargs)", "def test_list(self):\n pass", "def test_list(self):\n pass", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n 
check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_support_SAFELIST(self):\n self.assertEqual(self._parseFeature(\"SAFELIST\"), True)", "def testUsingFilterTool(self):\n pass", "def test_visible_white_and_blacklisted_with_string(self):\n email = '[email protected]'\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(email)\n self.feature_test.add_to_blacklist(email)\n self.assertTrue(self.feature_test.is_visible(email))", "def test_check_org_on_whitelist_true(self):\n\n org_name = 'AS24940 Hetzner Online GmbH'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertTrue(result)", "def test_filterSamples_strict(self):\r\n with self.assertRaises(ValueError):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'])\r\n\r\n with self.assertRaises(ValueError):\r\n self.empty_map.filterSamples(['foo'])", "def short_whitelist(whitelist):\n for x in [\"guid-4\", \"guid-5\"]:\n whitelist.remove(x)\n return whitelist", "def test_break_security_group_usual_case():", "async def test_allowlist(hass: HomeAssistant, mock_client) -> None:\n await _setup(\n hass,\n {\n \"include_domains\": [\"fake\"],\n \"include_entity_globs\": [\"test.included_*\"],\n \"include_entities\": [\"not_real.included\"],\n },\n )\n\n tests = [\n FilterTest(\"climate.excluded\", False),\n FilterTest(\"fake.included\", True),\n FilterTest(\"test.excluded_test\", False),\n FilterTest(\"test.included_test\", True),\n FilterTest(\"not_real.included\", True),\n FilterTest(\"not_real.excluded\", False),\n ]\n\n for test in tests:\n hass.states.async_set(test.id, \"not blank\")\n await hass.async_block_till_done()\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def test_check_ip_on_whitelist_true(self):\n\n ip_name = 'mail-ed1-f51.google.com'\n\n result = check_ip_on_whitelist(ip_name, self.pattern_ip)\n\n self.assertTrue(result)", "def test_filterSamples_strict(self):\n with self.assertRaises(ValueError):\n self.overview_map.filterSamples(['PC.356', 'abc123'])\n\n with self.assertRaises(ValueError):\n self.empty_map.filterSamples(['foo'])", "def test_blacklist(self):\n ts = self.ts_db\n fc1 = self.field_change1\n fc2 = self.field_change2\n fc3 = self.field_change3\n fc4 = self.field_change4\n\n valid = blacklist.filter_by_benchmark_name(ts, fc1)\n self.assertTrue(valid, \"Expect this to not be filtered.\")\n valid = blacklist.filter_by_benchmark_name(ts, fc2)\n self.assertTrue(valid, \"Expect this to not be filtered.\")\n bad = blacklist.filter_by_benchmark_name(ts, fc3)\n self.assertFalse(bad, \"Expect this to be filtered by regex.\")\n bad = blacklist.filter_by_benchmark_name(ts, fc4)\n self.assertFalse(bad, \"Expect this to be filtered by blacklist.\")", "def test_check_org_short_on_whitelist_true(self):\n\n org_name = 'AS36351'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertTrue(result)", "def test_kyc_get_legal_list(self):\n pass", "def getWhiteList(self):\n return self.whitelist", "def allow(self, test):\n raise NotImplementedError()", "def test_list_field():", "def _is_whitelisted(self, path, name, method):\r\n # This would be more flexible as an imported file but I gues having some static paths can be beneficial too..\r\n # You're going 
to potentially miss out on good stuff, e.g. - if a program has a handle to one of these that shouldn't\r\n ok_handle = [r'.*\\\\Application Data\\\\AVG.*',\r\n r'.*\\\\BigFix Enterprise\\\\BES Client.*',\r\n r'.*\\\\cygwin\\\\.*',\r\n r'.*\\\\Local Settings\\\\History\\\\History\\.IE5\\\\index\\.dat*', # iexplore, explorer\r\n r'.*\\\\Local Settings\\\\Temporary Internet Files\\\\Content\\.IE5\\\\index\\.dat', # iexplore, explorer\r\n r'.*\\\\I386\\\\.*',\r\n r'.*\\\\Java\\\\jre.*',\r\n r'.*\\\\Perl\\\\.*',\r\n r'.*\\\\Microsoft\\\\CryptnetUrlCache\\\\.*',\r\n r'.*\\\\Microsoft\\\\Windows\\\\UsrClass\\.dat.*',\r\n r'.*\\\\Microsoft Silverlight\\\\.*',\r\n r'.*\\\\Program Files\\\\Microsoft SQL Server\\\\.*',\r\n r'.*\\\\Ruby\\\\.*',\r\n r'.*\\\\System Volume Information\\\\_restore.*',\r\n r'.*\\\\Temp\\\\ASPNETSetup_.*',\r\n r'.*\\\\Temp\\\\Microsoft .NET Framework.*',\r\n r'.*\\\\Users\\\\.*\\\\AppData\\\\Local\\\\Microsoft\\\\Outlook\\\\.*\\.ost', # outlook\r\n r'.*\\\\Users\\\\.*\\\\AppData\\\\Local\\\\Microsoft\\\\Outlook\\\\Offline Address Books\\\\.*\\.oab', # outlook\r\n r'.*\\\\Users\\\\.*\\\\AppData\\\\Local\\\\Microsoft\\\\Windows\\\\Explorer\\\\thumbcache_.*\\.db', # iexplore\r\n r'.*\\\\Users\\\\.*\\\\AppData\\\\Local\\\\Microsoft\\\\Windows\\\\History\\\\History.IE5\\\\.*\\\\index\\.dat', # iexplore, explorer\r\n r'.*\\\\Users\\\\.*\\\\AppData\\\\Local\\\\Microsoft\\\\Windows\\\\Temporary Internet Files\\\\Content\\.IE5\\\\.*',# iexplore, explorer\r\n r'.*\\\\Windows\\\\CCM\\\\.*',\r\n r'.*\\\\Windows\\\\Fonts\\\\.*',\r\n r'.*\\\\Windows\\\\Help\\\\.*',\r\n r'.*\\\\Windows\\\\inf\\\\.*',\r\n r'.*\\\\Windows\\\\Installer\\\\.*',\r\n r'.*\\\\WINDOWS\\\\Microsoft.NET\\\\.*',\r\n r'.*\\\\Windows\\\\PCHealth\\\\.*',\r\n #r'.*\\\\WINDOWS\\\\$hf_mig$\\\\.*',\r\n #r'.*\\\\WINDOWS\\\\$NtUninstallKB.*',\r\n r'.*\\\\WINDOWS\\\\assembly\\\\.*',\r\n r'.*\\\\Windows\\\\GatherLogs\\\\SystemIndex\\\\SystemIndex.*',\r\n r'.*\\\\WINDOWS\\\\ie7updates\\\\.*',\r\n r'.*\\\\WINDOWS\\\\ie8updates\\\\.*',\r\n r'.*\\\\Windows\\\\servicing\\Packages\\\\.*',\r\n r'.*\\\\Windows\\\\ServiceProfiles\\\\.*',\r\n r'.*\\\\Windows\\\\ServicePackFiles\\\\.*',\r\n r'.*\\\\WINDOWS\\\\SoftwareDistribution\\\\.*',\r\n r'.*\\\\Windows\\\\System32\\\\CatRoot\\\\.*',\r\n r'.*\\\\WINDOWS\\\\system32\\\\config\\\\.*',\r\n r'.*\\\\WINDOWS\\\\system32\\dllcache\\\\.*',\r\n r'.*\\\\Windows\\\\System32\\\\DriverStore\\\\FileRepository\\\\.*',\r\n r'.*\\\\WINDOWS\\\\System32\\\\en-us\\\\.*',\r\n r'.*\\\\Windows\\\\System32\\\\EventProviders\\\\.*',\r\n r'.*\\\\WINDOWS\\\\System32\\\\mui\\\\.*',\r\n r'.*\\\\Windows\\\\System32\\\\spool\\\\drivers.*', # winlogon\r\n r'.*\\\\WINDOWS\\\\System32\\\\Microsoft\\\\Protect\\\\.*',\r\n r'.*\\\\Windows\\\\System32\\\\MsDtc\\\\.*',\r\n r'.*\\\\Windows\\\\System32\\\\wbem\\\\.*', # winlogon\r\n r'.*\\\\Windows\\\\System32\\\\winevt\\\\Logs\\\\.*\\.evtx', # svchost, winlogon\r\n r'.*\\\\Windows\\\\WinSxS\\\\.*',\r\n ]\r\n \"\"\"\r\n baseline_path_lst = {'smss.exe' : r'\\\\SystemRoot\\\\System32\\\\smss.exe',\r\n 'csrss.exe' : r'.*:\\\\windows\\\\system32\\\\csrss.exe',\r\n 'winlogon.exe' : r'.*:\\\\windows\\\\system32\\\\winlogon.exe',\r\n 'services.exe' : r'.*\\\\windows\\\\system32\\\\services.exe',\r\n 'svchost.exe' : r'.*\\\\windows\\\\system32\\\\svchost.exe',\r\n 'explorer.exe' : r'.*\\\\windows\\\\explorer.exe',\r\n 'ctfmon.exe' : r'.*\\\\windows\\\\system32\\\\ctfmon.exe'\r\n }\r\n \"\"\"\r\n\r\n ok_file = [r'.*\\\\Device\\\\Afd\\\\Endpoint',\r\n 
r'.*\\\\Device\\\\HarddiskVolume\\d\\\\\\$.*',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\.*\\\\(Application Data|Local Settings|My Documents|Quick Launch)\\\\desktop\\.ini',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\.*\\\\Application Data\\\\Microsoft\\\\.*security\\.config\\.cch',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\Program Files\\\\Broadcom\\\\.*',\r\n r'.*\\\\Device\\\\(HarddiskVolume\\d\\\\)?Program Files\\\\AVG\\\\.*',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\Program Files \\(x86\\)\\\\AVG\\\\.*',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\Program Files \\(x86\\)\\\\Dell\\\\.*',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\Program Files \\(x86\\)\\\\Internet Explorer\\\\iexplore\\.exe',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\Program Files \\(x86\\)\\\\NSClient.*',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\Windows\\\\System32\\\\catroot\\\\.*',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\Windows\\\\System32\\\\en-US\\\\.*',\r\n #r'.*\\\\Device\\\\HarddiskVolume\\d\\\\WINDOWS\\\\system32\\\\inetsrv\\\\.*',\r\n r'.*\\\\Device\\\\HarddiskVolume\\d\\\\WINDOWS\\\\system32\\\\LogFiles\\\\.*',\r\n ]\r\n\r\n ok_internet = [r'Cookie:.*@.*addthis.com',\r\n r'Cookie:.*@.*bing.com',\r\n r'Cookie:.*@.*doubleclick.net',\r\n r'Cookie:.*@.*google.com',\r\n r'Cookie:.*@.*msn.com',\r\n r'Cookie:.*@.*quantserver.com',\r\n r'Cookie:.*@.*twitter.com',\r\n r'Cookie:.*@.*youtube.com',\r\n ]\r\n\r\n ok_svc = {'AudioSrv': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'Browser': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'CryptSvc': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'DcomLaunch': r'.*\\\\WINDOWS\\\\system32\\\\svchost\\.exe -k DcomLaunch',\r\n 'Dhcp': r'.*\\\\WINDOWS\\\\system32\\\\svchost\\.exe -k NetworkService',\r\n 'Dnscache': r'.*\\\\WINDOWS\\\\system32\\\\svchost\\.exe -k NetworkService',\r\n 'ERSvc': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k WinErr',\r\n 'Eventlog': r'.*\\\\WINDOWS\\\\system32\\\\services\\.exe',\r\n 'EventSystem': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'helpsvc': r'.*\\\\WINDOWS\\\\System32\\\\svchost.exe -k netsvcs',\r\n 'HidServ': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'HTTPFilter': r'.*\\\\WINDOWS\\\\system32\\\\lsass\\.exe',\r\n 'lanmanserver': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'lanmanworkstation': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'LmHosts': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k LocalService',\r\n 'MSDTC': r'.*\\\\WINDOWS\\\\System32\\\\msdtc\\.exe',\r\n 'PlugPlay': r'.*\\\\WINDOWS\\\\system32\\\\services\\.exe',\r\n 'PolicyAgent': r'.*\\\\WINDOWS\\\\system32\\\\lsass.exe',\r\n 'ProtectedStorage': r'.*\\\\WINDOWS\\\\system32\\\\lsass.exe',\r\n 'RasMan': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'RemoteRegistry': r'.*\\\\WINDOWS\\\\system32\\\\svchost\\.exe -k regsvc',\r\n 'RpcSs': r'.*\\\\WINDOWS\\\\system32\\\\svchost\\.exe -k rpcss',\r\n 'SamSs': r'.*\\\\WINDOWS\\\\system32\\\\lsass\\.exe',\r\n 'Schedule': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'seclogon': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'ShellHWDetection': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'SNMP': r'.*\\\\WINDOWS\\\\System32\\\\snmp\\.exe',\r\n 'Spooler': r'.*\\\\WINDOWS\\\\system32\\\\spoolsv\\.exe',\r\n 'SrmSvc': r'.*\\\\WINDOWS\\\\system32\\\\svchost\\.exe -k srmsvcs',\r\n 'TapiSrv': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k 
tapisrv',\r\n 'TermService': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k termsvcs',\r\n 'W32Time': r'.*\\\\WINDOWS\\\\system32\\\\svchost\\.exe -k LocalService',\r\n 'wuauserv': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n 'WZCSVC': r'.*\\\\WINDOWS\\\\System32\\\\svchost\\.exe -k netsvcs',\r\n }\r\n\r\n if method == \"File\":\r\n combined_file = ok_file + ok_handle\r\n for ok_path in self.compile_regex(combined_file):\r\n if re.search(ok_path, path):\r\n return True\r\n\r\n if method == \"Handle\":\r\n for ok_path in self.compile_regex(ok_handle):\r\n if re.search(ok_path, path):\r\n return True\r\n\r\n if method == \"Internet\":\r\n for ok_path in self.compile_regex(ok_internet):\r\n cleaner_path = path.replace(\" \", \"\")\r\n if re.search(ok_path, cleaner_path):\r\n return True\r\n\r\n if method == \"Service\":\r\n for svc_name, svc_path in ok_svc.iteritems():\r\n if name.lower() == svc_name.lower():\r\n sp = self.compile_regex(svc_path)\r\n if re.match(sp, path):\r\n return True", "def setWhitelist(self, w):\n return self._set(whitelist=w)", "def test_03_visit_special(self):", "def test_allow(self) -> None:\n response = self.request(\"/\", method=\"HEAD\")\n self.assert_allowed(response, (\"GET\", \"POST\"))", "def testBeliefs1sk(self):", "def test__parse_allow(input_data):\n output = parse_allow(input_data)\n vampytest.assert_instance(output, Permission)\n return output", "def test_check_org_short_on_whitelist_false(self):\n\n org_name = 'AS10429'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertFalse(result)", "def test_remove_from_blacklist_with_string(self):\n email = '[email protected]'\n self.feature_test.add_to_blacklist(email)\n self.feature_test.remove_from_blacklist(email)\n self.assertFalse(email in Feature(\"testing\").blacklist)", "def test_check_ip_on_whitelist_false(self):\n\n ip_name = 'f11.my.com'\n\n result = check_ip_on_whitelist(ip_name, self.pattern_ip)\n\n self.assertFalse(result)", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def test_lower(self):\n results = list(Book.select(Book.title.lower() == \"a voyage in a balloon\"))\n self.assertIn(self.balloon, results)\n self.assertNotIn(self.carol, results)\n self.assertNotIn(self.miserables, results)\n self.assertNotIn(self.hunchback, results)\n self.assertNotIn(self.bellew, results)\n self.assertNotIn(self.amor, results)\n self.assertNotIn(self.eternity, results)\n\n # Test the unicode lowercase.\n results = list(Book.select(Book.title.lower() == \"le cap éternité\"))\n self.assertNotIn(self.balloon, results)\n self.assertNotIn(self.carol, results)\n self.assertNotIn(self.miserables, results)\n self.assertNotIn(self.hunchback, results)\n self.assertNotIn(self.bellew, results)\n self.assertNotIn(self.amor, results)\n self.assertIn(self.eternity, results)", "def test_visible_whitelisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "async def test_filtered_denylist(hass: HomeAssistant, mock_client) -> None:\n await _setup(\n hass,\n {\n \"include_entities\": [\"fake.included\", \"test.excluded_test\"],\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"*.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n FilterTest(\"fake.included\", True),\n FilterTest(\"alt_fake.excluded_test\", False),\n 
FilterTest(\"test.excluded_test\", True),\n FilterTest(\"not_real.excluded\", False),\n FilterTest(\"not_real.included\", True),\n ]\n\n for test in tests:\n hass.states.async_set(test.id, \"not blank\")\n await hass.async_block_till_done()\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def ignore_listings(name_key):\n # for blacklist_str in models_blacklist:\n # if blacklist_str in name_key:\n # return True\n return False", "def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))", "def test_listOnClean(self):\n output = self.userbase('list')\n self.assertEqual(output, ['No accounts'])", "def is_acceptable(self):", "def test_whitelist_zero_access_policies(self):\n p = self.load_policy({\n 'name': 'test-key-vault',\n 'resource': 'azure.keyvault',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'glob',\n 'value_type': 'normalize',\n 'value': 'cckeyvault2*'},\n {'not': [\n {'type': 'whitelist',\n 'key': 'principalName',\n 'users': ['[email protected]']}\n ]}\n ]\n })\n resources = p.run()\n self.assertEqual(len(resources), 0)", "def mock_amo_whitelist(monkeypatch, short_whitelist):\n monkeypatch.setattr(\n taar_locale, \"load_amo_curated_whitelist\", lambda: short_whitelist\n )", "def test_RestrictingNodeTransformer__visit_In_List():\n assert restricted_eval('2 in [1, 2, 3]') is True", "def test_validators():", "def test_remove_from_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.feature_test.remove_from_blacklist(3)\n self.assertFalse(3 in Feature(\"testing\").blacklist)", "def test_predicate_returns_true_for_whitelisted_context(self):\n test_cases = (\n InWhitelistTestCase(\n kwargs={\"channels\": self.channels},\n ctx=helpers.MockContext(\n channel=self.commands_channel, author=self.non_staff_member),\n description=\"In whitelisted channels by members without whitelisted roles\",\n ),\n InWhitelistTestCase(\n kwargs={\"redirect\": self.commands_channel.id},\n ctx=helpers.MockContext(\n channel=self.commands_channel, author=self.non_staff_member),\n description=\"`redirect` should be implicitly added to `channels`\",\n ),\n InWhitelistTestCase(\n kwargs={\"categories\": self.categories},\n ctx=helpers.MockContext(\n channel=self.general_channel, author=self.non_staff_member),\n description=\"In whitelisted category without whitelisted role\",\n ),\n InWhitelistTestCase(\n kwargs={\"roles\": self.roles},\n ctx=helpers.MockContext(\n channel=self.non_whitelisted_channel, author=self.staff_member),\n description=\"Whitelisted role outside of whitelisted channel/category\"\n ),\n InWhitelistTestCase(\n kwargs={\n \"channels\": self.channels,\n \"categories\": self.categories,\n \"roles\": self.roles,\n \"redirect\": self.commands_channel,\n },\n ctx=helpers.MockContext(\n channel=self.general_channel, author=self.staff_member),\n description=\"Case with all whitelist kwargs used\",\n ),\n )\n\n for test_case in test_cases:\n # patch `commands.check` with a no-op lambda that just returns the predicate passed to it\n # so we can test the predicate that was generated from the specified args&kwargs\n with unittest.mock.patch(\"bot.decorators.commands.check\", new=lambda predicate: predicate):\n predicate = in_whitelist(**test_case.kwargs)\n\n with self.subTest(test_description=test_case.description):\n 
self.assertTrue(predicate(test_case.ctx))", "def test_exclude_ip_ban(self):\n pass", "def allowed(cls):\n # type: () -> List[Str]\n names = cls.names()\n allowed = names\n allowed.extend([name.lower() for name in names])\n return allowed", "async def test_denylist(hass: HomeAssistant, mock_client) -> None:\n await _setup(\n hass,\n {\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"test.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n FilterTest(\"light.included\", True),\n FilterTest(\"test.excluded_test\", False),\n FilterTest(\"test.included_test\", True),\n FilterTest(\"not_real.included\", True),\n FilterTest(\"not_real.excluded\", False),\n ]\n\n for test in tests:\n hass.states.async_set(test.id, \"not blank\")\n await hass.async_block_till_done()\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def test_filter_function_settings_fail(self):\n with self.assertRaises(TypeError):\n self.es.register_filter('test')", "def test_linked_list_includes_exists():\n assert LinkedList.includes", "def test_listing_from_wall_when_blocked_some_users(self):", "def test_read_allowlist(self):\n\n curdir = os.path.dirname(os.path.abspath(__file__))\n allowlist_file = os.path.join(curdir, \"data\", \"ima-allowlist-short.txt\")\n allowlist_sig = os.path.join(curdir, \"data\", \"ima-allowlist-short.sig\")\n allowlist_bad_sig = os.path.join(curdir, \"data\", \"ima-allowlist-bad.sig\")\n allowlist_gpg_key = os.path.join(curdir, \"data\", \"gpg-sig.pub\")\n allowlist_checksum = \"8b7c2c6a1d7af2568cc663905491bda829c04c397cdba38cc4fc4d8d8a3e69d4\"\n allowlist_bad_checksum = \"4c143670836f96535d9e617359b4d87c59e89e633e2773b4d7feae97f561b3dc\"\n\n # simple read, no fancy verification\n al_data = ima.read_allowlist(allowlist_file)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertIsNotNone(al_data[\"meta\"], \"AllowList metadata is present\")\n self.assertEqual(al_data[\"meta\"][\"version\"], 1, \"AllowList metadata version is correct\")\n self.assertEqual(al_data[\"meta\"][\"generator\"], \"keylime-legacy-format-upgrade\", \"AllowList metadata generator is correct\")\n self.assertNotIn(\"checksum\", al_data[\"meta\"], \"AllowList metadata no checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")\n\n # validate checkum\n al_data = ima.read_allowlist(allowlist_file, allowlist_checksum)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertEqual(al_data[\"meta\"][\"checksum\"], allowlist_checksum, \"AllowList metadata correct checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")\n\n # test with a bad checksum\n with self.assertRaises(Exception) as bad_checksum_context:\n ima.read_allowlist(allowlist_file, allowlist_bad_checksum)\n self.assertIn('Checksum of allowlist does not match', str(bad_checksum_context.exception))\n\n 
# validate GPG signature\n al_data = ima.read_allowlist(allowlist_file, None, allowlist_sig, allowlist_gpg_key)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertNotIn(\"checksum\", al_data[\"meta\"], \"AllowList metadata no checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")\n\n # test with a bad GPG sig\n with self.assertRaises(Exception) as bad_sig_context:\n ima.read_allowlist(allowlist_file, None, allowlist_bad_sig, allowlist_gpg_key)\n self.assertIn('GPG signature verification failed', str(bad_sig_context.exception))\n\n # validate everything together\n al_data = ima.read_allowlist(allowlist_file, allowlist_checksum, allowlist_sig, allowlist_gpg_key)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertEqual(al_data[\"meta\"][\"checksum\"], allowlist_checksum, \"AllowList metadata correct checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")", "def test_url_pattern(self):\n\t\turl = URLFilter()\n\t\turl.set_limit(\"goog*\")\n\t\tself.assertTrue(url.check(Object(get_urls=lambda: ['google.com'])))", "def test_filter_wea_zero_entry():\n pass", "async def test_allowlist(hass, mock_client):\n handler_method = await _setup(\n hass,\n {\n \"include_domains\": [\"fake\"],\n \"include_entity_globs\": [\"test.included_*\"],\n \"include_entities\": [\"not_real.included\"],\n },\n )\n\n tests = [\n FilterTest(\"climate.excluded\", False),\n FilterTest(\"fake.included\", True),\n FilterTest(\"test.excluded_test\", False),\n FilterTest(\"test.included_test\", True),\n FilterTest(\"not_real.included\", True),\n FilterTest(\"not_real.excluded\", False),\n ]\n\n for test in tests:\n event = make_event(test.id)\n handler_method(event)\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def test_parse_restricted_tags():\n invalid_tags = {'*', '**', '***', 'a*', '*a', 'a*a*', '*a*a', '*aa*', 'a**a', '}'}\n combined_tags = valid_tags | invalid_tags\n\n # Function under test\n resultant_tags = searchtag.parse_restricted_tags(\" \".join(combined_tags))\n\n # Verify that we have the tags in the valid list\n assert resultant_tags == valid_tags", "def setup_whitelisted_section():\n setup_unrelated_section()\n\n # whitelist user to the course\n cs61a = Course.objects.get(name=\"CS61A\")\n user = User.objects.get(username=\"demo_user\")\n cs61a.whitelist.add(user)", "def test_copy_required_include_list(self):\n include_list = ['path/to/*', '[abc]?/*/file*']\n\n self.assertTrue(clone_rules._copy_required('path/to/rules.yara', include_list, []))\n self.assertTrue(clone_rules._copy_required(\n 'a1/some/long/path/file_apt.yara', include_list, []))\n self.assertTrue(clone_rules._copy_required('b2/malware/file ROOTKIT.YAR', include_list, []))\n\n self.assertFalse(clone_rules._copy_required('base.yara', include_list, []))\n self.assertFalse(clone_rules._copy_required('path/to/file.txt', include_list, []))\n 
self.assertFalse(clone_rules._copy_required('a1/file.yara', include_list, []))", "def test_linked_list_instantiates_with_list_input():\n a = [5, 6, 7, 8]\n aa = LinkedList(a)\n # for i in a:\n # assert aa.includes(i) is False\n assert len(aa) == len(a)\n assert aa.includes(5)\n assert aa.includes(6)\n assert aa.includes(7)\n assert aa.includes(8)", "def test_empty_list(self):\n self.assertEqual(pyperry.Base.resolve_name('ChittyChittyBangBang'), [])", "def test_check(self):\n\n self.assertTrue(PostfixExclude().check(self.file_gitignore))\n self.assertTrue(PostfixExclude().check(self.file_py))\n self.assertTrue(PostfixExclude().check(self.file_authors))\n self.assertTrue(PostfixExclude().check(self.file__init__))\n self.assertTrue(PostfixExclude().check(self.file_bin))", "def make_skipfunc(whitelist=None, blacklist=None,\n default_blacklist=None,\n verbose=False):\n assert not (whitelist and blacklist)\n def make_set(s):\n if s is None:\n return set()\n if isinstance(s, basestring):\n s = filter(None,\n [item.strip()\n for item in s.splitlines()])\n return set(s)\n\n whitelist = make_set(whitelist)\n has_whitelist = bool(whitelist)\n\n if has_whitelist:\n has_blacklist = False\n else:\n blacklist = make_set(blacklist)\n blacklist.update(make_set(default_blacklist))\n has_blacklist = bool(blacklist)\n\n def check_whitelist(s):\n return s not in whitelist\n\n def check_blacklist(s):\n return s in blacklist\n\n def return_false(s):\n return False\n\n def check_whitelist_verbose(s):\n res = s not in whitelist\n print 'skip %r? whitelist %s --> %r' \\\n % (s, whitelist, res)\n return res\n\n def check_blacklist_verbose(s):\n res = s in blacklist\n print 'skip %r? blacklist %s --> %r' \\\n % (s, blacklist, res)\n return res\n\n def return_false_verbose(s):\n res = False\n print 'skip %r? 
Don\\'t skip anything --> %r' \\\n % (s, res)\n return res\n\n if verbose:\n if has_whitelist:\n return check_whitelist_verbose\n elif has_blacklist:\n return check_blacklist_verbose\n else:\n return return_false_verbose\n else:\n if has_whitelist:\n return check_whitelist\n elif has_blacklist:\n return check_blacklist\n else:\n return return_false", "def test_allowed_list(self):\n val = DwcaValidator(yaml.load(self.yaml_allowed_list, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'rightsHolder': 'INBO'}\n self.assertTrue(val.validate(document))\n document = {'rightsHolder': 'ILVO'}\n self.assertFalse(val.validate(document))\n document = {'sex': 'male'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'female'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'Female'}\n self.assertFalse(val.validate(document))\n document = {'age': 'adult'}\n self.assertTrue(val.validate(document))\n document = {'age': 'juvenile'}\n self.assertTrue(val.validate(document))\n document = {'age': 'adult | juvenile'}\n self.assertTrue(val.validate(document))\n document = {'age': 'adult|juvenile'}\n self.assertFalse(val.validate(document))", "def test_check_org_on_whitelist_false(self):\n\n org_name = 'AS15169 Google LLC'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertFalse(result)", "def test_validate(self):\n pass", "def test_break_security_group_usual_case_specify_sg():", "def test_list_identity(self):\n pass", "def test_string_to_listed():\n\n @type_checked\n def _run_test(thing:[str]=None):\n assert thing == [\"words\"]\n\n _run_test(\"words\")", "def test_URLFilter(self):\n tkns = get_tokenizer(\"en_US\", filters=(URLFilter,))(self.text)\n out = [t for t in tkns]\n exp = [(\"this\", 0), (\"text\", 5), (\"with\", 10), (\"and\", 30),\n (\"SomeLinksLike\", 34), (\"AndOthers\", 93), (\"not\", 103), (\"quite\", 108),\n (\"a\", 114), (\"url\", 116), (\"with\", 134), (\"an\", 139), (\"aemail\", 142),\n (\"address\", 149), (\"as\", 157), (\"well\", 160)]\n self.assertEqual(out, exp)", "def test_no_op(self):\n request = RequestFactory().get('/?tags=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertNotIn('tags__slug__in', filter.qs.filters)", "def test_get_list(self):\n pass", "def _allowed_components():\n pass", "def test_get_permissions(self):\n pass", "def test_containsAll(self) -> None:\n assert containsAll('43221', '123')\n assert not containsAll('134', '123')", "def test_iterable(self):\n allowed_cls = AllowedSites(defaults=['yay.com'])\n with self.assertNumQueries(1):\n self.assertTrue(validate_host('example.com', allowed_cls))\n with self.assertNumQueries(1):\n self.assertTrue(validate_host('example.org', allowed_cls))\n with self.assertNumQueries(1):\n self.assertFalse(validate_host('djangoproject.com', allowed_cls))\n # ideally this should be 0 queries, because it's a default ...\n with self.assertNumQueries(1):\n self.assertTrue(validate_host('yay.com', allowed_cls))", "def import_whitelist(str):\n pat = re.compile('[^\\s]*((from|import)\\s+)+(%s)+' % '|'.join(settings.IMPORT_WHITELIST))\n if pat.search(str):\n return True\n else:\n return False", "def sanity_check(self):\n pass", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def test_visible_blacklisted_with_string(self):\n email = '[email protected]'\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_blacklist(email)\n self.assertFalse(self.feature_test.is_visible(email))", "def 
test_sm_list_filter_args(\n self, filter_args, exp_names):\n\n # Add two faked storage_groups\n self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n # Execute the code to be tested\n storage_groups = storage_group_mgr.list(filter_args=filter_args)\n\n assert len(storage_groups) == len(exp_names)\n if exp_names:\n names = [p.properties['name'] for p in storage_groups]\n assert set(names) == set(exp_names)", "def ImportsTest(recipe, allowed_modules):\n\n for _, val in sorted(recipe.global_symbols.iteritems()):\n if isinstance(val, types.ModuleType):\n module_name = val.__name__\n for pattern in allowed_modules:\n if pattern.match(val.__name__):\n break\n else:\n yield ('In %s:\\n'\n ' Non-whitelisted import of %s' % (recipe.path, module_name))", "def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])", "def setWhiteList(self, entities):\n return self._set(whiteList=entities)", "def sanitize(cls):", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def test_sanitize():\n config = Configuration()\n notification = Notification(Exception(\"oops\"), config, {}, request={\"params\":{\"password\":\"secret\"}})\n\n notification.add_tab(\"request\", {\"arguments\":{\"password\":\"secret\"}})\n\n payload = notification._payload()\n\n assert(payload['events'][0]['metaData']['request']['arguments']['password'] == '[FILTERED]')\n assert(payload['events'][0]['metaData']['request']['params']['password'] == '[FILTERED]')", "def test_rules():", "def test_prefilter_check(self):\r\n def handler(event):\r\n pass\r\n\r\n self.assertRaises(Exception, self.events.register, handler, PrefilterTest_1)\r\n self.assertRaises(Exception, self.events.register, handler, PrefilterTest_2)\r\n\r\n self.events.register(handler, PrefilterTest_1, require='foo')\r\n self.events.register(handler, PrefilterTest_2, require='foo')\r\n\r\n self.events.register(handler, PrefilterTest_1, require='foo', optional='bar')\r\n self.events.register(handler, PrefilterTest_2, require='foo', optional='bar')\r\n\r\n self.assertRaises(Exception, self.events.register, handler, PrefilterTest_1,\r\n require='foo', optional='bar', fooarg='excess argument')\r\n self.events.register(handler, PrefilterTest_2,\r\n require='foo', optional='bar', fooarg='excess argument')", "def test_registry(self):\n validate_registry()" ]
[ "0.7257275", "0.69541496", "0.6669898", "0.6634256", "0.63836783", "0.61440814", "0.607428", "0.6058461", "0.60226995", "0.60136276", "0.59704053", "0.59617037", "0.5960837", "0.59211046", "0.59204555", "0.59204555", "0.5909409", "0.59081924", "0.5890749", "0.5881475", "0.587579", "0.58560747", "0.5846006", "0.5831575", "0.58279777", "0.5826971", "0.58114916", "0.5811354", "0.5759723", "0.5742072", "0.5720152", "0.5698359", "0.56767637", "0.5660055", "0.56564003", "0.5654227", "0.5637114", "0.5630287", "0.56275886", "0.5614278", "0.5610614", "0.5602538", "0.5602431", "0.55851424", "0.55841786", "0.55762994", "0.55618405", "0.5539303", "0.55378133", "0.553665", "0.55316037", "0.553011", "0.5521267", "0.5514749", "0.5513351", "0.5512478", "0.5505011", "0.5494247", "0.54898906", "0.54770106", "0.5475853", "0.5467288", "0.546478", "0.54631406", "0.5458541", "0.5456498", "0.54540133", "0.5445987", "0.5436923", "0.54329133", "0.54242736", "0.54214823", "0.5419472", "0.54143655", "0.5412535", "0.54079705", "0.54077417", "0.5407406", "0.5404712", "0.53996205", "0.5397893", "0.539435", "0.539089", "0.53905517", "0.5381177", "0.5379936", "0.53751886", "0.5372613", "0.5368594", "0.5366819", "0.5365556", "0.5360922", "0.5358932", "0.5351839", "0.534897", "0.5340298", "0.5340251", "0.5336546", "0.53327316", "0.53292227" ]
0.67256504
2
Tests that a keyvault with 0 access policies is processed properly and doesn't raise an exception.
def test_whitelist_zero_access_policies(self):
    p = self.load_policy({
        'name': 'test-key-vault',
        'resource': 'azure.keyvault',
        'filters': [
            {'type': 'value',
             'key': 'name',
             'op': 'glob',
             'value_type': 'normalize',
             'value': 'cckeyvault2*'},
            {'not': [
                {'type': 'whitelist',
                 'key': 'principalName',
                 'users': ['[email protected]']}
            ]}
        ]
    })
    resources = p.run()
    self.assertEqual(len(resources), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_authz_file_empty_raises(self):\n self.env.config.set('authz_policy', 'authz_file', '')\n self.assertRaises(ConfigurationError, self.check_permission,\n 'WIKI_VIEW', 'änon', None, None)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "async def test_get_access_requests_no_envars(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n await self.inst._get_access_requests(\n \"test-container\"\n )\n self.sys_exit_mock.assert_called_once()", "def test_check_keys_exist_for_provider_list_no_keys(self):\n\n secret_key = [None, None]\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_all_sequential_open_distribution_no_access(self, has_access):\r\n has_access.return_value = False\r\n response = views.all_sequential_open_distrib(self.request, 'test/test/test')\r\n\r\n self.assertEqual(\"{\\\"error\\\": \\\"Access Denied: User does not have access to this course\\'s data\\\"}\", response.content)", "def test_invalid_access_key(self):\r\n data = {\r\n \"EdX-ID\": self.receipt_id,\r\n \"Result\": \"Testing\",\r\n \"Reason\": \"Testing\",\r\n \"MessageType\": \"Testing\"\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test testing:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('Access key invalid', response.content)\r\n self.assertEqual(response.status_code, 400)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_001_unauthorized_access(self):\n false_token = \"12345\"\n self.info(\"Will use token %s\", false_token)\n client = ComputeClient(self.clients.compute_url, false_token)\n client.CONNECTION_RETRY_LIMIT = self.clients.retry\n\n with self.assertRaises(ClientError) as cl_error:\n client.list_servers()\n self.assertEqual(cl_error.exception.status, 401)", "def test_assessor_access_limited(self):\n assessor = get_or_create_default_assessor()\n self.client.login(assessor.email)\n # This assessor doesn't belong to a group\n self.assertTrue(is_assessor(assessor))\n self.assertFalse(get_user_assessor_groups(assessor))\n\n # forbidden\n urls_get_forbidden = [\n reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n reverse('wl_applications:enter_conditions_assessor', args=[self.application.pk, self.assessment.pk]),\n ]\n urls_post_forbidden = [\n {\n 'url': reverse('wl_applications:create_condition', args=[self.application.pk]),\n 'data': {\n 'code': '123488374',\n 'text': 'condition text'\n }\n },\n {\n 'url': reverse('wl_applications:set_assessment_condition_state'),\n 'data': {\n 'assessmentConditionID': self.assessment_condition.pk,\n 'acceptanceStatus': 'accepted',\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n 'data': {\n 
'conditionID': [self.condition.pk],\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions_assessor',\n args=[self.application.pk, self.assessment.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n # Allowed\n urls_get_allowed = [\n reverse('wl_applications:search_conditions')\n ]\n urls_post_allowed = [\n ]\n for url in urls_get_forbidden:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_post_forbidden:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_get_allowed:\n response = self.client.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n\n for url in urls_post_allowed:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEqual(200, response.status_code)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_interactive_withdraw_no_token(client):\n response = client.get(WEBAPP_PATH)\n assert \"Missing authentication token\" in str(response.content)\n assert response.status_code == 403", "def test_section_problem_grade_distribution_no_access(self, has_access):\r\n has_access.return_value = False\r\n response = views.section_problem_grade_distrib(self.request, 'test/test/test', '1')\r\n\r\n self.assertEqual(\"{\\\"error\\\": \\\"Access Denied: User does not have access to this course\\'s data\\\"}\", response.content)", "def test_access_negative(self, api):\n self.builder.add_user(api.get_user())\n r1 = api.access_user(api.get_user(), False)\n access_false = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_false == 0\n assert r1.status_code == 200", "def test_call_bad_perms(self):\r\n self.assertRaises(ValueError, self.cs_overview, -1)", "def test_vault_create_authorization_for_vault_section(self):\n pass", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_all_functions_auth_failure(self):\r\n \r\n auth = {'username':'tester', 'api_key':'api_key'}\r\n\r\n # Indicate no user record was found with the provided auth info.\r\n interface.get_user_with_api_key = mock_raises_DoesNotExistError\r\n \r\n try:\r\n proxy.renew_resources(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.acquire_resources(auth, {})\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.acquire_specific_vessels(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.release_resources(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_resource_info(auth)\r\n except 
xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_account_info(auth)\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_public_key(auth)\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")", "def test_all_problem_grade_distribution_no_access(self, has_access):\r\n has_access.return_value = False\r\n response = views.all_problem_grade_distribution(self.request, 'test/test/test')\r\n\r\n self.assertEqual(\"{\\\"error\\\": \\\"Access Denied: User does not have access to this course\\'s data\\\"}\", response.content)", "def test_vault_delete_authorization_for_vault_section(self):\n pass", "def test_check_keys_exist_for_provider_string(self):\n\n secret_key = None\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_get_vault_pubkeys(self):\n pass", "def test_whitelist(self):\n p = self.load_policy({\n 'name': 'test-key-vault',\n 'resource': 'azure.keyvault',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'glob',\n 'value_type': 'normalize',\n 'value': 'cckeyvault1*'},\n {'not': [\n {'type': 'whitelist',\n 'key': 'principalName',\n 'users': ['[email protected]']}\n ]}\n ]\n })\n resources = p.run()\n self.assertEqual(len(resources), 1)", "def test40_check_authz(self):\n # auth disabled\n LDPHandler.no_auth = True\n h = mockedLDPHandler()\n h.check_authz(None, 'write')\n # auth enabled, no admin\n LDPHandler.no_auth = False\n h = mockedLDPHandler()\n self.assertRaises(HTTPError, h.check_authz, LDPRS('uri:a'), 'write')", "def check_vault_access(self, did, access_vault=None):\n info = self.get_vault_service(did)\n if not info:\n raise VaultNotFoundException()\n\n # INFO: no need check permission.\n # if (access_vault == VAULT_ACCESS_WR or access_vault == VAULT_ACCESS_DEL) \\\n # and info[VAULT_SERVICE_STATE] == VAULT_SERVICE_STATE_FREEZE:\n # raise ForbiddenException(msg=\"The vault can't be written.\")", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_excessive_Sigops(self):\n logging.info(\"Entered : test_excessive_Sigops \\n\")\n try:\n testExcessiveSigops(self)\n except (Exception, JSONRPCException) as e1:\n logging.info(e1)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n raise TestAssertionError({\"file_name\": fname, \"line_num\": exc_tb.tb_lineno, \\\n \"error_type\": exc_type.__name__, \"error_msg\": str( e1 ), \\\n \"n1\" : \"N/A\", \"n2\" : \"N/A\", \"amount\" : \"N/A\", \"numsig\" : \"N/A\"})", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"[email protected]\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n 
ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def check_for_no_privates(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n assert \"cvss\" in v\n assert \"is_private\" in v\n assert \"vendor_cve_ids\" in v\n if v[\"is_private\"]:\n raise Exception(\"Private vulnerability found\")", "def test_get_authz_file_notdefined_raises(self):\n 
self.env.config.remove('authz_policy', 'authz_file')\n self.assertRaises(ConfigurationError, self.check_permission,\n 'WIKI_VIEW', 'änon', None, None)", "def test_invalid(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n with pytest.raises(AssertionError):\n key.audit(5, 1, 1, 1)", "def test_wrong_total_number_of_keys(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n policy = replace(self.policy, num_different_keys_in_all_bundles=2)\n with self.assertRaises(KSR_POLICY_KEYS_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\n \"Unacceptable number of key sets in request test, (1 keys instead of 2)\",\n str(exc.exception),\n )", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_wrong_number_of_keys_in_a_bundle(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n policy = replace(self.policy, num_keys_per_bundle=[2, 1])\n with self.assertRaises(KSR_POLICY_KEYS_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\"Bundle #1/test-1 has 1 keys, not 2\", str(exc.exception))", "def test_filter_public_permissions(self):\n data = {\"public\": \"view\"}\n check_public_permissions(data)\n\n data = {\"public\": \"edit\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"share\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)\n\n data = {\"public\": \"owner\"}\n with self.assertRaises(exceptions.PermissionDenied):\n check_public_permissions(data)", "def test_get_asgard_vaults(self):\n pass", "def test_acceptrisk(client):\n g.test_authorized_for = []\n res = client.post(\"/v0/acceptrisk\", json={\"fingerprint\": \"\", \"token\": \"\"})\n assert res.json.get(\"message\") == \"acceptrisk failed\"\n res = client.post(\"/v0/acceptrisk\", json={})\n assert res.json.get(\"status\") == \"error\"", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_modify_access_revoke_not_allowed(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {\r\n 'unique_student_identifier': self.other_staff.email,\r\n 'rolename': 'instructor',\r\n 'action': 'revoke',\r\n })\r\n self.assertEqual(response.status_code, 200)", "def test_deploy_policy_fail_key(self):\n\n self._check_deploy_failure(\n self._create_test_app(key='',\n flavor='m1.small'),\n 'missing key')", "def test_missing_authorize_proof(self):\n node, other = self.create_nodes(2)\n node.send_identity(other)\n\n # permit NODE\n authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"permit\"),\n (node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"authorize\")])\n node.give_message(authorize, self._mm)\n\n # OTHER wants the proof that OWNER is allowed to grant authorization to NODE\n 
node.give_message(other.create_missing_proof(authorize.authentication.member, authorize.distribution.global_time), other)\n\n # NODE sends dispersy-authorize containing authorize(MASTER, OWNER) to OTHER\n _, authorize = other.receive_message(names=[u\"dispersy-authorize\"]).next()\n\n permission_triplet = (self._mm.my_member.mid, u\"protected-full-sync-text\", u\"permit\")\n authorize_permission_triplets = [(triplet[0].mid, triplet[1].name, triplet[2]) for triplet in authorize.payload.permission_triplets]\n self.assertIn(permission_triplet, authorize_permission_triplets)", "def test_privatize_vaults(self):\n g = Game()\n g.add_player(uuid4(), 'p0')\n g.add_player(uuid4(), 'p1')\n gs = g\n\n p0, p1 = gs.players\n\n latrine, insula, statue, road = cm.get_cards(['Latrine', 'Insula', 'Statue', 'Road'])\n p0.vault.set_content([latrine, insula])\n p1.vault.set_content([statue, road])\n\n gs_private = g.privatized_game_state_copy('p1')\n p0, p1 = gs_private.players\n\n self.assertEqual(p0.vault, Zone([Card(-1)]*2, name='vault'))\n self.assertEqual(p1.vault, Zone([Card(-1)]*2, name='vault'))", "def test_empty_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(\n key=\"\", description=\"container\", software_system=system1\n )", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(len(data), 1)", "def sanity_check(self, test_vec_handle):\n self.vec_space.sanity_check(test_vec_handle)", "def test_without_login_balance_view(self):\n response = self.client.get(reverse('account_balance', args=[self.acc.id]))\n self.assertEqual(response.status_code, 403)", "async def test_txn_list_with_zero_count(self):\n response = await self.get_assert_status('/transactions?count=0', 400)\n\n self.assert_has_valid_error(response, 53)", "def test_no_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(description=\"container\", software_system=system1)", "def test_get_yggdrasil_vaults(self):\n pass", "def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)", "def test_that_test_can_fail():\n try:\n verify_atomic_weight_for_substance(\"O2\", 1.0)\n except AssertionError as e:\n return\n\n raise AssertionError(\"test_that_test_can_fail() didn't fail\")", "def test_old_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 10, 9)\n assert key.audit_state == 'stagnant_expire'", "def test_get_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.get(\"key 1\")", "def test_unauthorized(self, req):\n req.side_effect = ks_exc.Unauthorized()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to demonstrate that future calls still\n # work\n req.reset_mock()\n 
self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_leave_accrual_access_rights(self):\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n accrual.write({\n 'line_ids': [(0, 0, {\n 'name': 'Test',\n 'amount_cash': 100,\n 'date': datetime.now(),\n })],\n })\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_3.id).check_access_rule, 'read')\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_2.id).check_access_rights, 'write')\n\n accrual.sudo(self.user_1.id).check_access_rule('read')\n self.assertTrue(\n accrual.sudo(self.user_1.id).check_access_rights('read'))\n\n # The manager can not access the leave accruals of the employee 2\n # because he is not the employee's manager\n accrual_2 = self.employee_2.get_leave_accrual(self.leave_type.id)\n\n self.assertRaises(\n Exception,\n accrual_2.sudo(self.user_1.id).check_access_rule, 'read')\n\n self.user_1.write({\n 'groups_id': [(4, self.ref('base.group_hr_manager'))]})\n\n for operation in ['read', 'write', 'create', 'unlink']:\n accrual_2.sudo(self.user_1.id).check_access_rule(operation)\n self.assertTrue(\n accrual_2.sudo(self.user_1.id).check_access_rights(operation))", "def test_get_authz_file_notfound_raises(self):\n authz_file = os.path.join(self.env.path, 'some-nonexistent-file')\n self.env.config.set('authz_policy', 'authz_file', authz_file)\n self.assertRaises(ConfigurationError, self.check_permission,\n 'WIKI_VIEW', 'änon', None, None)", "def test_access_positive(self, api):\n self.builder.add_user(api.get_user())\n self.builder.upd_access(api.get_user(), False)\n r1 = api.access_user(api.get_user(), True)\n access_true = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_true == 1\n assert r1.status_code == 200", "def testOperationsWithoutLock(self):\n self.assertRaises(RuntimeError, self._lock.Unlock)\n self.assertRaises(RuntimeError, self._lock.SetInUse, True)\n self.assertRaises(RuntimeError, self._lock.SetInUse, False)", "def test_no_permissions(self):\n\n login(self.client)\n\n client = create_client('test')\n client.write_access = False\n client.save()\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': 3})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def testValidClientApprovalAllowsAccessToEverythingInsideClient(self):\n self.InitDefaultRouter()\n\n client_id = self.SetupClient(0)\n gui_test_lib.CreateFileVersion(client_id, \"fs/os/foo\")\n\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n self.RequestAndGrantClientApproval(client_id, requestor=self.test_username)\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n # Move the clocks forward to make sure the approval expires.\n with test_lib.FakeTime(\n rdfvalue.RDFDatetime.Now() + config.CONFIG[\"ACL.token_expiry\"],\n increment=1e-3):\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()", "def test_assessor_access_normal(self):\n assessor = 
get_or_create_default_assessor()\n self.client.login(assessor.email)\n # This assessor doesn't belong to a group\n self.assertTrue(is_assessor(assessor))\n # add the assessor to the assessment group\n self.assertTrue(Assessment.objects.filter(application=self.application).count() > 0)\n for assessment in Assessment.objects.filter(application=self.application):\n add_assessor_to_assessor_group(assessor, assessment.assessor_group)\n\n # forbidden\n urls_get_forbidden = [\n reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n ]\n urls_post_forbidden = [\n {\n 'url': reverse('wl_applications:create_condition', args=[self.application.pk]),\n 'data': {\n 'code': '123488374',\n 'text': 'condition text'\n }\n },\n {\n 'url': reverse('wl_applications:set_assessment_condition_state'),\n 'data': {\n 'assessmentConditionID': self.assessment_condition.pk,\n 'acceptanceStatus': 'accepted',\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n # Allowed\n urls_get_allowed = [\n reverse('wl_applications:search_conditions'),\n reverse('wl_applications:enter_conditions_assessor', args=[self.application.pk, self.assessment.pk]),\n ]\n urls_post_allowed = [\n {\n 'url': reverse('wl_applications:enter_conditions_assessor',\n args=[self.application.pk, self.assessment.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n for url in urls_get_forbidden:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_post_forbidden:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_get_allowed:\n response = self.client.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n\n for url in urls_post_allowed:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEqual(200, response.status_code)", "def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_tenant_secret_page_on_root_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.tenant_root_domain)\n self.assertEqual(response.status_code, 403)", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def test_auth_private_unowned(self):\n self.do_visible(False, 
'pattieblack', False, tenant='froggy')", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "async def test_get_access_requests_no_os_envars(self):\n with self.sys_exit_patch, \\\n self.patch_init_request_client_error, \\\n self.os_environ_get_patch:\n with self.assertRaises(SystemExit):\n await self.inst._get_access_requests(\n \"test-container\"\n )", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def test_audit_only_not_expired(self):\n CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1, tzinfo=UTC))\n audit_only_course = CourseFactory.create()\n self.create_user_for_course(audit_only_course, CourseUserType.ENROLLED)\n response = self.client.get(course_home_url(audit_only_course))\n assert response.status_code == 200\n self.assertContains(response, TEST_COURSE_TOOLS)\n self.assertNotContains(response, TEST_BANNER_CLASS)", "def test_enable_missing_svn_access(self):\n svn = SpokeSVN(self.org_name, self.user_id)\n svn.delete(self.svn_repo_name)\n self.assertRaises(error.NotFound, svn.modify, enable=True)", "def test_tally_no_votes(self):\n self.populate_database()\n self.electionA.elect_open = False\n with self.assertRaises(NoVotes):\n self.wta.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.proportional.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.schulze.check_race(self.raceA.id)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_view_acls(self):\n v1, v2, v3 = set_resources_and_sync([\n make_video(\n media_id='123', acl=['USER_spqr1', 'USER_abcd1', 'INST_botolph', 'GROUP_1234']),\n make_video(media_id='456', acl=['WORLD']),\n make_video(media_id='789', acl=['CAM']),\n ])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n i3 = mpmodels.MediaItem.objects.get(jwp__key=v3.key)\n\n self.assertEqual(i1.view_permission.crsids, ['spqr1', 'abcd1'])\n self.assertEqual(i1.view_permission.lookup_groups, ['1234'])\n self.assertEqual(i1.view_permission.lookup_insts, ['botolph'])\n self.assertFalse(i1.view_permission.is_public)\n self.assertFalse(i1.view_permission.is_signed_in)\n\n self.assertEqual(i2.view_permission.crsids, [])\n self.assertEqual(i2.view_permission.lookup_groups, [])\n self.assertEqual(i2.view_permission.lookup_insts, [])\n self.assertTrue(i2.view_permission.is_public)\n self.assertFalse(i2.view_permission.is_signed_in)\n\n self.assertEqual(i3.view_permission.crsids, [])\n self.assertEqual(i3.view_permission.lookup_groups, [])\n self.assertEqual(i3.view_permission.lookup_insts, [])\n self.assertFalse(i3.view_permission.is_public)\n self.assertTrue(i3.view_permission.is_signed_in)", "def test_basic_acl(self):\n v1, = set_resources_and_sync([make_video(acl=[], media_id='1234')])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n\n self.assertEqual(i1.view_permission.crsids, [])\n self.assertEqual(i1.view_permission.lookup_groups, [])\n self.assertEqual(i1.view_permission.lookup_insts, [])\n 
self.assertFalse(i1.view_permission.is_public)\n self.assertFalse(i1.view_permission.is_signed_in)", "def test_no_api_key(self):\n\n self.assertRaises(Exception, kaput.init, None, '123')", "def test_normal(self):\n get_response = lambda: self.client.get(self.url)\n\n self.assert_authentication_required(get_response)\n\n # regular users have no access\n self.login_as(\"bob\")\n with self.assertNumQueries(2):\n self.assert_not_authorized(get_response())\n\n # superuser has access\n self.login_as(\"admin\")\n with self.assertNumQueries(3):\n response = get_response()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), self.num_communities)\n self.assertListEqual(list(response.data[0].keys()), self.expected_keys)", "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def test_modify_access_noparams(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 400)", "def test_parse_authz_empty(self):\n create_file(self.authz_file, '')\n authz_policy = AuthzPolicy(self.env)\n authz_policy.parse_authz()\n self.assertEqual([], authz_policy.authz.sections())", "def test_wrong_permission(self):\n with self.assertRaises(InvalidPermissionStringError):\n client_has_permission('test', 'asdf')", "def test_can_info_does_not_exist(self):\n fake_user = User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def test_falsepositive_no_token_passed(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/falsepositive?fp=splunk_82998ef6bb3db9dff3dsfdsfsdc\")\n assert res.status == \"500 INTERNAL SERVER ERROR\"", "def testExpiredClientApprovalIsNoLongerValid(self):\n self.InitDefaultRouter()\n\n client_id = self.SetupClient(0)\n gui_test_lib.CreateFileVersion(client_id, \"fs/os/foo\")\n\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n with test_lib.FakeTime(100.0, increment=1e-3):\n self.RequestAndGrantClientApproval(\n client_id, requestor=self.test_username)\n\n # This should work now.\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n token_expiry = config.CONFIG[\"ACL.token_expiry\"]\n\n # Make sure the caches are reset.\n self.ClearCache()\n\n # This is close to expiry but should still work.\n with test_lib.FakeTime(100.0 + token_expiry - 100.0):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n # Make sure the caches are reset.\n self.ClearCache()\n\n # Past expiry, should fail.\n with test_lib.FakeTime(100.0 + token_expiry + 100.0):\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()", "def test_revoked_cert(self):\n\n # Initially should be able to to operations like open a session\n self._open_session()\n HttpAgentRpc().remove_host(self.host.fqdn)\n\n # After revokation any access should be bounced\n response = self._post([])\n self.assertEqual(response.status_code, 403)\n response = self._get()\n self.assertEqual(response.status_code, 403)", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n 
self.fixture.user1_template.id, request=mock_request\n )", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def test_disable_missing_svn_access(self):\n svn = SpokeSVN(self.org_name, self.user_id)\n svn.delete(self.svn_repo_name)\n self.assertRaises(error.NotFound, svn.modify, enable=False)", "def test_change_provisioned_throughput_usual_case():", "def test_private_key_not_set():\n\n loop = asyncio.get_event_loop()\n with aioresponses() as m:\n m.post('https://api.idex.market/returnNextNonce', payload=nonce_res, status=200)\n m.post('https://api.idex.market/cancel', payload=json_res, status=200)\n\n async def _run_test():\n client = await AsyncClient.create(api_key, address)\n with pytest.raises(IdexPrivateKeyNotFoundException):\n await client.cancel_order('0xcfe4018c59e50e0e1964c979e6213ce5eb8c751cbc98a44251eb48a0985adc52')\n\n loop.run_until_complete(_run_test())", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_withdraw_no_jwt(client):\n response = client.post(WITHDRAW_PATH, follow=True)\n assert response.status_code == 403\n assert response.json() == {\"error\": \"JWT must be passed as 'Authorization' header\"}", "def verify_that_the_acl_was_not_set_to_rtacltest3(driver):\n assert wait_on_element(driver, 5, f'//div[contains(text(),\"rt-acl-test-1\")]//button', 'clickable')\n driver.find_element_by_xpath(f'//div[contains(text(),\"rt-acl-test-1\")]//button').click()\n time.sleep(3)\n assert wait_on_element(driver, 5, f'//tr[contains(.,\"rt-acl-test-3\")]//mat-icon[text()=\"more_vert\"]', 'clickable')\n driver.find_element_by_xpath(f'//tr[contains(.,\"rt-acl-test-3\")]//mat-icon[text()=\"more_vert\"]').click()\n time.sleep(1)\n assert wait_on_element(driver, 5, '//button[normalize-space(text())=\"View Permissions\"]')\n driver.find_element_by_xpath('//button[normalize-space(text())=\"View Permissions\"]').click()\n assert wait_on_element(driver, 5, '//div[contains(text(),\"User - games\")]') is False" ]
[ "0.615517", "0.6075056", "0.6071155", "0.6068709", "0.59903747", "0.5970702", "0.59659946", "0.5951002", "0.5945882", "0.5927157", "0.59155506", "0.5856525", "0.58519167", "0.5801739", "0.5789592", "0.5782919", "0.5781346", "0.577808", "0.5775319", "0.576997", "0.57695985", "0.5769285", "0.5766115", "0.573969", "0.57388294", "0.57350534", "0.5731577", "0.57238805", "0.57086813", "0.5697845", "0.5694458", "0.56939673", "0.56829107", "0.56751513", "0.56662726", "0.56633824", "0.5657074", "0.5653288", "0.5653288", "0.56448287", "0.5624394", "0.5622021", "0.5615138", "0.5613062", "0.5604899", "0.55940604", "0.5593928", "0.5588812", "0.5582449", "0.557949", "0.55646396", "0.556", "0.5544055", "0.55391586", "0.55342096", "0.5522261", "0.5518573", "0.5513411", "0.5510242", "0.5504686", "0.5496956", "0.54963905", "0.5492255", "0.5488937", "0.5483468", "0.5482448", "0.5480292", "0.5479991", "0.54769194", "0.547548", "0.54744875", "0.5472958", "0.54701823", "0.5469658", "0.54688215", "0.54596126", "0.54456174", "0.5443084", "0.5433524", "0.5433524", "0.54295224", "0.54253787", "0.5419644", "0.54186624", "0.541745", "0.5416812", "0.5414576", "0.54108465", "0.5409741", "0.54075533", "0.5405899", "0.54034257", "0.539362", "0.5390045", "0.5386033", "0.53806555", "0.5379776", "0.5374928", "0.5374596", "0.53717935" ]
0.7272632
0
Filters a list of elements. 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'elements' is the list of elements to filter. Returns a list containing only those elements for which 'select' returns True.
def filter(self, viewer, parent, elements):
    return [e for e in elements if self.select(viewer, parent, e)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select(self, viewer, parent, element):\n\n return True", "def filter_by_reviewers(reviews, selected_reviewers):\n return [x for x in reviews if x.reviewer in selected_reviewers]", "def validate(elements):\n return list(filter(lambda el: el.is_valid, elements))", "def select(elements, val=True):\n for el in elements:\n el.select_set(val)", "def hide_show_elements(driver: webdriver, elements: list, hide: bool = None) -> None:\n for element_locator in elements:\n locator_type, locator_value = element_locator\n element_list = get_element(driver, locator_value, locator_type, many=True)\n if element_list:\n for element in element_list:\n display_element(driver, element, hide)", "def order_filter(self,elements):", "def find_elements(self, locator, parent=None):\n return self._element_finder.find(locator, first_only=False,\n required=False, parent=parent)", "def find_elements(self, elements: List[WebElement]) -> List[WebElement]:\n return elements", "def filter(self, *args):\n return _libsbml.ElementFilter_filter(self, *args)", "def query_parent(selectors, tree_item):\n return [subitem for subitem in iterate_parent(tree_item)\n if all(selectors, subitem)]", "def filter_selection_set(info: GraphQLResolveInfo):\n from graphql import Location\n from .pyutils import unfreeze\n\n excluded_field_nodes = []\n\n def _should_include(field_node: FieldNode):\n if not field_node.name:\n # Unknown field_node type\n return True\n if field_node.name.value == \"subscription_id\":\n return True\n\n # Location is a highly nested AST type\n excluded_field_nodes.append(unfreeze(field_node, ignore_types=[Location]))\n return False\n\n info.field_nodes[0].selection_set.selections = [\n x for x in info.field_nodes[0].selection_set.selections if _should_include(x)]\n\n return excluded_field_nodes", "def filter(self, row):\r\n return list(itertools.compress(row, self.selectors))", "def filter(self, filters):", "def selectAll(self,parent):\n\t\tif parent.IsOk() and self.tree.ItemHasChildren(parent):\n\t\t\tchild, cookie = self.tree.GetFirstChild(parent)\n\t\t\twhile child:\n\t\t\t\tobj = self.tree.GetPyData(child)\n\t\t\t\tselect = obj != \"1\" and obj != \"2\"\n\t\t\t\tif child not in self.tree.GetSelections() and select:\n\t\t\t\t\tself.tree.SelectItem(child)\n\t\t\t\tif self.tree.ItemHasChildren(child):\n\t\t\t\t\tself.selectAll(child)\n\t\t\t\tchild = self.tree.GetNextSibling(child)", "def clean_all_filters(driver, selector):\n filters_buttons = driver.find_elements_by_css_selector(selector)\n\n if not filters_buttons:\n return\n\n filters_buttons[0].click()\n driver.implicitly_wait(1)\n\n if len(filters_buttons) > 1:\n clean_all_filters(driver, selector)", "def filter_none(elems):\n return [x for x in elems if x is not None]", "def query(selectors, tree_item):\n return [subitem for subitem in iterate_item(tree_item)\n if all(selectors, subitem)]", "def find_elements_inside_element(self, parent_element: Union[WebElement, Tuple[By, str]],\n children_element_locator: Tuple[By, str], wait_time=10,\n skip_exception=False) -> List[WebElement]:\n parent_element = self.find_element(parent_element)\n for i in range(wait_time):\n by_type, value = children_element_locator\n if by_type == By.CSS_SELECTOR:\n children = parent_element.find_elements_by_css_selector(value)\n elif by_type == By.XPATH:\n children = parent_element.find_elements_by_xpath(value)\n else:\n children = parent_element.find_elements(children_element_locator)\n if len(children):\n return children\n time.sleep(1)\n else:\n if not skip_exception:\n raise 
TimeoutException(f'Elements was not found in {wait_time} seconds')\n return []", "def filter(self, cls):\n return ElementList([x for x in self._elements if isinstance(x, cls)])", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def filter_func(self, agents):\n return [\n agent for agent in agents\n if agent.energy < self.model.energy_threshold and not agent.pack\n ]", "def node_type_filter(node_list, *filter_types):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.node_type_filter\")\n\n flg.info(\"Filtering Node List\")\n\n filtered_list = []\n for node in node_list:\n node_type = mc.nodeType(node)\n flg.debug(\"Node, {0}, is of type, {1}\".format(node, node_type))\n if node_type not in filter_types:\n flg.debug(\"Node kept\")\n filtered_list.append(node)\n else:\n flg.debug(\"Node filtered\")\n flg.info(\"Returning Filtered List\")\n return filtered_list", "def filter_to(self, samples):\n sample_set = set(samples)\n\n filtered_trios = []\n for trio in self._trios:\n restricted_trio = trio._restrict_to(sample_set)\n if restricted_trio is not None:\n filtered_trios.append(restricted_trio)\n\n return Pedigree(filtered_trios)", "def select_from(pav_cfg,\n paths: Iterable[Path],\n filter_func: Callable[[Any], bool] = default_filter,\n transform: Callable[[Path], Any] = None,\n order_func: Callable[[Any], Any] = None,\n order_asc: bool = True,\n fn_base: int = 10,\n limit: int = None) -> (List[Any], List[Path]):\n\n paths = list(paths)\n max_threads = min(pav_cfg.get('max_threads', 1), len(paths))\n\n selector = partial(select_one, ffunc=filter_func, trans=transform,\n ofunc=order_func, fnb=fn_base)\n\n if max_threads > 1:\n with ThreadPoolExecutor(max_workers=max_threads) as pool:\n selections = pool.map(selector, paths)\n else:\n selections = map(selector, paths)\n\n selected = [(item, path) for item, path in zip(selections, paths)\n if item is not None]\n\n if order_func is not None:\n selected.sort(key=lambda d: order_func(d[0]), reverse=not order_asc)\n\n return SelectItems(\n [item[0] for item in selected][:limit],\n [item[1] for item in selected][:limit])", "def filter_list(self, node_list):\n filtered_list = []\n for node in node_list:\n if self.is_member(node):\n filtered_list.append(node)\n return filtered_list", "def get_selected_elements(doc):\n try:\n # Revit 2016\n return [doc.GetElement(id)\n for id in __revit__.ActiveUIDocument.Selection.GetElementIds()]\n except:\n # old method\n return list(__revit__.ActiveUIDocument.Selection.Elements)", "def filter_(self, ancestors, filter_, matches):\n\n compounds = self.data_object.get_compound()\n\n node_stack = stack(self.data_object, ancestors)\n\n for compound in compounds:\n\n compound_finder = self.item_finder_factory.create_finder(compound)\n compound_finder.filter_(node_stack, filter_, matches)", "def elements(xpath_selection):\n driver = Driver().connect()\n return driver.find_elements_by_xpath(xpath_selection)", "def filter_selected_nodes(tree) -> list:\n return [n for n in tree.nodes if n.select and n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}]", "def filter_nodes(self, node_filter, parent=None):\n if self.data is None:\n return None\n\n if parent is None:\n return self.data.xpath(node_filter)\n else:\n return parent.xpath(node_filter)", "def filter_element_and(mt_list, elem_list):\r\n return [mt for mt in mt_list if all(e in mt['pretty_formula'] for e in elem_list)]", "def filter_element_or(mt_list, elem_list):\r\n return [mt for mt in mt_list if any(e in mt['pretty_formula'] for 
e in elem_list)]", "def filter(self, op):\n def op_filter(seqs):\n r = [s for s in seqs if op(s)]\n if len(r) == 0:\n return None\n else:\n return r\n return self.element_wise(op_filter)", "def my_filter(function,lst):\n return list(x for x in lst if function(x))", "def parent( self, selector = None ): \n tmpList = []\n for node in self.nodeList:\n if node.parentNode:\n tmpList += self.getUniqueNodes( tmpList, [ node.parentNode ] )\n if selector:\n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self ).filter( selector )\n else:\n tmpList = sorted( tmpList, key = lambda x: x.pos )\n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList,self)", "def selectElements(self, f, elements):\n if isinstance(elements, types.StringTypes):\n m = self.elementIndex(elements)\n return f[m]\n if elements:\n fs = []\n k = 0\n for s in elements:\n k = self.elementIndex(s)\n fs.append(f[k])\n return asarray(fs)\n else:\n return asarray(f)", "def process_elements_hook_pass_everything(\n state: ProcessingState, # pylint: disable=unused-argument\n elements: rght.Elements,\n) -> rght.Elements:\n return elements", "def filter_filenames(filenames, filters, inverse=False):\n out = []\n for filename in filenames:\n for filt in filters:\n if (filt not in filename) + (inverse) == 1:\n break\n else:\n out.append(filename)\n return out", "def parents( self, selector = None ): \n tmpList = []\n for node in self.nodeList:\n if not node.ancestorList:\n node.generateAncestorList()\n tmpList += self.getUniqueNodes( tmpList, node.ancestorList )\n if selector:\n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self ).filter( selector )\n else:\n tmpList = sorted( tmpList, key = lambda x: x.pos ) \n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self)", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def filter(self, included_suites=None, included_tests=None,\n included_tags=None, excluded_tags=None):\n self.visit(Filter(included_suites, included_tests,\n included_tags, excluded_tags))", "def get_direct_inputs(self, ignore_types=None):\n inputs = []\n # Stright from self.inputs:\n if not ignore_types:\n return self.inputs\n else:\n # Type check for ignore_types list/tuple:\n if not hasattr(ignore_types, '__iter__'):\n if inspect.isclass(ignore_types):\n ignore_types = (ignore_types,)\n else:\n raise TypeError(\"ignore_types must be either class of list of classes.\")\n # Return either filtered inputs or go recurently further to\n # find child which passes filter test.\n for action in self.inputs:\n if True in [isinstance(action, type) for type in ignore_types]:\n inputs += action.get_direct_inputs(ignore_types=ignore_types)\n else:\n inputs += [action]\n\n return inputs", "def web_elements(self):\n if isinstance(self._selector, tuple):\n return self._driver.find_elements(*self._selector)", "def validateSelect(nodes=None, minimum=0, maximum=0, find=None, parent=False, display=pm.error):\n # If user chooses not to display anything, we must pass an empty function\n if not display:\n\n def _nothing(*args):\n pass # using a function instead of a lambda one-liner because PEP-8\n\n display = _nothing\n\n if not nodes:\n nodes = pm.selected()\n\n if find and not parent:\n nodes = pm.ls(nodes, type=find)\n\n if not nodes and find:\n nodes = pm.ls(type=find)\n\n if parent:\n nodes = list({node.getParent() for node in nodes})\n\n if not nodes:\n display('Nothing selected!')\n return []\n\n if len(nodes) < minimum:\n 
display('Not enough selected. Please select at least ' + str(minimum) + ' objects.')\n return []\n\n if 1 < maximum < len(nodes):\n display('Too many objects selected. Please select up to ' + str(maximum) + ' objects.')\n return []\n\n return nodes", "def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])", "def filter(self, viewlets):\n return [(name, viewlet) for name, viewlet in viewlets\n if isAvailable(viewlet)]", "def filter(self, record):\n\n if self.config[\"filter\"].get(\"whitelist\"):\n return any(name.filter(record) for name in self.parse_list)\n return not any(name.filter(record) for name in self.parse_list)", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def by_their(\n self,\n selector: Union[str, Tuple[str, str], Callable[[Element], Element]],\n condition: Condition[Element],\n ) -> Collection:\n\n def find_in(parent: Element) -> Element:\n if callable(selector):\n return selector(parent)\n else:\n return parent.element(selector)\n\n return self.by(lambda it: condition(find_in(it)))", "def select(self, test):\n survivors = []\n for particle in self.particles:\n # Find the originating particle\n parent = particle\n while parent.origin is not None:\n parent = parent.origin.initial_state[0]\n if test(parent, particle) is True:\n survivors.append(particle)\n return ParticleCollection(survivors)", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def filter(self, func: Callable[[T], bool]) -> 'List[T]':\n return [v for v in self.array if func(v)]", "def recursive_filter(filters, tiddlers):\n if len(filters) == 0:\n return tiddlers\n filter = filters.pop(0)\n try:\n return recursive_filter(filters, filter(tiddlers))\n except AttributeError, exc:\n raise FilterError('malformed filter: %s' % exc)", "def filter_nodes(request):\n\tmodel_dict = {\"player\": {'qs': Player.objects.all(), 'SerClass': PlayerSerializer},}\n\n\tlist_country_query = [e for e in ['player_country', 'band_country', 'venue_country', 
'festival_country', 'album_country'] if e in request.query_params.keys()]\n\t# check that the nodes in the list_country_query are also selected !\n\tlist_country_query = [e for e in list_country_query if e[:-8] in ['player']]\n\n\t# filter each country\n\tif list_country_query != []:\n\t\tfor country_q in list_country_query:\n\t\t\t\n\t\t\t# for player_countries: treat it as a list so you query several countries at once\n\t\t\tif (country_q == 'player_country') and (hasattr(request.query_params, \"getlist\")):\n\t\t\t\tquery_list_countries = request.query_params.getlist('player_country')\n\t\t\t\t# have several countries in the filter\n\t\t\t\tQ_query_filter = Q()\n\t\t\t\tfor ql in query_list_countries:\n\t\t\t\t\tQ_query_filter |= Q(country=ql)\n\t\t\t\tmodel_dict[country_q[:-8]]['qs'] = model_dict[country_q[:-8]]['qs'].filter(Q_query_filter)\n\n\t\t\t# for everything else: only filter one option at a time\n\t\t\telse:\n\t\t\t\tmodel_dict[country_q[:-8]]['qs'] = model_dict[country_q[:-8]]['qs'].filter(country=request.query_params[country_q])\n\telse:\n\t\tpass\n\n\tlist_filter_query = [e for e in ['instrument', 'active', 'name'] if e in request.query_params.keys()]\n\tif 'name' in request.query_params.keys():\n\t\tfor k, v in model_dict.items():\n\t\t\tv['qs'] = v['qs'].filter(name__startswith=request.query_params['name'])\n\t\n\tif 'instrument' in request.query_params.keys():\n\t\t# build the query filter so that you can query several instruments at once\n\t\tQ_query_filter = Q()\n\t\tfor ql in request.query_params.getlist('instrument'):\n\t\t\tQ_query_filter |= Q(instrument__name=ql)\n\t\tmodel_dict['player']['qs'] = model_dict['player']['qs'].filter(Q_query_filter)\n\n\tif 'active' in request.query_params.keys():\n\t\tfor k, v in model_dict.items():\n\t\t\tv['qs'] = v['qs'].filter(isactive=request.query_params['active'])\n\treturn model_dict", "def all(self):\n\t\timport revitron\n\t\tdb = revitron.DB\n\t\tf = db.LogicalOrFilter(\n\t\t db.ElementIsElementTypeFilter(False),\n\t\t db.ElementIsElementTypeFilter(True)\n\t\t)\n\n\t\tself.collector = self.collector.WherePasses(f)\n\t\treturn self", "def tableSelFieldsFilter(tdata, columns):\n\tif areAllFieldsIncluded(tdata[0], columns):\n\t\tntdata = tdata\n\telse:\n\t\tntdata = list()\n\t\tfor rec in tdata:\n\t\t\t#print(rec)\n\t\t\t#print(columns)\n\t\t\tnrec = extractList(rec, columns)\n\t\t\tntdata.append(nrec)\n\treturn ntdata", "def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst", "def get_list_if_visible(self, selector, no_highlight=False):\n l = self.get_list(selector, no_highlight=no_highlight)\n return [e for e in l if e.is_displayed()]", "def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)", "def get_filterable_queryset(self):\n site = self.get_site()\n\n if not site:\n return self.get_model_class().objects.none()\n\n queryset = self.get_model_class().objects.in_site(site).live()\n\n filterable_list_block = self.get_filterable_list_wagtail_block()\n if filterable_list_block is None:\n return queryset\n\n if filterable_list_block.value['filter_children']:\n queryset = queryset.child_of(self)\n elif filterable_list_block.value['filter_siblings']:\n queryset = queryset.sibling_of(self)\n\n return queryset", "def 
filter_generic(mt_list, func):\r\n return [mt for mt in mt_list if func(mt)]", "def type_filter(self, items, types=None):\n if not types:\n return items\n allowed_items = []\n for item in items:\n if item.portal_type not in types:\n continue\n allowed_items.append(item)\n return allowed_items", "def parent(self, parent_object, limit_parent_language=True):\n lookup = get_parent_lookup_kwargs(parent_object)\n\n # Filter the items by default, giving the expected \"objects for this parent\" items\n # when the parent already holds the language state.\n if limit_parent_language:\n language_code = get_parent_language_code(parent_object)\n if language_code:\n lookup[\"language_code\"] = language_code\n\n return self.filter(**lookup)", "def vm_filter(self):\r\n vm_filter = []\r\n if self._vmFilter:\r\n subclient_filter = self._vmFilter\r\n if 'children' in subclient_filter:\r\n children = subclient_filter['children']\r\n vm_filter = self._get_content_list(children)\r\n return vm_filter", "def select_from_list_by_text(self, locator, *texts):\n if not texts:\n raise ValueError(\"No texts given.\")\n items_str = \"text(s) '%s'\" % \", \".join(texts)\n self._info(\"Selecting %s from list '%s'.\" % (items_str, locator))\n select = self._get_select_list(locator)\n for text in texts:\n select.select_by_visible_text(text)", "def filter(self, resource_manager, parent_ids=None, **params):\n m = self.resolve(resource_manager.resource_type)\n if resource_manager.get_client:\n client = resource_manager.get_client()\n else:\n client = local_session(self.session_factory).client(m.service)\n\n enum_op, path, extra_args = m.enum_spec\n if extra_args:\n params.update(extra_args)\n\n parent_type, parent_key, annotate_parent = m.parent_spec\n parents = self.manager.get_resource_manager(parent_type)\n if not parent_ids:\n parent_ids = []\n for p in parents.resources(augment=False):\n if isinstance(p, str):\n parent_ids.append(p)\n else:\n parent_ids.append(p[parents.resource_type.id])\n\n # Bail out with no parent ids...\n existing_param = parent_key in params\n if not existing_param and len(parent_ids) == 0:\n return []\n\n # Handle a query with parent id\n if existing_param:\n return self._invoke_client_enum(client, enum_op, params, path)\n\n # Have to query separately for each parent's children.\n results = []\n for parent_id in parent_ids:\n merged_params = self.get_parent_parameters(params, parent_id, parent_key)\n subset = self._invoke_client_enum(\n client, enum_op, merged_params, path, retry=self.manager.retry)\n if annotate_parent:\n for r in subset:\n r[self.parent_key] = parent_id\n if subset and self.capture_parent_id:\n results.extend([(parent_id, s) for s in subset])\n elif subset:\n results.extend(subset)\n return results", "def locate_elements(self, selector):\n if ',' not in selector:\n return self.base_driver.find_elements_by_id(selector)\n\n selector_by = selector.split(',')[0].strip()\n selector_value = selector.split(',')[1].strip()\n\n if selector_by == \"i\" or selector_by == 'id':\n elements = self.base_driver.find_elements_by_id(selector_value)\n elif selector_by == \"n\" or selector_by == 'name':\n elements = self.base_driver.find_elements_by_name(selector_value)\n elif selector_by == \"c\" or selector_by == 'class_name':\n elements = self.base_driver.find_elements_by_class_name(selector_value)\n elif selector_by == \"l\" or selector_by == 'link_text':\n elements = self.base_driver.find_elements_by_link_text(selector_value)\n elif selector_by == \"p\" or selector_by == 'partial_link_text':\n 
elements = self.base_driver.find_elements_by_partial_link_text(selector_value)\n elif selector_by == \"t\" or selector_by == 'tag_name':\n elements = self.base_driver.find_elements_by_tag_name(selector_value)\n elif selector_by == \"x\" or selector_by == 'xpath':\n elements = self.base_driver.find_elements_by_xpath(selector_value)\n elif selector_by == \"s\" or selector_by == 'css_selector':\n elements = self.base_driver.find_elements_by_css_selector(selector_value)\n else:\n raise NameError(\"Please enter a valid type of targeting elements.\")\n\n return elements", "def filter_list(data: List[dict], field: str, selected: List[str]):\n if len(selected):\n return [x for x in data if x[field] in selected]\n else:\n return data", "def only_vsources(comp_list):\n return filter(lambda comp: isinstance(comp, components.VoltageSource), comp_list)", "def filterData(records):\n def isInteresting(record):\n if record[VO_ISSUER] in ('/DC=ch/DC=cern/OU=computers/CN=voms.cern.ch', '/DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch'):\n return True\n if record[VO_NAME] in ('atlas', 'cms', 'alice'):\n return True\n if record[USERSN] == '/C=SI/O=SiGNET/O=IJS/OU=F9/CN=Andrej Filipcic':\n return True\n if record[USERSN] in ('aliprod', '/aliprod'):\n return True\n if record[USERSN].startswith(ALIEN_USER_PREFIX):\n return True\n\n return False\n\n return [ r for r in records if isInteresting(r) ]", "def extract_filter_list(self, filter_type, elements):\n titleLabel = QLabel(filter_type)\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n self.filterVbox.addLayout(grid)\n\n counter = 0\n for element in elements:\n nextLabel = QLabel(element)\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter/3), counter % 3, alignment=Qt.AlignCenter)\n counter += 1", "def _filter_for_panel( item, item_type, filters, context ):\n def _apply_filter( filter_item, filter_list ):\n for filter_method in filter_list:\n try:\n if not filter_method( context, filter_item ):\n return False\n except Exception as e:\n raise MessageException( \"Toolbox filter exception from '%s': %s.\" % ( filter_method.__name__, e ) )\n return True\n if item_type == panel_item_types.TOOL:\n if _apply_filter( item, filters[ 'tool' ] ):\n return item\n elif item_type == panel_item_types.LABEL:\n if _apply_filter( item, filters[ 'label' ] ):\n return item\n elif item_type == panel_item_types.SECTION:\n # Filter section item-by-item. 
Only show a label if there are\n # non-filtered tools below it.\n\n if _apply_filter( item, filters[ 'section' ] ):\n cur_label_key = None\n tools_under_label = False\n filtered_elems = item.elems.copy()\n for key, section_item_type, section_item in item.panel_items_iter():\n if section_item_type == panel_item_types.TOOL:\n # Filter tool.\n if _apply_filter( section_item, filters[ 'tool' ] ):\n tools_under_label = True\n else:\n del filtered_elems[ key ]\n elif section_item_type == panel_item_types.LABEL:\n # If there is a label and it does not have tools,\n # remove it.\n if cur_label_key and ( not tools_under_label or not _apply_filter( section_item, filters[ 'label' ] ) ):\n del filtered_elems[ cur_label_key ]\n\n # Reset attributes for new label.\n cur_label_key = key\n tools_under_label = False\n\n # Handle last label.\n if cur_label_key and not tools_under_label:\n del filtered_elems[ cur_label_key ]\n\n # Only return section if there are elements.\n if len( filtered_elems ) != 0:\n copy = item.copy()\n copy.elems = filtered_elems\n return copy\n\n return None", "def Filter(sourcesarray, match, include=1):\n if include:\n return Array(*[item for item in sourcesarray if item.find(match) > -1])\n else:\n return Array(*[item for item in sourcesarray if item.find(match) == -1])", "def test_driver_filtering(instr_task_workbench, instr_view):\n p = instr_task_workbench.get_plugin('ecpy.instruments')\n filtered = instr_view.filter_drivers(p._profiles['fp1'].model.drivers)\n assert len(filtered) == 2\n\n pt = instr_task_workbench.get_plugin('ecpy.tasks')\n del pt._tasks.contributions['ecpy.InstrumentTask'].interfaces\n filtered = instr_view.filter_drivers(p._profiles['fp1'].model.drivers)\n assert len(filtered) == 1", "def get_many(self, selectors, parent=[], parameters=None):\n if not parameters:\n parameters = self._parameters\n\n for e in parameters:\n param = Parameter(e)\n parent.append(param['name'])\n\n if param['children'] and any_list_contains(selectors, parent):\n self.get_many(selectors, parent, param['children'])\n '''\n \n if param['name'] in field or '*' in field:\n if fields:\n ret = self.get('/'.join(fields), conditions,\n param['children'])\n if ret:\n results.append(ret)\n else:\n if not self.test_conditions(param, conditions):\n continue\n\n if field.endswith('eng'):\n results.append(param['eng'])\n elif field.endswith('name'):\n results.append(param['name'])\n else:\n results.append(param['raw_int'])\n return results\n '''", "def filter_macro(out, scope, args, children):\n len(args) == 2 or syntax_error(\"'filter' macro takes exactly 2 arguments.\")\n regex, l = args\n if not isinstance(l, list):\n syntax_error(\"Invalid list in 'filter' macro: '%s'\" % str(list))\n if not isinstance(regex, str):\n syntax_error(\"Invalid regex in 'filter' macro: '%s'\" % str(regex))\n def match(s):\n return re.search(regex, s)\n return list(filter(match, l))", "def filter(self, hierarchy: List[str]) -> bool:", "def soft_assert_bulk_verify_filter_ui_elements(modal, soft_assert):\n filter_section_element = modal.filter_section.expand()\n soft_assert.expect(\n filter_section_element.reset_to_default_button.exists,\n \"'Reset to Default' button should be displayed in filter section.\")\n soft_assert.expect(\n filter_section_element.get_state_filter_options() == [\n 'Select All', 'In Review'],\n \"Filter should contain exactly 2 options: 'Select All', 'In Review'.\")\n expected_filters = [{\"attr_name\": \"Verifiers\", \"compare_op\": \"Contains\",\n \"value\": users.current_user().email}]\n 
soft_assert.expect(\n filter_section_element.get_filters_dicts() == expected_filters,\n \"Modal should contain default filter for current user as a verifier.\")", "def _filter_entries(self, entries):\n entries = super()._filter_entries(entries)\n if self._filter_categories:\n return list(filter(lambda entry:\n entry.category in self._filter_categories,\n entries))\n return entries", "def filter_all(_):\n return True", "def type_filter(self, items, types=None):", "def create_filters_selects_elements() -> html.Div:\n\n geographical_data = DEPARTEMENTS_GEOGRAPHICAL_DATA\n waste_nomenclature = get_waste_code_hierarchical_nomenclature()\n\n geographical_data = geographical_data.to_dict(as_series=False)\n options = [\n {\"value\": a, \"label\": b}\n for a, b in zip(\n geographical_data[\"code_departement\"], geographical_data[\"libelle\"]\n )\n ]\n\n options.insert(0, {\"value\": \"all\", \"label\": \"France entière\"})\n\n departements_dropdown = html.Div(\n [\n html.Label(\n [\"Sélectionner un département :\"],\n className=\"fr-label\",\n htmlFor=\"departement-select\",\n ),\n dcc.Dropdown(\n options=options,\n placeholder=\"Rechercher un département...\",\n id=\"departement-select\",\n value=\"all\",\n clearable=False,\n ),\n ],\n className=\"fr-select-group\",\n id=\"departement-select-group\",\n )\n\n waste_select = html.Div(\n [\n html.Button(\n [\"Filtrer par code déchet\"],\n id=\"waste-select-modal-button\",\n className=\"fr-btn\",\n **{\"data-fr-opened\": False, \"aria-controls\": \"fr-modal-1\"},\n ),\n html.Dialog(\n html.Div(\n html.Div(\n html.Div(\n html.Div(\n [\n html.Div(\n html.Button(\n \"Fermer\",\n className=\"fr-link--close fr-link\",\n title=\"Fermer la fenêtre de sélection des filtres sur les codes déchets\",\n **{\n \"aria-controls\": \"fr-modal-1\",\n },\n ),\n className=\"fr-modal__header\",\n ),\n html.Div(\n [\n html.H1(\n [\n html.Span(\n className=\"fr-fi-arrow-right-line fr-fi--lg\"\n ),\n \"Filtrer par code déchets :\",\n ],\n id=\"fr-modal-title-modal-1\",\n className=\"fr-modal__title\",\n ),\n AntdTree(\n id=\"waste-select\",\n className=\"waste-select\",\n treeData=waste_nomenclature,\n # multiple=True,\n checkable=True,\n selectable=False,\n defaultCheckedKeys=[\"all\"],\n defaultExpandedKeys=[\"all\"],\n ),\n ],\n className=\"fr-modal__content\",\n ),\n ],\n className=\"fr-modal__body\",\n ),\n className=\"fr-col-12 fr-col-md-8\",\n ),\n className=\"fr-grid-row fr-grid-row--center\",\n ),\n className=\"fr-container fr-container--fluid fr-container-md\",\n ),\n id=\"fr-modal-1\",\n className=\"fr-modal\",\n role=\"dialog\",\n **{\"aria-labelledby\": \"fr-modal-title-modal-1\"},\n ),\n ],\n id=\"waste-select-group\",\n )\n\n selects_div = html.Div(\n [departements_dropdown, waste_select], className=\"selects-container\"\n )\n\n return selects_div", "def filter_viable_offers(want, have, offers) -> List:\n return [x for x in offers if is_offer_viable(want, have, x) is True]", "def _filter_max_dist_in_element(self, simplices):\n if self.max_dist_in_element is None:\n return simplices\n\n filtered = []\n for tup in simplices:\n dists = []\n for root in tup:\n new_dist = self.cluster.get_distances(root, tup)\n dists += list(new_dist)\n\n if max(dists) < self.max_dist_in_element:\n filtered.append(tup)\n return filtered", "def web_videos_filter(channel, tracks, archived, played):\n\n videos = []\n\n if channel == 'all':\n for tracked in tracks:\n videos.append(yt_get_channel_videos(tracked['id']))\n videos = [\n item\n for sublist in videos\n for item in sublist\n 
]\n else:\n videos = yt_get_channel_videos(channel)\n\n if archived == 'true':\n videos = [\n video\n for video in videos\n if video['archived'] is not None\n ]\n elif archived == 'false':\n videos = [\n video\n for video in videos\n if video['archived'] is None\n ]\n\n if played == 'true':\n videos = [\n video\n for video in videos\n if video['played'] is not None\n ]\n elif played == 'false':\n videos = [\n video\n for video in videos\n if video['played'] is None\n ]\n\n return videos", "def lsThroughFilter(*args, item: Union[AnyStr, List[AnyStr]]=\"\", nodeArray: bool=True, reverse:\n bool=True, selection: bool=True, sort: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def getFilteredInterpreters(self, data):\n def filterInterPreters(dataItem):\n return dataItem['type'] == 'interpreter'\n return list(filter(filterInterPreters, data))", "def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]", "def filter_representative_sites_patient(\n df: pd.DataFrame, representative_sites: List[str]) -> pd.DataFrame:\n\n return df.loc[~(df['site'].isin(representative_sites))]", "def isolateSelect(*args, addDagObject: name=None, addSelected: bool=True, addSelectedObjects:\n bool=True, loadSelected: bool=True, removeDagObject: name=None,\n removeSelected: bool=True, state: bool=True, update: bool=True, viewObjects:\n bool=True, q=True, query=True, **kwargs)->Union[bool, Any]:\n pass", "def getAndFilterPlayers(self, filterFunc):\n\t\tplayerlist = []\n\t\tfor x in self.playerList:\n\t\t\tif x != None:\n\t\t\t\tplayer = filterFunc(x)\n\t\t\t\tif player != None:\n\t\t\t\t\tplayerlist.append(x)\n\t\treturn playerlist", "def __sub__(self, vs):\n return [v for v in self.__elements if tuple(v) not in map(tuple, vs)]", "def filter(self, filters:list)->list:\n for item in self.list:\n use_item = True\n for filter in filters:\n filter_key, filter_value, filter_type = filter\n if filter_type == \"<\" and item[filter_key] >= filter_value:\n use_item = False\n break\n elif filter_type == \">\" and item[filter_key] <= filter_value:\n use_item = False\n break\n elif filter_type == \"<=\" and item[filter_key] > filter_value:\n use_item = False\n break\n elif filter_type == \">=\" and item[filter_key] < filter_value:\n use_item = False\n break\n elif filter_type == \"=\" and not item[filter_key] == filter_value:\n use_item = False\n break\n if use_item:\n yield item", "def filter(self, rois):\n can_fragments = np.array([roi.can_fragment for roi in rois])\n return can_fragments", "def tool_panel_contents( self, trans, **kwds ):\n filter_method = self._build_filter_method( trans )\n for _, item_type, elt in self._tool_panel.panel_items_iter():\n elt = filter_method( elt, item_type )\n if elt:\n yield elt", "def find_elements(self, elements_locator: Tuple[By, str], wait_time=10, skip_exception=False) -> List[WebElement]:\n try:\n return WebDriverWait(self.driver, wait_time).until(EC.presence_of_all_elements_located(elements_locator),\n message=f\"Can't find elements with {elements_locator}\")\n except TimeoutException as err:\n if not skip_exception:\n print(f\"Elements was not found in {wait_time} seconds\")\n raise err\n return []", "def gatherSelected(self):\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list", "def get_widgets(parent: \"wx.Window | wx.Panel\") -> \"list[wx.Window]\":\n\n items: \"list[wx.Window]\" = [parent]\n\n for item 
in parent.GetChildren():\n items.append(item)\n\n if hasattr(item, \"GetChildren\"):\n for child in item.GetChildren():\n items.append(child)\n\n return items", "def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]", "def filter_ancestor(self, queryset, name, ancestor):\n\n return queryset.filter(\n parent__in=ancestor.get_descendants(include_self=True)\n )" ]
[ "0.53416806", "0.5286004", "0.516275", "0.51596725", "0.5059366", "0.5055007", "0.50266373", "0.49372914", "0.4907756", "0.47648945", "0.47609642", "0.46878317", "0.46518713", "0.46268824", "0.46157223", "0.4608182", "0.4597628", "0.45550662", "0.45305058", "0.45274553", "0.45258284", "0.4478596", "0.445101", "0.44420236", "0.4434969", "0.4425385", "0.44229695", "0.44192868", "0.4409322", "0.44092733", "0.44076505", "0.43972543", "0.43911508", "0.43844572", "0.43459055", "0.42992067", "0.4285451", "0.4273205", "0.42656636", "0.42629942", "0.42609364", "0.42531744", "0.4224319", "0.4218904", "0.4209808", "0.4208949", "0.42062145", "0.41963106", "0.41884625", "0.4176774", "0.41738066", "0.4162465", "0.41435912", "0.41371605", "0.41334897", "0.41313985", "0.41290337", "0.41220313", "0.41171297", "0.4115912", "0.4112793", "0.4109728", "0.40992326", "0.40821347", "0.4069792", "0.4068957", "0.40662074", "0.40653092", "0.40645823", "0.40624562", "0.40604234", "0.40542522", "0.405217", "0.405009", "0.4048056", "0.40455726", "0.4045441", "0.4033213", "0.40283495", "0.40261605", "0.40238813", "0.40162805", "0.40157044", "0.40140745", "0.40132174", "0.40072447", "0.40065908", "0.40045345", "0.40038627", "0.4000164", "0.39939854", "0.3993748", "0.39920044", "0.3982891", "0.39766005", "0.39734375", "0.39723337", "0.39723215", "0.39702332", "0.39656883" ]
0.87938285
0
Returns True if the element is 'allowed' (i.e. NOT filtered). 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'element' is the element to select. By default we return True.
def select(self, viewer, parent, element):
    return True
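The permissive default above comes from a viewer-filter style API: a concrete filter normally subclasses it and returns False for the elements it wants to hide, and the viewer then displays only the elements for which select() returns True. The sketch below is illustrative only — the NameFilter class, its pattern argument, and the element's 'name' attribute are assumptions, not part of this dataset:

import re

class NameFilter:
    """Illustrative filter: allow only elements whose name matches a pattern."""

    def __init__(self, pattern):
        self.pattern = re.compile(pattern)

    def select(self, viewer, parent, element):
        # An element passes (is NOT filtered) only if its assumed 'name'
        # attribute matches the configured pattern.
        return bool(self.pattern.search(getattr(element, "name", "")))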
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None", "def tag_visible(element):\n\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True", "def filter(self, viewer, parent, elements):\n\n return [e for e in elements if self.select(viewer, parent, e)]", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def is_blocked(self, xsd_element: 'XsdElement') -> bool:\n xsd_type = xsd_element.type\n if self is xsd_type:\n return False\n\n block = f'{xsd_element.block} {xsd_type.block}'.strip()\n if not block:\n return False\n\n _block = {x for x in block.split() if x in ('extension', 'restriction')}\n return any(self.is_derived(xsd_type, derivation) for derivation in _block)", "def is_element_only(self) -> bool:\n raise NotImplementedError()", "def is_filter_trait(self, element, trait_name):\n\n return False", "def filterAcceptsRow(self, sourceRow, sourceParentIndex):\n parent_item = self.sourceModel().treeItem(sourceParentIndex)\n tree_item = parent_item.child(sourceRow)\n\n accept = ((self._show_special_attributes or\n not tree_item.is_special_attribute) and\n (self._show_callables or\n not tree_item.is_callable_attribute))\n\n return accept", "def check_element(self, e):\n my_view = {}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n\n if self.element_constraint is not None:\n ret = self.element_constraint.apply_to(e)\n else:\n ret = True\n return ret & apply_to(my_view, e)", "def check_parent_and_children_not_in_view(self, element: Element) -> None:\n for view in self.element_views:\n if view.element in element.child_elements:\n raise ValueError(f\"A child of {element.name} is already in this view.\")\n if view.element is getattr(element, \"parent\", None):\n raise ValueError(\n f\"The parent of {element.name} is already in this view.\"\n )", "def is_parent(self):\n if self.parent is not None:\n return False\n return True", "def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, bs4.element.Comment):\n return False\n if re.match(r\"[\\n]+\", str(element)):\n return False\n return True", "def can_be_viewed_by(self,user):\n return True", "def is_element_enabled(self):\n if self.web_element.is_enabled():\n return True\n else:\n return False", "def has_parent(self):\n return self.parent != None", "def isElement(self, elementXpath):\r\n try:\r\n self.browser.find_element_by_xpath(elementXpath)\r\n return True\r\n except:\r\n return False", "def is_element_available(self, locator):\r\n if self.driver.is_element_present(locator):\r\n if self.driver.is_visible(locator):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def is_review_permitted(self, user):\n if user.is_authenticated or settings.OSCAR_ALLOW_ANON_REVIEWS:\n return not self.has_review_by(user)\n else:\n return False", "def is_parent_of(self):\n return self.hasLabel('parent_of')", "def isElementOnly(self):\n return _libsbml.SBaseExtensionPoint_isElementOnly(self)", "def is_element_displayed(self, locator=\"\", locator_type=\"id\", element=None):\n is_displayed = False\n try:\n if locator: # This means if locator is not empty\n element = self.get_element_(locator, 
locator_type)\n if element is not None:\n is_displayed = element.is_displayed()\n self.log.info(\"Element is displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n else:\n self.log.info(\"Element not displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n return is_displayed\n except:\n print(\"Element not found\")\n return False", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()", "def can_traverse(self, equipment: str, point: Point) -> bool:\n region_type = self.get_region(point)\n traversable = [\n [\"torch\", \"climbing\"],\n [\"climbing\", \"neither\"],\n [\"torch\", \"neither\"]\n ]\n return equipment in traversable[region_type]", "def can_change_external_reviewers(user, submission) -> bool:\n # check if all submissions have external review enabled\n if not submission.stage.has_external_review:\n return False\n\n if user.is_superuser:\n return True\n\n if settings.GIVE_STAFF_LEAD_PERMS and user.is_apply_staff:\n return True\n\n # only leads can change external reviewers\n if submission.lead.id == user.id:\n return True\n\n return False", "def has_parent(self):\n return False", "def _is_element_clickable(self, locator):\n return self.wait.until(lambda x: self.ec.element_to_be_clickable(self.get_element(locator)))", "def contains(self, element):\n pass", "def is_permitted(self):\n\t\tfrom frappe.utils import has_common\n\n\t\tallowed = [\n\t\t\td.role for d in frappe.get_all(\"Has Role\", fields=[\"role\"], filters={\"parent\": self.name})\n\t\t]\n\n\t\tcustom_roles = get_custom_allowed_roles(\"page\", self.name)\n\t\tallowed.extend(custom_roles)\n\n\t\tif not allowed:\n\t\t\treturn True\n\n\t\troles = frappe.get_roles()\n\n\t\tif has_common(roles, allowed):\n\t\t\treturn True", "def has_parent(self):\n return self._parent_ is not None", "def any_parent_has_power(self, member_name):\n for parent in self.parents_of(member_name):\n if parent.has_power:\n return True\n \n return False", "def _is_node_an_element(self, node):\n # Try the simplest approach first, works for plain old ElementTree\n if isinstance(node, BaseET.Element):\n return True\n # For cElementTree we need to be more cunning (or find a better way)\n if hasattr(node, 'makeelement') \\\n and isinstance(node.tag, six.string_types):\n return True", "def has_character_access(self, character: EveCharacter) -> Optional[bool]:\n owner_corporation = self.structure.owner.corporation\n if character.corporation_id == owner_corporation.corporation_id:\n return True\n if (\n owner_corporation.alliance\n and owner_corporation.alliance.alliance_id == character.alliance_id\n ):\n return self.allow_alliance_access\n else:\n return None", "def is_element_display(self, selector):\n return True if self.get_element(selector).is_displayed() else False", "def _is_element_present():\r\n return self.q(css=element_selector).present", "def isElementDisplayed(self, locator=\"\",locatorType='id', element=None):\n isDisplayed=False\n\n try:\n if locator:\n element=self.getElement(locator,locatorType)\n\n if element is not None:\n isDisplayed=element.is_displayed()\n self.logger.info(\"Element is displayed with locator\" + locator + \"LocatorType\" + locatorType)\n\n else:\n self.logger.info(\"Element is not displayed with locator\" + locator + 
\"LocatorType\" + locatorType)\n return isDisplayed\n\n except:\n print(\"Element not found\")\n return False", "def is_view_dropdown_visible(self):\n return self.is_element_visible(self.view_dropdown_locator)", "def check(self, capability):\n capability = ircutils.toLower(capability)\n if capability == 'owner' or capability == antiOwner:\n if self.__parent.__contains__('owner'):\n return not isAntiCapability(capability)\n else:\n return isAntiCapability(capability)\n elif self.__parent.__contains__('owner'):\n if isAntiCapability(capability):\n return False\n else:\n return True\n else:\n return self.__parent.check(capability)", "def is_visible_to(self, user):\n return True", "def IsValidSubElement(self, name):\n return bool(self.LoadSubElement(name))", "def can_contain(self):\n return False", "def is_parent(self):\n return not self.children", "def is_viewed(self):\n return self.has_label(VIEWED_LABEL)", "def _should_be_filtered(self, tag, attrs):\n \n # Test if the node's tag should be filtered\n if self.__ignores[0] and tag in self.__ignores[0]:\n return False\n \n # Test if the node's attributes should be filtered\n filters = self.__ignores[1][any_tag]\n if tag in self.__ignores[1]:\n filters |= self.__ignores[1][tag]\n\n try:\n if any('.%s' % attr[1] in filters for attr in attrs if attr[0] == 'class'):\n return False\n except KeyError:\n pass\n\n try:\n if any('#%s' % attr[1] in filters for attr in attrs if attr[0] == 'id'):\n return False\n except KeyError:\n pass\n\n return True", "def is_item_editor(self,user):\n if user.is_anonymous():\n return False\n if self.group:\n grouptest = user.person == self.group.project_leader or user.person == self.group.editor\n return user.is_staff or user.is_superuser or user.id == self.creator_id", "def read_allowed(self, ui, req):\n\n user = req.env.get('REMOTE_USER')\n\n deny_read = ui.configlist('web', 'deny_read', untrusted=True)\n if deny_read and (not user or ismember(ui, user, deny_read)):\n return False\n\n allow_read = ui.configlist('web', 'allow_read', untrusted=True)\n # by default, allow reading if no allow_read option has been set\n if (not allow_read) or ismember(ui, user, allow_read):\n return True\n\n return False", "def is_reviewer(user, addon, allow_content_reviewers=True):\n if addon.type == amo.ADDON_STATICTHEME:\n return is_static_theme_reviewer(user)\n return is_listed_addons_reviewer(\n user, allow_content_reviewers=allow_content_reviewers\n )", "def _is_xblock_read_only(xblock):\r\n # We allow direct editing of xblocks in DIRECT_ONLY_CATEGORIES (for example, static pages).\r\n if xblock.category in DIRECT_ONLY_CATEGORIES:\r\n return False\r\n component_publish_state = compute_publish_state(xblock)\r\n return component_publish_state == PublishState.public", "def is_inline_action_popup_loaded_properly(self):\n return self.is_element_present(self.vendor_profile_inline_item_locator)", "def _is_granter_pvm( # pylint: disable=no-self-use\n self, pvm: PermissionView\n ) -> bool:\n\n return pvm.permission.name in {\"can_override_role_permissions\", \"can_approve\"}", "def can_be_edited(self, user):\n return (self.is_public or user == self.owner or\n user in list(self.auth_users.all()))", "def _markValidElements(self, element):\n self.log(\"element:%s\" % element.get_name())\n if element == self.typefind:\n return\n self._validelements.append(element)\n # find upstream element\n pad = list(element.sink_pads())[0]\n parent = pad.get_peer().get_parent()\n self._markValidElements(parent)", "def filterAcceptsRow(self, row, parent):\n if 
not self.__customFilterEnabled:\n return super(\n _WorkerFilterProxyModel, self).filterAcceptsRow(row, parent)\n\n if not self.__regex:\n return True\n\n model = self.sourceModel()\n idx = model.index(row, 0, parent)\n if not idx.isValid():\n return False\n\n obj = model.data(idx, _WorkerModel.ObjectRole)\n if not obj:\n return False\n\n result = self.__regex.match(obj.name)\n if not result:\n return False\n return True", "def can_be_parent(self, give_reason=False):\n reason = None\n if self.is_child:\n reason = _(\"The specified parent product is a child product.\")\n if self.has_stockrecords:\n reason = _(\"One can't add a child product to a product with stock records.\")\n is_valid = reason is None\n if give_reason:\n return is_valid, reason\n else:\n return is_valid", "def is_parent(self) -> bool:\n return AccountEntry.objects.filter(parent=self).exists()", "def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS", "def can_delete(self, user_obj):\n if self.id is None:\n return False\n if user_obj.is_superuser:\n return True\n if self.parentnode is not None and self.is_empty():\n return self.parentnode.is_admin(user_obj)\n else:\n return False", "def can_be_viewed_by(self,user):\n\n # check whether everyone is allowed to view this. Anymous user is the only member of group\n # 'everyone' for which permissions can be set\n anonymousUser = get_anonymous_user()\n\n if anonymousUser.has_perm(\"view_ComicSiteModel\",self):\n return True\n else:\n # if not everyone has access, check whether given user has permissions\n return user.has_perm(\"view_ComicSiteModel\",self)", "def can_submit_proceedings(self, user):\n if user is None:\n return False\n # The submitter of the original abstract is always authorized\n if self.abstract and self.abstract.submitter == user:\n return True\n # Otherwise only users with submission rights are authorized\n return self.can_manage(user, 'submit', allow_admin=False, check_parent=False)", "def IsDescendantOf(self, parent, item):\r\n\r\n while item:\r\n \r\n if item == parent:\r\n \r\n # item is a descendant of parent\r\n return True\r\n \r\n item = item.GetParent()\r\n \r\n return False", "def check_visible(file_name):\n if file_name == None:\n return False\n\n # Get toolingapi settings\n toolingapi_settings = context.get_toolingapi_settings()\n\n # Get component_type\n component_type = util.get_component_type(file_name)\n\n # If component type is not in range, just show error message\n if component_type not in toolingapi_settings[\"component_types\"]:\n return False\n\n # Get component_url and component_id\n username = toolingapi_settings[\"username\"]\n try:\n component_attribute = util.get_component_attribute(username, file_name)\n except KeyError as err:\n return False\n\n if component_attribute == None: \n return False\n\n return True", "def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "def is_submission_allowed(self, check_dev_agreement=True):\n from olympia.users.models import (\n DeveloperAgreementRestriction,\n UserRestrictionHistory,\n )\n\n if not self.request:\n raise ImproperlyConfigured('Need a request to call is_submission_allowed()')\n\n if self.user and self.user.bypass_upload_restrictions:\n return True\n\n if check_dev_agreement is False:\n restriction_choices = filter(\n lambda item: item[1] != DeveloperAgreementRestriction,\n UserRestrictionHistory.RESTRICTION_CLASSES_CHOICES,\n )\n else:\n restriction_choices = None\n 
return self._is_action_allowed(\n 'submission', restriction_choices=restriction_choices\n )", "def is_relevant_to_orgq(element):\n relevance = get_semeval_relevance_orgq(element)\n return relevance in RELEVANT_TAGS", "def __Ancestor(self, flag):\n command = self._parent\n while command:\n if flag in command.flags:\n return True\n command = command._parent # pylint: disable=protected-access\n return False", "def is_people_with_link_can_edit(self):\n return self._tag == 'people_with_link_can_edit'", "def checkRights(self,entry):\n if not self.session.isLoggedin():\n self.logger.debug('Not logged in, we leave checkRights')\n return False\n \n # Ist Eintrag Public (z.B. Authen)\n if entry.get('public'):\n return True\n \n \n rights = entry.get('rights')\n \n if rights is None: \n self.logger.debug('Rights are net set (None), we leave checkRights')\n return True\n\n self.logger.debug('Entryrights: {}'.format(repr(rights)))\n\n found = False\n userRights = self.session.getAttribute('rights')\n self.logger.debug('Userrights: {}'.format(repr(userRights)))\n\n # wurden Rechte gesetzt\n if rights is not None or rights==[]:\n if isinstance(rights,str): rights = rights.split(',')\n \n for right in rights:\n if right.startswith('-'):\n right = right[1:]\n if right in userRights: \n self.logger.debug('Negative righths found: {} is forbidden'.format(right))\n return False\n else:\n if right in (userRights or []):\n found = True \n else:\n # Wenn keine Rechte im Eintrag\n # auf jeden Fall anzeigen\n found = True\n \n self.logger.debug('Result is \"{}\"'.format(found))\n return found", "def is_override(self) -> bool:\n if self.parent is not None:\n return False\n return any(self.elem in x for x in self.schema.root if x.tag == XSD_OVERRIDE)", "def _is_input_element(se):\n return inspect.isclass(se) and issubclass(se, BaseField)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def _isA(self, elementClass, category = ''):\n if not isinstance(self, elementClass):\n return False\n if category and self.getCategory() != category:\n return False\n return True", "def is_visible(self, locator, timeout=15):\n try:\n ui.WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))\n return True\n except TimeoutException:\n return False", "def has_permission(self, request, view):\n\n # Fallback to has_object_permission unless it's a POST\n if request.method != 'POST':\n return True\n\n # Need this information to make a decision\n if 'privileged_access' not in request.data and \\\n 'document' in request.data:\n return False\n\n document = request.data['document']\n privileged_access = request.data['privileged_access']\n\n found = Document.objects.filter(id=document).first()\n\n if not found:\n return False\n\n if found.create_user.organization != request.user.organization and \\\n not request.user.is_government_user:\n return False\n\n return DocumentCommentPermissions.user_can_comment(\n request.user,\n found,\n privileged_access\n )", "def user_can_edit(self, user):\n return user == self.owner", "def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None", "def must_skip(self, item):\n user = c.user if c.user_is_loggedin else None\n\n if hasattr(item, \"promoted\") and item.promoted is not None:\n return False\n\n # can_view_slow only exists for Messages, but checking was_comment\n # is also necessary because items may also be comments that are being\n # 
viewed from the inbox page where their render class is overridden.\n # This check needs to be done before looking at whether they can view\n # the subverbify, or modmail to/from private subverbifys that the user\n # doesn't have access to will be skipped.\n if hasattr(item, 'can_view_slow') and not item.was_comment:\n return not item.can_view_slow()\n\n if hasattr(item, 'subverbify') and not item.subverbify.can_view(user):\n return True", "def checkIfAllowed(self, user):\n\n # Default case if mod access is not needed everyone has access\n if not self.modOnlyAccess:\n return True\n\n # Otherwise check the user's access level\n if user.modAccess == self.modOnlyAccess:\n return True\n else:\n return False", "def verify_allow_access_screen(self, raise_e=True):\n return self.driver.wait_for_object(\"dropbox_title\", timeout=10, raise_e=raise_e) and \\\n self.driver.wait_for_object(\"allow_access_btn\", timeout=30, raise_e=raise_e)", "def user_can_comment(user, document, privileged):\n if user.is_government_user and \\\n user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW') and \\\n document.status.status in ['Received', 'Submitted']:\n return True\n\n if not user.is_government_user and not privileged and \\\n document.status.status in ['Draft', 'Submitted']:\n return True\n\n return False", "def is_parent_of(cls, *args):\n return cls.graph_traversal(None, None, Bytecode()).is_parent_of(*args)", "def viewer(self):\n return self.parent", "def is_ignorable_request(self, request, uri, domain, referer):\n if super().is_ignorable_request(request, uri, domain, referer):\n return True\n user_agent = request.META[\"HTTP_USER_AGENT\"]\n return any(\n pattern.search(user_agent)\n for pattern in self.ignored_user_agent_patterns\n )", "def isElementPresent(self,locator=\"\",locatorType='id', element=None):\n\n\n\n\n try:\n if locator:\n element = self.getElement(locator, locatorType)\n\n if element is not None:\n self.logger.info(\"Element found with locator \"+locator+\" LocatorType \"+locatorType)\n return True\n\n else:\n self.logger.info(\"Element not found with locator \" + locator + \" LocatorType \" + locatorType)\n return False\n\n except:\n print(\"Element not found\")\n return False", "def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))", "def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False", "def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False", "def allowed(self):\n may = self.request.user.may\n return (not self.__class__.__name__ in self.request.cfg.actions_excluded and\n may.write(self.pagename))", "def can_view(self, user):\r\n return True", "def isAncestorOf(self, node):\n if (self in node.parents()):\n return True\n elif (not node.isSource()):\n return reduce(lambda x,y: x or y, [self.isAncestorOf(x) for x in node.parents()])\n else:\n return False", "def _valid_clickable_object(ui_object):\n return not _valid_typable_object(ui_object) and ui_object.clickable", "def get_viewable(self, user):\n return True", "def experiments_submission_allowed(user, parsed_addon_data):\n return not parsed_addon_data.get('is_experiment', False) or action_allowed_for(\n user, amo.permissions.EXPERIMENTS_SUBMIT\n )", "def _contains_in_self_or_parent(self, name: str) -> bool:\n return name in self", "def 
_canEnable(self, is_set, get_value):\n\t\tparents = any([is_set(parent) for parent in self.parents]) \\\n\t\t or not self.parents\n\t\tnon_parents = all([(not is_set(non_parent) or self.non_parent_exceptions.count(non_parent) or non_parent==self.name) \\\n\t\t\t\t\t for non_parent in self.non_parents]) \\\n\t\t\t\t\t or not self.non_parents\n\t\treturn parents and non_parents", "def check(self, capability):\n capability = ircutils.toLower(capability)\n if self.__parent.__contains__(capability):\n return True\n elif self.__parent.__contains__(_invert(capability)):\n return False\n else:\n raise KeyError, capability", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n review_request = context.get('review_request')\n\n return (super().should_render(context=context) and\n review_request is not None and\n review_request.public and\n not is_site_read_only_for(context['request'].user))", "def has_chain(praw_r, praw_comment, username):\n if not hasattr(praw_comment, 'parent_id'):\n return False\n parent = praw_r.get_info(thing_id=praw_comment.parent_id)\n if not parent or type(parent) != praw.objects.Comment:\n return False\n return is_comment_owner(parent, username)", "def is_visible(self, url=''):\n return bool(url)", "def any_parent_overriden(self):\n\n if self._parent._is_overriden:\n return True\n return self._parent.any_parent_overriden()", "def query_filters_restricted (self) :\n user = self.user_restriction\n if user is not None :\n return Q.created_by == user" ]
[ "0.59139556", "0.57015985", "0.5638367", "0.54528904", "0.5326502", "0.52207935", "0.5189183", "0.51695454", "0.5162016", "0.51491517", "0.50781363", "0.49656478", "0.49402714", "0.49008185", "0.48731193", "0.48402408", "0.4839491", "0.48392522", "0.48301572", "0.48110285", "0.47901526", "0.4777314", "0.4727134", "0.4725504", "0.4724558", "0.4714422", "0.47120154", "0.4704228", "0.46767986", "0.46692315", "0.46628174", "0.46409118", "0.4634323", "0.46217147", "0.46209407", "0.4618856", "0.46061915", "0.45854092", "0.4582912", "0.45791584", "0.45715562", "0.45693138", "0.4556336", "0.45511532", "0.4550021", "0.45477247", "0.453401", "0.45287478", "0.45063043", "0.45014414", "0.44984087", "0.44978458", "0.44901618", "0.44893873", "0.44841787", "0.4483042", "0.4474872", "0.44711062", "0.4468449", "0.44623506", "0.44576383", "0.44553158", "0.4447365", "0.44462875", "0.4438491", "0.44368163", "0.44320628", "0.44318715", "0.4429778", "0.4428533", "0.4419876", "0.4416704", "0.44126993", "0.44108596", "0.44075078", "0.44065458", "0.43953195", "0.43946594", "0.43882778", "0.43856028", "0.43849882", "0.43844986", "0.43754333", "0.4374191", "0.4373697", "0.4373697", "0.4373506", "0.4371671", "0.4371325", "0.43654767", "0.43652862", "0.43557787", "0.43553302", "0.43550473", "0.4354834", "0.43526092", "0.43525827", "0.4348163", "0.43471926", "0.43470672" ]
0.6605754
0
Is the filter affected by changes to an element's trait? 'element' is the element. 'trait_name' is the name of the trait. Returns True if the filter would be affected by changes to the trait named 'trait_name' on the specified element. By default we return False.
def is_filter_trait(self, element, trait_name):
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None", "def has_visibility(trait, visibility_name):\n\n return trait.visibility == getattr(schema.Trait.Visibility, visibility_name)", "def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None", "def is_calibration_tag_for_name(ins, exp, run, name='dark') :\n for attr in run_attributes(ins, exp, run) :\n if attr['class'] == 'Calibrations' and attr['name'] == name : return True\n return False", "def __contains__(self, name):\n\n return name in self._wdict", "def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])", "def check_element(self, e):\n my_view = {}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n\n if self.element_constraint is not None:\n ret = self.element_constraint.apply_to(e)\n else:\n ret = True\n return ret & apply_to(my_view, e)", "def isModifiedByCategory(self,node, queryCategory):\n pred = self.getModifiers(node )\n for p in pred:\n #if( queryCategory.lower() == p.getCategory().lower() ):\n if( p.isA(queryCategory) ):\n return True\n\n return False", "def __contains__(self, name):\n return hasattr(self, name)", "def __contains__(self, attribute_name):\n return False # pragma: no cover", "def isModifiedByCategory(self, node, queryCategory):\n predecessors = self.getModifiers(node)\n for predecessor in predecessors:\n if predecessor.isA(queryCategory):\n return True\n\n return False", "def is_injected(self, name):\n return name in self.__provisions", "def __continas__ (self, name):\n return name in self.containments", "def tag_visible(element):\n\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True", "def __bool__(self):\n return True if self._name is not None else False", "def match(self, name, tags):\n S, tags = self.get_compiled(name, tags)\n return bool(S & tags)", "def contains(self, element):\n pass", "def is_element_only(self) -> bool:\n raise NotImplementedError()", "def specify_change(self) -> bool:\n return any(True for e in self if e != self.wildcard)", "def __contains__(self, name):\n return name in self._variables", "def can_transform(self, html_element: ET.Element):\n return html_element.tag == \"mark\"", "def is_change(self) -> bool:\n return self._change", "def is_blocked(self, xsd_element: 'XsdElement') -> bool:\n xsd_type = xsd_element.type\n if self is xsd_type:\n return False\n\n block = f'{xsd_element.block} {xsd_type.block}'.strip()\n if not block:\n return False\n\n _block = {x for x in block.split() if x in ('extension', 'restriction')}\n return any(self.is_derived(xsd_type, derivation) for derivation in _block)", "def __bool__(self):\n return any(\n getattr(self, hook_trigger, None) for hook_trigger in self._hook_triggers\n )", "def __contains__(self, name):\n return name in set(self)", "def trait_view ( self, name = None, view_element = None ):\n return self.__class__._trait_view( name, view_element,\n self.default_traits_view, self.trait_view_elements,\n self.editable_traits )", "def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in 
self._tags:\n if tag == desired_tag:\n return True\n\n return False", "def can_analyze_contain(cls, element):\n\n if element is None:\n return False\n\n return element.ele_type == SecondaryStructureElementType.Stem \\\n or element.ele_type == SecondaryStructureElementType.Hairpin \\\n or element.ele_type == SecondaryStructureElementType.Interior \\\n or element.ele_type == SecondaryStructureElementType.Multiloop \\\n or element.ele_type == SecondaryStructureElementType.Unpaired \\\n or element.ele_type == SecondaryStructureElementType.Bulge \\\n or element.ele_type == SecondaryStructureElementType.End", "def is_wrapped_by(self, name):\n\n\t\ttry:\n\t\t\tself._find_wrapper_by_name(name)\n\t\texcept ValueError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def _is_node_an_element(self, node):\n # Try the simplest approach first, works for plain old ElementTree\n if isinstance(node, BaseET.Element):\n return True\n # For cElementTree we need to be more cunning (or find a better way)\n if hasattr(node, 'makeelement') \\\n and isinstance(node.tag, six.string_types):\n return True", "def supports(self, name):\r\n return hasattr(self, \"data_to_\" + name)", "def check_condition(self, element):\n conditional = element.getAttribute(\"conditional\")\n\n # No condition, then we execute this statement.\n #\n if len(conditional) == 0:\n return True\n\n # We have a conditional. See if it begins with a '!', which inverts\n # our test.\n #\n result = True\n oc = conditional\n if conditional[0] == '!':\n result = False\n conditional = conditional[1:]\n\n if self.settings is not None and conditional in self.settings.ids:\n if self.settings.value(conditional) is True:\n return result\n return not result\n return not result", "def IsValidSubElement(self, name):\n return bool(self.LoadSubElement(name))", "def __contains__(self, name):\n return (name in self._defs) or \\\n ((self._parent is not None) and (name in self._parent))", "def hasFilter(self, column) -> bool:\n column_name = self._dataframe.columns[column]\n return column_name in self._filters.keys()", "def is_event_annotated(self, name):\n return name in self._annotations.keys()", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def __bool__(self: Self) -> bool:\n return bool(self.removed or self.added)", "def has_name(self, name: str) -> bool:\n return name in self.child_tags", "def has_changed(self):\n return bool(self.changed_data)", "def is_filter_at_key(self, key):\n\n if self.has_key(key):\n attribute_status = getattr(self, key)\n if isinstance(attribute_status, self.__class__):\n return True\n\n return False", "def isa_binary_element(self, element):\n\n return len(self.above(element)) <= 2 and len(self.under(element)) <= 2", "def affects(self, ns):\n return any(rn.affects(ns) for rn in self)", "def _has(self, name):\n return hasattr(self._, name)", "def is_rule(self, name):\n return name in self._rules", "def has(self, tag_name: str) -> bool:\n return hasattr(self, tag_name)", "def _blocks_in_components_changed(self):\n for name, component in self._components.items():\n if name in self._cached_components \\\n and _blocks_changed_in_config(self._cached_components[name], self._components[name]):\n return True\n return False", "def __contains__(self, component):\n if issubclass(component, Component):\n try:\n my_component = self.type.components[component.interface]\n except KeyError:\n return False\n else:\n return issubclass(my_component, component)\n else:\n return component in 
self.type.components", "def applicable(self, name):\n return True", "def check_column(self, table_name: str, column_name: str) -> bool:\n try:\n insp = reflection.Inspector.from_engine(self.engine)\n for col in insp.get_columns(table_name):\n if column_name in col[\"name\"]:\n return True\n return False\n except Exception as err:\n logger.error(\"check_column [error] -> %s\" % err)\n return False", "def is_element_enabled(self):\n if self.web_element.is_enabled():\n return True\n else:\n return False", "def MaybeAppliesToTest(self, test_name: str) -> bool:\n return self._comp(test_name)", "def has_name(self, name):\n return name in self.classes", "def __contains__(self, name):\n try:\n self[name]\n return True\n except KeyError:\n return False", "async def is_running(self, collection, container_name, prune=True):\n def has_container(instance):\n try:\n all_containers = instance.state.docker.get_containers()\n except:\n if prune:\n msg = (\"Lost contact with a container on %s, \"\n \"marking dead.\")\n logger.debug(msg % instance.instance.id)\n instance.state.nonresponsive = True\n return not prune\n return any(container_name in cont[\"Image\"]\n for cont in all_containers.values())\n\n results = await gen.multi([collection.execute(has_container, x)\n for x in collection.running_instances()])\n return any(results)", "def has_attr_with_name(self, name):\n for attr in self:\n if attr.name == name:\n return True\n\n return False", "def __bool__(self):\n return any(p for p in self)", "def _field_was_changed(self):\n field_map = self._field_map\n for field in field_map.itervalues():\n if field.was_changed():\n return True\n return False", "def has_filter(self, param: str) -> bool:\n return param in self.filter_names", "def isMixedElement(self, name):\n ret = libxml2mod.xmlIsMixedElement(self._o, name)\n return ret", "def has(self, tag_name: str, category: ty.Optional[str] = None) -> bool:\n tags = self.__holder.db_tags.filter(lambda t: t.name == tag_name)\n if category is not None:\n tags = tags.filter(category=category)\n\n return len(tags) >= 1", "def __contains__(self, name):\n return name in self.__resources", "def validate_trait ( self, name, value ):\n return self.base_trait( name ).validate( self, name, value )", "def hasElement(self, name, excludeNullElements=False):\n return self.asElement().hasElement(name, excludeNullElements)", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def has_attribute(self, name):\n return name in self.schema", "def is_registered(self, name):\r\n\r\n return name in self.__events", "def __contains__(self, name):\n if name not in self.ALLOWED_EXCLUDES or name not in self.data.keys():\n return False\n else:\n return True", "def exists(self, name):\n return name in self.cache", "def __bool__(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return False\n \n if mode == 'OBJECT':\n return bool(context.selected_objects)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n if actual_mode == 'EDIT_MESH':\n return bool(mesh.total_vert_sel)\n else:\n return any(item.select for item in mesh.vertices)\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n if (item.select_control_point or\n item.select_left_handle or\n item.select_right_handle):\n return True\n for item in spline.points:\n if item.select:\n return True\n elif mode == 'EDIT_METABALL':\n return bool(active_obj.data.elements.active)\n 
elif mode == 'EDIT_LATTICE':\n return any(item.select for item in active_obj.data.points)\n elif mode == 'EDIT_ARMATURE':\n return any(item.select_head or item.select_tail\n for item in active_obj.data.edit_bones)\n elif mode == 'POSE':\n return any(item.select for item in active_obj.data.bones)\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes\n \n return False", "def check(self, capability):\n capability = ircutils.toLower(capability)\n if self.__parent.__contains__(capability):\n return True\n elif self.__parent.__contains__(_invert(capability)):\n return False\n else:\n raise KeyError, capability", "def isFissile(self):\n return self.name in self.fissile", "def handle(self, event):\n try:\n for event_listeners in self.listeners[event.type]:\n if event_listeners:\n for listener in event_listeners:\n if listener(event) is False:\n return False\n except KeyError:\n logger.insane('No listeners defined for event \"%s\"', hr_event_type(event.type))\n pass\n\n return True", "def wants_event(self, event_name: str, args: Dict) -> bool:\n ret = True\n if self.event_filter and event_name not in self.event_filter:\n ret = False\n elif self.active_monitor_filter and 'monitor' in args and args['monitor'].monitor_type == 'active' \\\n and args['monitor'].id not in self.active_monitor_filter:\n ret = False\n return ret", "def _should_be_filtered(self, tag, attrs):\n \n # Test if the node's tag should be filtered\n if self.__ignores[0] and tag in self.__ignores[0]:\n return False\n \n # Test if the node's attributes should be filtered\n filters = self.__ignores[1][any_tag]\n if tag in self.__ignores[1]:\n filters |= self.__ignores[1][tag]\n\n try:\n if any('.%s' % attr[1] in filters for attr in attrs if attr[0] == 'class'):\n return False\n except KeyError:\n pass\n\n try:\n if any('#%s' % attr[1] in filters for attr in attrs if attr[0] == 'id'):\n return False\n except KeyError:\n pass\n\n return True", "def has_name(self, name):\n\t\t\treturn name in self.classes", "def has_asset(self, name):\n return name in self.assets", "def __call__(self, dataset: pydicom.dataset.Dataset, data_element: pydicom.DataElement) -> bool:\r\n if data_element.VR not in (\"DA\", \"DT\"):\r\n return False\r\n if not data_element.value:\r\n return True\r\n\r\n if data_element.VR == \"DA\":\r\n self._anonymize_date_and_time(dataset, data_element)\r\n else:\r\n self._anonymize_datetime(data_element)\r\n return True", "def _isA(self, elementClass, category = ''):\n if not isinstance(self, elementClass):\n return False\n if category and self.getCategory() != category:\n return False\n return True", "def sees_actor(self, actor):\n radius = actor.camera_radius\n\n if radius < 0.5:\n return self.camera.is_point_in_frustum(actor.world_position)\n\n return self.camera.is_sphere_in_frustum(actor.world_position, radius)", "def __contains__(self, name):\n return name in self._modules", "def has_name(self, name):\n\t\treturn name in self.classes", "def _do_layer_adaptation(self, param_name):\n if self.exclude_from_layer_adaptation:\n for r in self.exclude_from_layer_adaptation:\n if re.search(r, param_name) is not None:\n return False\n return True", "def filter_exists(name):\n\n\thdata = weechat.hdata_get(\"filter\")\n\tfilters = weechat.hdata_get_list(hdata, \"gui_filters\")\n\tfilter = weechat.hdata_search(hdata, filters, \"${filter.name} == %s\" % name, 1)\n\n\treturn bool(filter)", "def 
__contains__(self, addonName):\r\n return bool(addonName in self.addons)", "def correct_player_name(name):\n tree = ET.parse('players.xml')\n root = tree.getroot()\n for rt in root:\n if rt.attrib['name'] is name:\n return True\n return False", "def remove_trait ( self, name ):\n # Get the trait definition:\n trait = self._trait( name, 0 )\n if trait is not None:\n\n # Check to see if the trait has additional sub-traits that need to\n # be removed also:\n handler = trait.handler\n if handler is not None:\n if handler.has_items:\n self.remove_trait( name + '_items' )\n if handler.is_mapped:\n self.remove_trait( name + '_' )\n\n # Remove the trait value from the object dictionary as well:\n if name in self.__dict__:\n del self.__dict__[ name ]\n\n # Get the object's instance trait dictionary and remove the trait\n # from it:\n itrait_dict = self._instance_traits()\n if name in itrait_dict:\n del itrait_dict[ name ]\n return True\n\n return False", "def is_event(self, Rover, name):\n func = self.event.get(name)\n return func(Rover)", "def has_friendliness(self):\n trait = self.traitDao.get_friendliness(self.name)\n if trait is None:\n return False\n else:\n return True", "def should_expose_interface(self, iface_name):\n return iface_name not in self.HIDDEN_INTERFACES", "def isMatch( self, projectile, target, library = None, evaluation = None ) :\n\n if( self.adaptable( self.projectile, projectile ) ) :\n if( self.adaptable( self.target, target ) ) :\n if( self.adaptable( self.library, library ) ) : return( self.adaptable( self.__evaluation, evaluation ) )\n\n return( False )", "def match(self, name, tags):\n name, tags = self.get_compiled(name, tags)\n \n def index_of_letter(l):\n return ord(l) - ord('a')\n \n true_val, false_val = name\n \n if true_val:\n return index_of_letter(true_val) in tags\n else:\n return index_of_letter(false_val) not in tags", "def validate(self, name):\n return name in self.dict", "def __contains__(self,name):\n if name in self.__myTeam:\n return True\n else:\n return False", "def fire(self) -> bool:\n return self._search_in_description(REGEXP_ATTR_FIRE) == 'Yes'", "def __contains__(self, attr):\n return attr in self._config", "def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, bs4.element.Comment):\n return False\n if re.match(r\"[\\n]+\", str(element)):\n return False\n return True", "def at_character_traverse(self, character):\n if not character:\n return True\n\n if self.can_bypass(character):\n return True\n\n triggered = False\n if defines.EVENT_TRIGGER_TRAVERSE in self.events:\n for event in self.events[defines.EVENT_TRIGGER_TRAVERSE]:\n # If has traverse event.\n if SCRIPT_HANDLER.match_condition(character, self.owner, event[\"condition\"]):\n # If matches the condition.\n triggered = True\n event[\"function\"](event[\"data\"], character)\n\n return not triggered", "def __contains__(self, name: str) -> bool:\n ...", "def any(self) -> bool:" ]
[ "0.5432807", "0.5380321", "0.5154928", "0.50004315", "0.49823684", "0.4972386", "0.49546346", "0.49431932", "0.4915069", "0.4896147", "0.48712415", "0.48191318", "0.48169592", "0.47890756", "0.4781882", "0.47721133", "0.47471988", "0.47195056", "0.47182444", "0.46942788", "0.4686392", "0.4648566", "0.4622082", "0.4620239", "0.4619137", "0.45946512", "0.4581375", "0.45769244", "0.4544667", "0.4529805", "0.45253518", "0.45219666", "0.4513777", "0.45130628", "0.4503715", "0.45002526", "0.44856957", "0.44794816", "0.44792598", "0.44728634", "0.44723308", "0.44709337", "0.44639236", "0.4454672", "0.4448363", "0.44437954", "0.4441116", "0.44386044", "0.44280434", "0.44238856", "0.44233453", "0.44223556", "0.4421748", "0.4419857", "0.44150323", "0.44082627", "0.4406743", "0.44062006", "0.44049022", "0.44040623", "0.4403259", "0.4398857", "0.43979383", "0.43950576", "0.4391011", "0.43862134", "0.4379339", "0.43785703", "0.437227", "0.4370591", "0.43704832", "0.4369256", "0.43669084", "0.4355662", "0.43548495", "0.43546513", "0.43541372", "0.43529087", "0.4350308", "0.43451455", "0.43446413", "0.43405244", "0.4333895", "0.4333741", "0.43319118", "0.43317926", "0.43213248", "0.4321129", "0.43204662", "0.43172345", "0.43101332", "0.4308755", "0.4307678", "0.43069598", "0.4297252", "0.4296823", "0.42956266", "0.4295139", "0.42938352", "0.42768037" ]
0.7937278
0
A BFS solution that runs in O(N)
def bfs(self, root: TreeNode) -> int:
    if not root:
        return 0
    queue = deque([(root, 1)])
    while queue:
        node, level = queue.popleft()
        if not node.left and not node.right:
            return level
        if node.left:
            queue.append((node.left, level + 1))
        if node.right:
            queue.append((node.right, level + 1))
    return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def bfs(graph, initial_node, dest_node):\n return queue_search(graph, initial_node, dest_node, queue.Queue())", "def BFS(graph, s, n):\n #Implement queue using list. list.pop() to dequeue and list.insert(0,x) to enqueue.\n visited = [False] * n\n queue = []\n \n queue.insert(0, s)\n visited[s] = True\n while(queue):\n v = queue.pop()\n print(v, end= \" \")\n \n for i in range(len(graph[v])):\n if (not visited[graph[v][i]]):\n queue.insert(0, graph[v][i] )\n visited[graph[v][i]] = True", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def bfs(graph, start):\n visited, queue = set(), [start]\n while queue:\n node = queue.pop(0)\n if node not in visited:\n visited.add(node)\n # Add all the adjacent unvisited nodes to the queue\n queue.extend(graph[node] - visited)\n return visited", "def bfs(g: nx.Graph, start_node: Any) -> list:\r\n\tx = [start_node]\r\n\tqueue = [start_node]\r\n\ttracks = {node: [] for node in g.nodes}\r\n\twhile queue:\r\n\t\telement = queue.pop(0)\r\n\t\ty = list(g.neighbors(element))\r\n\t\tfor node in y:\r\n\t\t\tif node not in x:\r\n\t\t\t\tx.append(node)\r\n\t\t\t\tqueue.append(node)\r\n\t\t\t\ttracks[node].extend((*tracks[element], element))\r\n\treturn x", "def bfs(g,startnode):\n Q = deque('') # initialize Q to be empty queue\n \n inf = float(\"inf\") # define infinity\n result = {}\n # assign infinite length to every node\n for node in g:\n result[node] = inf\n result[startnode] = 0 # assign start node length = 0\n Q.append(startnode) # attach the start node to the queue\n \n while len(Q) > 0:\n j = Q.popleft()\n for neighbor in g[j]:\n if result[neighbor] == inf:\n result[neighbor] = result[j] + 1\n Q.append(neighbor)\n \n return result", "def bfs(graph, start):\n queue = deque([start])\n graph.distance[start] = 0\n\n while queue: # not empty\n u = queue.popleft()\n for v in range(graph.size):\n if is_edge(graph, u, v) and graph.distance[v] is None:\n graph.distance[v] = graph.distance[u] + 1\n graph.parent[v] = u\n queue.append(v)", "def bfs(self, start, end):\n\n queue = [start]\n parent = dict()\n\n # Initialize parent dictionary\n for v in iter(self._reachable): parent[v] = None\n parent[start] = start\n\n while len(queue) > 0:\n (x, y) = queue.pop(0)\n if (x, y) == end: break\n\n for v in self.get_reachables(x, y):\n if parent[v] is not None: \n # Vertex v already visited\n continue\n parent[v] = (x, y)\n queue.append(v)\n\n # Reconstruct path\n path = [end]\n vertex = end\n\n while parent[vertex] != vertex:\n if parent[vertex] is None: return []\n path.append(parent[vertex])\n vertex = parent[vertex]\n\n path.reverse()\n return path", "def bfs(start, goal):\n queue = [(start, 
[start])]\n visited = set([start])\n while queue:\n (vertex, path) = queue.pop(0)\n if vertex == goal:\n return path\n\n for word in bank:\n count = 0\n for i, c in enumerate(vertex): # Count differences\n if c != word[i]:\n count += 1\n if count == 1: # Valid neighbor\n if word not in visited:\n visited.add(word)\n queue.append((word, path + [word]))\n\n return \"No path found :(\"", "def bfs(g: nx.Graph, start_node: Hashable) -> List[Hashable]:\n list_ = list(g.neighbors(start_node))\n len_graph = g.number_of_nodes()\n list2 = [start_node]\n while len(list2) < len_graph:\n for i in range(len(list_) - 1):\n if list_[0] not in list2:\n list2.append(list_[0])\n list_ += list(g.neighbors(list_[0]))\n list_.remove(list_[0])\n # nx.draw(g, with_labels=True)\n # plt.show()\n return list2", "def bfs(self, startNode):\n queue = Queue()\n\n # Mark all the nodes as not visited\n visited = {}\n for node in self.getNodes():\n visited[node] = False\n\n queue.enqueue(startNode)\n\n while not queue.isEmpty():\n s = queue.dequeue()\n visited[s] = True\n print s,\n\n # enqueue all the adjacent vertices to s\n # if they've not already been visited\n\n for adjacentNode in self.getAdjacent(s):\n if visited[adjacentNode] is False:\n queue.enqueue(adjacentNode)\n visited[adjacentNode] = True", "def bfs(self, starting_vertex, destination_vertex): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([starting_vertex]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ", "def bfs(self, start):\n output_list = []\n queue = Queue()\n queue.put(start)\n visited = set(start)\n while not queue.empty():\n current_node = queue.get()\n output_list.append(current_node)\n visited.add(current_node)\n for node in self.__graph[current_node].neighbours:\n if node.name not in visited:\n queue.put(node.name)\n return output_list", "def bfs(graph, places):\n solution = Solution(places, graph)\n frontier = Queue()\n final_solution = None\n frontier.put(solution)\n while frontier.qsize() > 0:\n current_sol = frontier.get()\n print(\"-------\")\n print(current_sol.visited)\n print(current_sol.g)\n if current_sol.visited[-1] == places[-1]:\n if final_solution is None or current_sol.g < final_solution.g:\n final_solution = current_sol\n print(\"final solution = \", final_solution.g)\n else:\n for attraction in current_sol.not_visited[:-1]:\n new_sol = copy.deepcopy(current_sol)\n new_sol.add(places.index(attraction))\n frontier.put(new_sol)\n if len(current_sol.not_visited) == 1:\n new_sol = copy.deepcopy(current_sol)\n new_sol.add(places.index(current_sol.not_visited[0]))\n frontier.put(new_sol)\n return final_solution", "def BFS(graph, start, end, toPrint=False):\n initPath = [start]\n pathQueue = [initPath]\n if toPrint:\n print('current BFS path:', printPath(pathQueue))\n while len(pathQueue) != 0:\n # get 
and remove oldest element in pathQueue\n tmpPath = pathQueue.pop(0)\n print('Current BFS path:', printPath(tmpPath))\n lastNode = tmpPath[-1]\n if lastNode == end:\n return tmpPath # Explore all paths with n hops \n # before exploring any path with >n hops\n for nextNode in graph.childrenOf(lastNode):\n if next not in tmpPath:\n newPath = tmpPath + [nextNode]\n pathQueue.append(newPath)\n return None", "def bfs_visited(ugraph, start_node):\r\n queue = deque()\r\n visited = set() #Set is enough here.\r\n visited.add(start_node)\r\n queue.append(start_node)\r\n while len(queue) != 0:\r\n temp_node = queue.popleft()\r\n for neighbor in ugraph[temp_node]: #In graph theory, neighborhood is \r\n if neighbor not in visited: #well defined, so could be used directely.\r\n visited.add(neighbor)\r\n queue.append(neighbor)\r\n return visited", "def bfs(G, start, finish):\n if start == finish:\n return 0 # edge case - going to myself is 0\n marked = set()\n queue = deque()\n marked.add(start)\n queue.append((start, 0))\n while queue:\n cur, cur_depth = queue.popleft()\n children = G[cur]\n for child in children:\n if child == finish:\n return cur_depth + 1\n elif child not in marked:\n marked.add(child)\n queue.append((child, cur_depth + 1))\n return -1 # exhausted the whole queue and no path", "def BFS(start, end):\r\n queue = []\r\n queue.append(start)\r\n predecessors = {}\r\n predecessors[start] = None\r\n\r\n while len(queue):\r\n current = queue.pop(0)\r\n if current == end:\r\n break\r\n for neighbor in current.getConnections():\r\n if neighbor not in predecessors:\r\n predecessors[neighbor] = current\r\n queue.append(neighbor)\r\n\r\n if end in predecessors:\r\n path = []\r\n current = end\r\n while current != start:\r\n path.insert(0, current)\r\n current = predecessors[current]\r\n path.insert(0, start)\r\n return path\r\n else:\r\n return None", "def bfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n queue = collections.deque()\n queue.append(source)\n while queue:\n vertex = queue.popleft()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n if vertex not in visited:\n visited.add(vertex)\n for neighbor in filter(lambda n: n not in visited, get_neighbors(vertex)):\n queue.append(neighbor)\n parents[neighbor] = vertex\n return []", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #the logic is same as for DFS just that i made use of a Queue data structure\n #Here the queue acts as a FIFO queue\n neighbourNodes = util.Queue()\n moves = []\n neighbourNodes.push((problem.getStartState(),moves))\n seenNodes = set()\n\n while not neighbourNodes.isEmpty():\n poppedNodeState, poppedNodeAction= neighbourNodes.pop()\n if(poppedNodeState in seenNodes):\n continue\n if problem.isGoalState(poppedNodeState):\n return poppedNodeAction\n seenNodes.add(poppedNodeState)\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n if(state in seenNodes):\n continue\n neighbourNodes.push((state, poppedNodeAction+[action]))\n return moves\n #util.raiseNotDefined()", "def bfs_traversal(graph, s, goals=[]):\n visited = []\n boundary = deque([s])\n while len(boundary) > 0:\n v = boundary.popleft()\n visited += [v]\n if v in goals:\n return visited\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n return visited\n\n \"\"\"\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop(0)\n visited += [v]\n for w in neighbours(v, graph):\n if w not in goals:\n if w not in 
visited and w not in boundary:\n boundary.append(w)\n else:\n if w not in visited and w not in boundary:\n boundary.append(w)\n v = boundary.pop(0)\n visited += [v]\n break\n return visited\n \"\"\"", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITHM FOR bFS \n Create a queue Q\n enqueue root node to Q\n while Q is not empty:\n dequeu an item v from Q\n mark the item v as visited \n for each node w that is directed from v:\n enqueue w to Q\n \n \n \"\"\"\n\n fringes = util.Queue()\n explored =[]\n fringes.push((problem.getStartState(),[]))\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n goal = currentNode\n pathToGoal = currDir\n #print \"final path is : \", pathToGoal\n\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.append(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n fringes.push((childNode[0],currDir+[childNode[1]]))\n\n\n return pathToGoal", "def bfs(self, start_word, end_word): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([start_word]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == end_word: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ", "def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices", "def _BFS(self, start_vertex, visited, callback):\n queue = []\n queue.insert(0, start_vertex)\n visited.add(start_vertex)\n while queue:\n curr_vertex = queue.pop()\n callback(curr_vertex)\n for vertex in self.neighbors(curr_vertex):\n if vertex not in visited:\n queue.insert(0, vertex)\n visited.add(vertex)", "def bfs(maze):\n # TODO: Write your code here\n q = queue.Queue()\n q.put(maze.getStart())\n traversed = []\n path = []\n tracker = {maze.getStart(): None} #Tracker needs to contain tuples\n\n while q:\n curr_loc = q.get() \n\n if curr_loc not in traversed: #Add to traversed points list\n traversed.append(curr_loc)\n\n if maze.isObjective(curr_loc[0], curr_loc[1]): #Reached end of maze\n finished = curr_loc \n break\n\n nextpath = maze.getNeighbors(curr_loc[0], curr_loc[1]) #Search neighbor points\n for point in nextpath:\n if point not in traversed and maze.isValidMove(point[0], point[1]):\n q.put(point)\n tracker[point] = curr_loc #Update curr_loc\n\n while finished:\n path.insert(0, finished) \n finished = tracker[finished]\n\n return path", "def bfs(graph, start, goal):\n\n final = []\n agenda = [[start]]\n\n # Process node queue\n while agenda:\n path = agenda.pop(0)\n\n # Exit if a path is found 
which reaches the goal\n if path[-1] == goal:\n final = path\n break\n\n # Push the new paths onto the queue\n connected = graph.get_connected_nodes(path[-1])\n for node in connected:\n # Ignore previously visited nodes\n if node not in path:\n agenda.append(path + [node])\n\n # Return the final path or initial empty list\n return final", "def bfs(graph, root, max_depth):\n ###TODO\n pass", "def bfs_visited(ugraph, start_node):\n visited = set([start_node])\n queue = deque([start_node])\n while queue:\n node = queue.popleft()\n for neighbor in ugraph[node]:\n if neighbor not in visited:\n visited.add(neighbor)\n queue.append(neighbor)\n return visited", "def find_path_bfs(graph,start,end):\n\t\n\tvisited = set()\n\twatched = set()\n\n\twatched.add(start)\n\n\tnodes_queue = [(start,[start])]\n\twhile nodes_queue:\n\t\tcurrent_node, path = nodes_queue.pop(0)\n\n\t\tvisited.add(current_node)\n\n\t\tif (current_node == end):\n\t\t\treturn path\n\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append((adjacent_node, path+[adjacent_node]))\n\n\treturn None", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()", "def BFS(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"BFS: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"BFS: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n number_of_nodes_visited = 0\n visited = copy.deepcopy(maze) # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n\n # Initialize a matrix of the same size as maze where each value is None.\n previous = [[None for i in range(n)] for j in range(n)]\n\n queue = deque() # Define our queue of \"fringe\" squares\n queue.append(start) # Push the start square into our queue\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(queue)): # While there exists items in the 
queue\n current = queue.popleft() # Pop the square at index 0\n number_of_nodes_visited += 1 # Increase number of nodes visited\n\n if (current == goal): # If current is the goal, we found it!\n # We now want to traverse back to make a path using our 'previous' matrix\n path = []\n while (current != None):\n path.append(current)\n current = previous[current[0]][current[1]]\n path.reverse()\n return (True, path, number_of_nodes_visited)\n\n current_i, current_j = current # Unpack the current pair\n \n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n # If possible has not been visited yet\n if (not visited[possible[0]][possible[1]]):\n queue.append(possible) # Add possible to our queue\n # Set possible to visited\n visited[possible[0]][possible[1]] = 1\n # Set the previous square for possible to the current square\n previous[possible[0]][possible[1]] = current\n # If the while loop goes out, and the queue is empty, then there is no possible path\n return (False, [], number_of_nodes_visited)", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n try:\n queue.append(vertex_dict[queue[-1]])\n except KeyError:\n print(f\"Source: {source}, Dest: {destination}\")\n print(f\"Key {queue[-1]} not found in\")\n print_dict(\"bfs\", vertex_dict)\n break\n queue.reverse()\n return queue", "def BFS(initial_state, check_dict):\r\n \r\n print(\"Implementing BFS...\")\r\n q = deque()\r\n q.append(initial_state)\r\n accomplished = False\r\n \r\n while len(q) != 0:\r\n path = q.pop()\r\n \r\n if is_goal(path[-1][0]):\r\n goal = path\r\n accomplished = True\r\n break\r\n \r\n state_container = next_possible_states(path, check_dict, False)\r\n for i in state_container:\r\n if len(path) <= 1:\r\n temp = list(path)\r\n temp.append(i)\r\n q.appendleft(temp)\r\n else:\r\n if i[0] != path[-2][0]:\r\n temp = list(path)\r\n temp.append(i)\r\n q.appendleft(temp)\r\n\r\n \r\n if accomplished:\r\n print(\"Solved! Number of moves:\", len(goal) - 1)\r\n return goal, True\r\n else:\r\n print(\"Cannot be solved. Number of moves:\", len(path) - 1)\r\n print(path)\r\n return path, False", "def bfs(self, queue, target, targetx,\n targety): # finds BFS path to the finish. if there is no path, will return nothing\n\n '''\n 1. So we have a parent matrix\n 2. This records the parent\n 3. 
We have a dictionary of cell: parents'''\n if self.map1[queue[0][0]][queue[0][1]] == target:\n return [1]\n\n thisset = {queue[0]}\n traceSet = {queue[0]: None}\n\n flag = False # variable to see if it is possible to reach the goal\n while queue:\n fringe = queue.pop(0) # gets 0, 0 first\n adjs = self.getAdj(fringe[0], fringe[1])\n\n if self.map1[fringe[0]][fringe[1]] == 2:\n print(\"Our attempt has started\")\n\n if self.map1[fringe[0]][fringe[1]] == target:\n print(\"Goal reached\")\n print(\"This is how you go about it\")\n # print(traceSet)\n ans = self.trace(traceSet, targetx, targety)\n path = self.savePath(ans)\n flag = True\n # print(ans.pop())\n break\n\n if self.map1[fringe[0]][fringe[1]] == 0 or self.map1[fringe[0]][fringe[1]] == 3 or self.map1[fringe[0]][fringe[1]] == 4:\n continue\n\n for i in range(len(adjs)):\n if self.legal(adjs[i][0], adjs[i][1]):\n if adjs[i] in thisset:\n continue\n\n thisset.add(adjs[i])\n traceSet[adjs[i]] = fringe\n queue.append(adjs[i])\n if flag is False:\n print(\"No way to goal\")\n return []\n return path", "def bfs(self, starting_vertex, destination_vertex):\n \"\"\" FIFO ir LILO\n Create a queue\n Enqueue PATH to starting Vertex\n Create a set top store visited vertices\n While the queue is NOT empty: e.g. > 0\n Dequeue the first PATH Vertex\n Get Vertex from END of PATH\n Check IF NOT visited:\n Mark as visited\n check if vertex is destination_vertex\n If TRUE, return path\n enqueue PATH to ALL of neighbors \n make COPY of current path\n add neighbor to path copy\n enqueue copy \n \"\"\"\n\n q = Queue() # Create a queue\n q.enqueue([starting_vertex]) # Enqueue starting at vertex into Queue (list)\n visited = set() # Create a set to store visited \n \n while q.size() > 0: # While the queue is NOT empty: \n path = q.dequeue() # Dequeue the first PATH Vertices\n v = path[-1] # Get Vertex from END of PATH\n\n if v not in visited: # Check IF NOT visited:\n visited.add(v) # Mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path, DONE\n\n for n in self.get_neighbors(v): # enqueue PATH to ALL of neighbors\n path_c = path [:] # make COPY of current path\n path_c.append(n) # add neighbor to path copy\n q.enqueue(path_c) # enqueue copy", "def bfs(self):\r\n Q = [self.head()]\r\n visited = []\r\n while Q != []:\r\n cur = Q[0]\r\n visited.append(cur)\r\n Q = Q[1:]\r\n Q.extend([ch for ch in self.get_deps(cur.get_field('index'))])\r\n for x in reversed(visited):\r\n yield x", "def bfs(graph, start, end):\n if start not in graph:\n raise RuntimeError('Unknown start node: {}'.format(start))\n search_queue = deque()\n search_queue += graph[start]\n searched = [start]\n while search_queue:\n item = search_queue.popleft()\n if item not in searched:\n searched.append(item)\n if item == end:\n return searched\n search_queue += graph[item]\n return None", "def greedy_bfs(start: Vector2D, goal: Vector2D, grid: Scene, heuristic: Callable[[Vector2D, Vector2D], float]) -> list:\n frontier = PriorityQueue() # nodes to be explored\n prev_node = dict() # maps n to node that precedes it in cheapest currently-known path from start to n\n explored = [] # keeps track of previously explored nodes, to be drawn later\n\n frontier.put(start, heuristic(start, goal))\n prev_node[start] = None\n\n while not frontier.empty():\n current = frontier.get()\n\n if current == goal: # solution found!\n return (reconstruct_path(goal, prev_node), explored[1:]) # [1: to remove 'start']\n\n grid.set_cell(current, Cell(val = 
CellType.searched))\n explored.append(current)\n \n for neighbor in grid.get_unexplored_neighbors(current):\n frontier.put(neighbor, heuristic(neighbor, goal))\n prev_node[neighbor] = current\n\n \n # If frontier empty but goal was never reached, no solution was found\n return ([], explored[1:]) # [1: to remove 'start']", "def bfs(self, start_node: int, flag: bool) :\n for n in self.dw_graph.get_all_v().values():\n n.visited = False\n queue = [self.dw_graph.nodes[start_node]]\n self.dw_graph.nodes[start_node].visited = True\n node_list = [start_node]\n while queue:\n current = queue.pop()\n if not flag:\n for e in self.dw_graph.all_out_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n else:\n for e in self.dw_graph.all_in_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n\n return node_list", "def bfs_visited(ugraph, start_node):\n \n visited = set([start_node])\n cola = deque([start_node])\n \n while len(cola)>0:\n node = cola.popleft() \n for neigh in ugraph[node]:\n if not neigh in visited:\n visited.add(neigh)\n cola.append(neigh)\n \n return visited", "def bfs_visited(ugraph, start_node):\n\tqueue = []\n\tvisited = [start_node]\n\tqueue.append(start_node)\n\twhile queue:\n\t\tcurrent = queue.pop(0)\n\t\tfor content in ugraph[current]:\n\t\t\tif content not in visited:\n\t\t\t\tvisited.append(content)\n\t\t\t\tqueue.append(content)\n\treturn set(visited)", "def breadthFirstSearch(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n #explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def bfs(g, startnode):\n\n # Initiating dictionaries.\n d = {}\n n = {}\n q = collections.deque()\n\n # Set all distances to infinity.\n for i in g.keys():\n d[i] = float(\"inf\")\n\n # Setting up the initial node's properties.\n d[startnode] = 0\n n[startnode] = 1\n\n q.append(startnode)\n\n while len(q) > 0:\n j = q.popleft()\n\n # For every neighbor of j.\n for h in g[j]:\n if d[h] == float(\"inf\"):\n d[h] = d[j] + 1\n n[h] = n[j]\n q.append(h)\n elif d[h] == d[j] + 1:\n n[h] = n[h] + n[j]\n\n return d, n", "def bfs(array, neighbors, start, similar):\n match = get_item(array, start)\n block = {start}\n visit = deque(block)\n child = deque.popleft\n while visit:\n node = child(visit)\n for offset in neighbors:\n index = get_next(node, offset)\n if index not in block:\n block.add(index)\n if is_valid(array, index):\n value = get_item(array, index)\n if similar(value, match):\n visit.append(index)\n yield node", "def bfs(self, vertex_s):\r\n nd_list = 
list(self.vertices())\r\n visited = dict((node, 0) for node in nd_list)\r\n\r\n nq = deque()\r\n pre_dict, dist = {}, {}\r\n nq.append(vertex_s)\r\n visited[vertex_s]=1\r\n dist[vertex_s] = 0\r\n\r\n loop_counts = 0\r\n while nq:\r\n s = nq.popleft()\r\n for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'\r\n loop_counts += 1\r\n \r\n #if not node in visited:\r\n if not visited[node]:\r\n nq.append(node) # let 'node' in queue\r\n pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'\r\n dist[node] = dist[s] + 1 # shortest path to 'root'\r\n visited[node]=1 # 'node' is visted\r\n #if node in visited and dist[node] == dist[s] + 1: # still within the shortest path\r\n if visited[node] and dist[node] == dist[s] + 1: # still within the shortest path\r\n if s not in pre_dict[node]: # if this path have NOT been recorded, let's do that now\r\n pre_dict[node].append(s) \r\n \r\n if visited[node] and dist[node] > dist[s] + 1: # the previous 'recorded' path is longer than our current path (via node 's'); let's update that path and distance\r\n pre_dict[node] = [s]\r\n dist[node] = dist[s] + 1\r\n #print(\" #loops: %d\" %loop_counts)\r\n #current_bfs[vertex_s] = pre_dict\r\n \r\n return pre_dict", "def bfs(graph, start_node):\n start_node.distance = 0\n start.set_predecessor(None)\n queue = list()\n queue.append(start_node)\n while (len(queue) > 0):\n current_vertex = queue.pop()\n current_vertex.setState = \"visiting\"\n for vertex in current_vertex.links():\n if (vertex.getState == \"unvisited\"):\n vertex.setState == \"tobevisited\"\n vertex.set_predecessor(current_vertex)\n vertex.distance = current_vertex.distance + 1\n queue.append(vertex)\n current_vertex.setState = \"visited\"", "def bfs(self):\n\n root = self.root\n dq = collections.deque()\n\n dq.appendleft(root)\n visited = set()\n\n while dq:\n curr = dq.popleft()\n if curr in curr.children:\n curr.children.remove(curr) #Remove child from itself\n\n print(curr.name)\n\n currCh = curr.children\n filteredCh = set()\n\n for child in currCh:\n if child.parent != curr.name:\n continue\n else:\n dq.appendleft(child)\n filteredCh.add(child)\n curr.children = filteredCh", "def bfs(self, node: \"BSTNode\") -> Iterable[\"BSTNode\"]:\n queue = [node]\n\n while queue:\n current = queue.pop()\n yield current\n if current.left:\n queue.insert(0, current.left)\n if current.right:\n queue.insert(0, current.right)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #Queue to hold the node along with the path taken from the start node to reach that node\n queue = Queue()\n #Set to hold the node explored.\n explorednode = set()\n # Get the start node.\n startnode = problem.getStartState()\n print startnode\n # Push the starting node on the Queue along with an empty set to know the direction in order to reach the node.\n queue.push((startnode,[]))\n\n # Loop till the queue is empty\n while queue.isEmpty() is not True:\n # Pop the currentnode and the direction from the queue\n currentnode, direction = queue.pop()\n # Check if the currentnode is not in explorednode.\n if currentnode not in explorednode:\n # We will now add the node to set of explored node.\n explorednode.add(currentnode)\n # If the node is the goal. 
We made it!!\n if problem.isGoalState(currentnode):\n # The direction holds the way to reach till the goal from the start node.\n print currentnode\n return direction\n # Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n # If the successor(child) is not explored\n if successor not in explorednode:\n # Add the successor to the queue along with the path to reach it.\n queue.push((successor, direction + [action]))\n util.raiseNotDefined()", "def bfs(start_node, goal_node, max_depth) -> \"solution path\":\n\td = deque([start_node,[]])\n\texplored = {}\n\tlevel = 0\n\n\t# Return empty path if start is equal to goal\n\tif start_node == goal_node:\n\t\treturn []\n\n\t# Keep exploring while the deque has nodes\n\twhile len(d) > 0:\n\t\tpath = d.popleft()\n\n\t\tif level == 0:\n\t\t\tnode = path\n\t\telse:\n\t\t\t# To keep track of levels an empty node gets popped between levels which will cause an exception\n\t\t\ttry:\n\t\t\t\tnode = path[-1]\n\t\t\texcept Exception:\n\t\t\t\tnode = []\n\t\t\t\tpass\n\n\t\tif len(node) == 0:\n\t\t\tlevel += 1\n\t\t\t# Return empty list if max depth was reached\n\t\t\tif max_depth == level:\n\t\t\t\treturn []\n\t\t\td.append(node)\n\n\t\telse:\n\t\t\tval = getNodeVal(node)\n\t\t\tif val not in explored:\n\n\t\t\t\t# Mark node as explored\n\t\t\t\texplored[val] = True\n\n\t\t\t\tfor row in range(len(node)):\n\t\t\t\t\tfor col in range(len(node)):\n\t\t\t\t\t\tchild = toggle(node, row, col)\n\t\t\t\t\t\tnew_path = list(path)\n\t\t\t\t\t\tif level == 0:\n\t\t\t\t\t\t\tnew_path = [new_path]\n\t\t\t\t\t\tnew_path.append(child)\n\t\t\t\t\t\td.append(new_path)\n\t\t\t\t\t\tif child == goal_node:\n\t\t\t\t\t\t\tlevel+=1\n\t\t\t\t\t\t\treturn new_path\n\t# No solution found\n\treturn []", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def bfs(maze, current_node):\n q = collections.deque()\n\n q.append(current_node)\n\n while len(q) > 0:\n current_node = q.popleft()\n maze[current_node.row][current_node.cell] = 1\n yield maze\n\n for neighbour in 
get_neighbours(maze, current_node):\n if maze[neighbour.row][neighbour.cell] == 2:\n backtrack(maze, neighbour)\n yield maze\n return\n else:\n q.append(neighbour)\n maze[neighbour.row][neighbour.cell] = -2\n\n yield maze\n maze[current_node.row][current_node.cell] = -3\n time.sleep(args.speed)", "def BFS(adj,s):\n # The running time of breadth-first search is O(|E|+|V|)\n V = range(len(adj)) # sequence of nodes\n # Note!!: this is not entirely general - there is no quarantee that\n # the graph node list is sequentially numbered from 0 to n-1\n\n approxInf = 2*len(V) # establish an impossibly far distance, signal not visited\n\n dist = [approxInf for u in V] # initialize distance to unk for all u∈V\n dist[s] = 0 # zero distance to start node\n Q = queue.Queue(maxsize = len(adj)+1) # initialize a sufficiently large queue\n enqueue(Q,s) # queue containing just s\n while not Q.empty(): # Q !empty\n (Q,u) = dequeue(Q) # ? is there a better way than passing this queue around?\n for v in adj[u]: # all (u,v) ∈ E\n if dist[v] == approxInf: # have not explored yet\n Q = enqueue(Q,v)\n dist[v] = dist[u] + 1 # increment distance, & signal node visited\n \n return(dist)", "def bfs(graph, i):\n visited = set()\n\n unexplored = deque()\n unexplored.append(i)\n\n while unexplored:\n curr = unexplored.popleft()\n visited.add(curr)\n edges = graph[curr]\n\n for edge in edges:\n if edge in visited:\n continue\n else:\n unexplored.appendleft(edge)\n\n return visited", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Queue()\n return GraphSearch(problem, 'bfs').search(fringe)", "def bfs(maze):\n # TODO: Write your code here\n\n q = []\n visited = {}\n keys = {}\n selected = None\n q.append(maze.getStart())\n\n while len(q) > 0:\n curr = q.pop(0)\n if maze.isObjective(curr[0], curr[1]):\n selected = curr\n break\n\n neighbors = maze.getNeighbors(curr[0], curr[1])\n\n for n in neighbors:\n if n not in visited:\n visited[n] = True\n q.append(n)\n keys[n] = curr\n\n curr = selected\n path = []\n while curr != maze.getStart():\n path.append(curr)\n curr = keys[curr]\n\n path.append(curr)\n path.reverse() # backtrace\n print(path)\n return path", "def bfs(self, starting_vertex, destination_vertex):\n # create an empty queue and enqueue A-PATH-TO the starting vertex ID\n # create a Set to store the visited vertices\n # while the queue is not empty ..\n # dequeue the first PATH\n # grab the last vertex from the PATH\n # if that vertex has not been visited ..\n # check if its the target\n #if yes, return path\n #mark it as visited\n # add A PATH TO its neighbots to the back of the queue\n # copt the path\n # append the neighbor to the back\n \n \n # create an empty Queue \n queue = Queue()\n #push the starting vertex ID as list\n queue.enqueue([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n path = queue.dequeue()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n queue.enqueue(new_path)", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number 
of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n BFS(adjList, s, n)", "def find_path_all_bfs(graph,start,end):\n\tvisited = set()\n\twatched = set()\n\tpaths = []\n\n\twatched.add(start)\n\n\tnodes_queue = [(start,[start])]\n\twhile nodes_queue:\n\t\tcurrent_node, path = nodes_queue.pop(0)\n\n\t\tvisited.add(current_node)\n\n\t\tif (current_node == end):\n\t\t\tpaths.append(path)\n\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append((adjacent_node, path+[adjacent_node]))\n\n\treturn paths", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n queue.append(vertex_dict[queue[-1]])\n queue.reverse()\n return queue", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n queue = util.Queue() # queue for searshing the graph\n visited = [] # keep track of visited nodes\n start =problem.getStartState() # The start node\n queue.push((start, [])) # the sart state and empty path list is pushed to the queue\n \n while queue:\n (vrtx, path) = queue.pop()\n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx) :\n queue.push((successor[0], path+[successor]))\n\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Queue\n q = Queue()\n mapper = {} #child_point : (parent_point, direction_to_child)\n q.push(problem.getStartState())\n mapper[problem.getStartState()] = None #root\n\n while (not q.isEmpty()):\n point = q.pop()\n\n if (problem.isGoalState(point)):\n c = point\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n for child in problem.getSuccessors(point):\n if (child[0] not in mapper):\n q.push(child[0])\n mapper[child[0]] = (point, child[1])\n\n # util.raiseNotDefined()", "def breadthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\tfrom game import Directions\r\n\t#i = 0\r\n\tfrontera=util.Queue()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\t#i = i+1\r\n\t\t\t#print (i)\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def breadth_first(graph,start, end):\n queue = []\n queue.append([start])\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node == end:\n return path\n for adjacent in graph.get(node, []):\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n explored = set()\n Frontier = util.Queue()\n Frontier.push([[startState,None,0]])\n while not 
Frontier.isEmpty():\n StateTriples = Frontier.pop()\n node = StateTriples[-1][0]\n if problem.isGoalState(node):\n solution = []\n for i in StateTriples[1:]:\n solution = solution + [i[1]]\n return solution\n if node not in explored:\n explored.add(node)\n for i in problem.getSuccessors(node):\n Frontier.push(StateTriples+[list(i)])\n print(Frontier.isEmpty())\n util.raiseNotDefined()", "def my_BiBFS2(self, beginWord, endWord, wordList):\n if not beginWord or not endWord:\n return []\n \n \"\"\"\n hit --> h*t \n ==> hot --> *ot\n ==> dot --> do*\n ==> dog --> *og\n ==> cog\n ==> lot --> lo*\n ==> log --> *og\n ==> cog\n \"\"\"\n\n word_set = set(wordList) # for Quick Access later on\n if endWord not in word_set:\n return 0\n # Build Graph\n map_d = {} # edges\n for i in range(len(word_set)):\n word = wordList[i]\n for j in range(len(word)):\n pattern = word[:j] + '*' + word[j+1:]\n map_d[pattern] = map_d.get(pattern, []) + [word]\n \n\n Q = collections.deque()\n end_Q = collections.deque()\n distance = {}\n end_distance = {}\n \n Q.append((beginWord, 1))\n end_Q.append((endWord, 1))\n distance[beginWord] = 1\n end_distance[endWord] = 1\n while Q and end_Q:\n res = self.bfs2(Q, distance, end_distance, map_d)\n if res >= 0:\n # TODO:\n # pass\n print('distance: ', distance)\n print('end_distance: ', end_distance)\n return res\n res = self.bfs2(end_Q, end_distance, distance, map_d)\n if res >= 0:\n # pass\n print('distance: ', distance)\n print('end_distance: ', end_distance)\n return res\n print('distance: ', distance)\n print('end_distance: ', end_distance)\n return 0", "def find_BFS(self, value):\n to_visit = [self]\n while to_visit:\n curr = to_visit.pop(0) # BFS -> .pop(0) -> queue \n if curr.value == value:\n return curr\n to_visit.extend(curr.children)", "def bfs(initial_state, dimension=3):\n\t\n\treturn search(initial_state, Frontier(Queue), dimension)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # fringe priority queue\n fringe = util.PriorityQueue()\n fringe.push([problem.getStartState()],1) # fringe will have (priority, order, [s0,s1,..])\n\n # closed set\n closed = []\n\n i = 0\n while not fringe.isEmpty():\n\n # get highest priority path for expansion e.g. [s0,s2,s4]\n path_exp = fringe.pop()\n\n # take last node in path e.g. s4\n node_exp = path_exp[-1]\n\n # check goal state\n if problem.isGoalState(node_exp): # check if goal\n actions = actions_for_path(problem,path_exp)\n #import pdb; pdb.set_trace()\n return actions\n\n # add expanded node into closed set e.g. 
[s0,s1,s2]\n if node_exp not in closed:\n closed.append(node_exp)\n else:\n # if it's in the closed set, don't expand\n continue\n\n # get sucessors to expand fringe\n successors = problem.getSuccessors(node_exp)\n for successor in successors:\n # unpack states, actions\n ss,aa,_ = successor\n if ss not in closed:\n path = path_exp+[ss]\n # expand fringe by adding candidate paths, prioritize by len of path\n fringe.push(path,len(path))\n\n #i+=1\n if i==1000:\n import pdb; pdb.set_trace()\n\n util.raiseNotDefined()", "def breadthFirstSearch(initialState, finalState):\n\n def exploreNext(neighbor, move):\n \"\"\"Finds out if the neighbor is withinf the boundaries and explore it.\n `explored` is the set used in the BFS function.\n `stateQueue` is the queue inside the BFS function.\n `currentState` is each visited node inside the loop of the BFS function.\n\n \"\"\"\n if (neighbor != None and tuple(neighbor) not in explored):\n nextState = State(neighbor)\n nextState.path = currentState.path.copy()\n nextState.path.append(move)\n stateQueue.append(nextState)\n\n stateQueue = deque([]) # List of States\n explored = set() # Set of tuples of each visited state of the puzzle\n sizeBytesCounter = 0\n\n # Init queue\n stateQueue.append(State(initialState))\n\n # while queue is not empty\n while stateQueue:\n currentState = stateQueue.popleft()\n sizeBytesCounter += sys.getsizeof(currentState)\n\n # Add an unmodified list to the set, a tuple\n explored.add(tuple(currentState.puzzle))\n\n if finalState == currentState.puzzle:\n return currentState, explored, sizeBytesCounter\n \n # Create a node of the current state\n currentNode = Node(currentState.puzzle)\n\n # Iterate over posible paths\n exploreNext(*currentNode.up())\n exploreNext(*currentNode.down())\n exploreNext(*currentNode.left())\n exploreNext(*currentNode.right())\n \n return None", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n result = []\n qu = util.Queue()\n visited = set([])\n current = [problem.getStartState()]\n\n qu.push(current)\n\n while not qu.isEmpty():\n current = qu.pop()\n visited.add(current[-1])\n\n if problem.isGoalState(current[-1]):\n result = current\n break\n\n for each in problem.getSuccessors(current[-1]):\n if each[0] not in visited:\n temp = list(current)\n temp.append(each[0])\n qu.push(temp)\n visited.add(each[0])\n\n path = get_path(result)\n return path\n util.raiseNotDefined()", "def bfs(self, starting_vertex, destination_vertex):\n # creating an empty list of visited vertices\n visited = []\n # creating a queue with the starting vertex in it\n queue = [[starting_vertex]]\n # while we have items in our queueueue\n while queue:\n # pop the first item in the queueueue\n path = queue.pop(0)\n # getting the last value in our path\n node = path[-1]\n # checking to see if it has been seen already or not\n if node not in visited:\n # checking the neighbors of our farthest node\n for n in self.vertices[node]:\n # creating a new path list and appending the nieghbors\n # to it and the queueueueue\n new_path = list(path)\n new_path.append(n)\n queue.append(new_path)\n # if the destination is in the new_path\n # we are done and return the new path\n if n == destination_vertex:\n return new_path\n # adding the node to the visited list\n visited.append(node)", "def breadthFirstSearch(problem):\n marcado = set()\n fila = util.Queue()\n fila.push((problem.getStartState(), []))\n while not fila.isEmpty():\n pos, movimento = fila.pop()\n if problem.isGoalState(pos):\n return movimento\n if pos in marcado:\n 
continue\n marcado.add(pos)\n candidatos = problem.getSuccessors(pos)\n for candidato, acao, custo in candidatos:\n fila.push((candidato, movimento + [acao]))\n\n return []", "def bfs(maze):\n # TODO: Write your code here.\n start = maze.getStart() \n frontier = [] \n path = [] \n dim = maze.getDimensions()\n objs = maze.getObjectives()\n rows = dim[0]\n cols = dim[1]\n visited = {} # visited as a dictionary\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n frontier.append(start)\n visited[(i,j)] = (-2, -2)\n while frontier:\n pt = frontier.pop(0)\n if maze.isObjective(pt[0], pt[1]) == True:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pt[0], pt[1])\n for i in list_of_neighbors:\n if visited.get(i) == (-1, -1): \n frontier.append(i)\n visited[i] = pt \n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\" \n startState = problem.getStartState()\n visitedNodes = []\n fringe = util.Queue()\n cost = 0 \n if (problem.isGoalState(startState) == True ):\n return [] # No Specific Actions\n else :\n fringe.push((startState , [] , cost ))\n while ( fringe.isEmpty() == False ):\n currentState , actions , cost = fringe.pop()\n \"\"\" get the latest node in the Queue \"\"\"\n \n if ( problem.isGoalState(currentState) == True ): \n \"\"\" check if the node is our goal or not \"\"\"\n #print(\"Final Path : \" + str(actions))\n return actions\n else:\n if ( (currentState in visitedNodes) == False ): \n \"\"\" check if this node is alreay visited or needs to be extended ? \"\"\"\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n if(not node in visitedNodes):\n state , action , cost = node \n if ( not state in visitedNodes):\n fringe.push((state , actions + [action] , cost ))\n \n util.raiseNotDefined()", "def BFSUtility(obj,visited,vertex):\n stack = []\n subGraph = []\n stack.insert(0,vertex)\n visited[vertex] = True\n while(stack):\n subGraph.append(stack.pop())\n for nbrVertex in obj.adjList[subGraph[-1]]:\n if visited[nbrVertex]:\n continue\n stack.insert(0,nbrVertex)\n visited[stack[0]] = True\n return subGraph", "def bfs(vertex, graph, distances, shortest_ways, queue=deque()):\n if vertex not in distances:\n distances[vertex] = 0\n shortest_ways[vertex] = vertex\n for neighbour in graph[vertex]:\n if neighbour not in distances:\n queue.append(neighbour)\n distances[neighbour] = distances[vertex] + 1\n shortest_ways[neighbour] = shortest_ways[vertex] + ' ' + vertex + neighbour\n while len(queue) > 0:\n vertex = queue.popleft()\n bfs(vertex, graph, distances, shortest_ways, queue)", "def breadthFirstSearch(problem):\n visitedStates = set([])\n startState = problem.getStartState()\n expandedStates = set([])\n\n fringe = util.Queue()\n fringe.push((startState, []))\n visitedStates.add(startState)\n\n while not fringe.isEmpty():\n state, actions = fringe.pop()\n\n if(problem.isGoalState(state)):\n return actions\n\n expandedStates.add(state)\n for nextState, action, cost in problem.getSuccessors(state):\n if(nextState not in visitedStates):\n visitedStates.add(nextState)\n fringe.push((nextState, actions + [action]))\n\n return []", "def breadthFirstSearch(problem):\n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited 
start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in tempSuccList:\n repeat = False\n for s in successor:\n if (s[0] == succ[0]):\n repeat = True\n if (repeat == False):\n successor.append(succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])", "def bfs(graph, startnode):\n dist = {}\n\n # Initialize distances\n for node in graph:\n dist[node] = float('inf')\n dist[startnode] = 0\n\n # Initialize search queue\n queue = deque([startnode])\n\n # Loop until all connected nodes have been explored\n while queue:\n node = queue.popleft()\n for nbr in graph[node]:\n if dist[nbr] == float('inf'):\n dist[nbr] = dist[node] + 1\n queue.append(nbr)\n return dist", "def bfs(self, start_node, visit_func, distance_func = None):\n from collections import deque\n\n distances = dict()\n distances[start_node] = 0\n visited = set()\n qu = deque()\n qu.appendleft(start_node)\n while len(qu) != 0:\n node = qu.pop()\n if node in visited:\n continue\n visit_func(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n qu.appendleft(neighbor_node)\n if neighbor_node not in distances.keys():\n distances[neighbor_node] = distances[node] + 1\n if distance_func:\n distance_func(distances)", "def BFS(self,s,t,parent):\n #mark all vertices as not visited\n visited = [False]*(self.ROWS);\n # initialize a queue\n queue = []\n # add source to q and mark it visited\n queue.append(s)\n visited[s] = True\n #Breadth-first-search\n while queue:\n n = queue.pop(0)\n for index,val in enumerate(self.graph[n]):\n if visited[index] == False and val>0:\n queue.append(index)\n visited[index] = True\n parent[index] = n\n #return True if sink was visted\n if visited[t]:\n return True\n else:\n return False", "def bfs(graph, source):\n visited = [False] * len(graph.graph)\n print(visited)\n\n result = \"\"\n queue = []\n\n queue.append(source)\n visited[source] = True\n\n while queue:\n source = queue.pop(0)\n result += str(source)\n\n while graph.graph[source] is not None:\n data = graph.graph[source].vertex\n if not visited[data]:\n queue.append(data)\n visited[data] = True\n graph.graph[source] = graph.graph[source].next\n return result", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n frontier = util.Queue()\n start = problem.getStartState()\n record = [] # gonna use dictionary to keep track of movements\n frontier.push(start)\n explored = [start]\n\n location = 0 # to remember which successor part im accessing\n action = 1\n\n while not frontier.isEmpty():\n current_location = frontier.pop()\n print(current_location)\n\n if problem.isGoalState(current_location):\n break\n\n\n for each in problem.getSuccessors(current_location):\n if each[location] not in explored:\n frontier.push(each[location])\n record.append({'From': current_location, 
'To': each[location], 'By': each[action]})\n explored.append(each[location])\n\n while not problem.isGoalState(record[-1]['To']): # loop removes last couple of movements which don't lead to goal\n record.remove(record[-1])\n\n x = len(record)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if record[x - 1]['From'] != record[x - 2]['To']: # starts from goal and works backwards\n record.remove(record[x - 2])\n x = len(record)\n else:\n x -= 1\n\n return [path['By'] for path in record]\n\n return []", "def bfs(graph: np.ndarray, row: int, s: int, t: int, parent: list) -> bool:\r\n visited = [False] * row\r\n queue = []\r\n queue.append(s)\r\n visited[s] = True\r\n\r\n while queue:\r\n\r\n u = queue.pop(0)\r\n\r\n for ind, val in enumerate(graph[u]):\r\n if visited[ind] is False and val > 0:\r\n queue.append(ind)\r\n visited[ind] = True\r\n parent[ind] = u\r\n\r\n return True if visited[t] else False", "def get_bfs(self, s):\n # create a queue for BFS\n queue = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n # mark the start node as visited and enqueue it\n visited[s] = True\n queue.append(s)\n results = []\n\n while queue:\n # dequeue a vertex from queue and append to results.\n p = queue.pop(0)\n results.append(p)\n # get all adjacent vertices of the dequeued vertex s,\n # and for any unvisited adjacent, mark it visited and enqueue it.\n for v in self.graph[p]:\n if visited[v] is False:\n visited[v] = True\n queue.append(v)\n\n return results", "def bfs(self, starting_vertex, destination_vertex):\n visited = set()\n paths = [[starting_vertex]]\n \"\"\"\n For every list in paths. If the last item in the list is \n the destination return the list. If the last item is not \n in the visited cache add it and make a new path for all \n of it's edges. 
If the last item has been visited remove \n it from the paths list.\n \"\"\"\n for path in paths:\n vertex = path[-1]\n if vertex == destination_vertex:\n return path\n if vertex not in visited:\n visited.add(vertex)\n for key in self.get_neighbors(vertex):\n newPath = path + [key]\n paths.append(newPath)", "def bfs(maze):\n # TODO: Write your code here\n frontier = Queue()\n visited = []\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives.copy())\n frontier.put(start)\n explored = []\n \n\n while not frontier.empty(): # while frontier queue is not empty\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n\n objectivesLeft.remove(currentCell)\n \n # all objectives found, initialise backtrace and exit loop\n # if len(objectivesLeft) == 0:\n path.append(currentState)\n ret.append(currentCell)\n visited.append(currentState)\n break\n\n # current cell is not objective nor visited\n if visited.count(currentState) == 0:\n explored.append(currentCell)\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n\n # if neighbor is not visited, add it to the frontier\n if visited.count(neighbor) == 0:\n neighbor.setParent(currentState)\n frontier.put(neighbor)\n\n visited.append(currentState)\n\n #backtrace\n while path[0] != start:\n\n currentState = path[0]\n path.insert(0, currentState.parent())\n ret.insert(0, currentState.parent().cell())\n\n return ret", "def order_bfs(self) -> List[Nodo]:\n\n # Nodos por buscar, es una cola\n pending: List[Nodo] = [self.root]\n # Nodos ya visitados\n visited: List[Nodo] = []\n\n # Mientras la cola tenga items\n while len(pending) > 0:\n # Procesar el primer elemento\n curr = pending.pop(0)\n visited.append(curr)\n\n # Agregar los hijos no visitados del nodo a la cola\n for child in curr.children:\n if child in visited:\n continue\n pending.append(child)\n\n return visited", "def BFS(obj,vertex):\n validateVertex(vertex,obj.vertexList)\n order = []\n visited = dict()\n for ver in obj.vertexList:\n visited[ver] = False\n\n \n order.append(BFSUtility(obj,visited,vertex))\n for ver in visited.keys():\n if visited[ver]:\n continue\n order.append(BFSUtility(obj,visited,ver))\n return order", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Get the start node\n start_state = problem.getStartState()\n print(start_state)\n\n # Define a stack\n plan_stack = util.Queue()\n start_plan = [start_state] # node, cost\n plan_stack.push(start_plan)\n\n # Visited nodes\n visited_nodes = set(start_state)\n\n goal_found = False\n\n while not goal_found:\n # Get the plan from the stack\n plan_to_expand = plan_stack.pop()\n node_to_exp = plan_to_expand[-1]\n all_nxt_nodes = problem.getSuccessors(node_to_exp)\n\n # Traverse through all the next nodes\n for nxt_node in all_nxt_nodes:\n nxt_pos = nxt_node[0]\n\n if nxt_pos in visited_nodes: # Check if node is already visited\n continue\n\n visited_nodes.add(nxt_pos) # Add the node to visited nodes\n nxt_plan = plan_to_expand + [nxt_pos] # add node to the plan\n plan_stack.push(nxt_plan) # push the plan into the stack\n goal_found = problem.isGoalState(nxt_pos) # Check if goal is achieved\n if goal_found:\n break\n \n \n print(goal_found)\n print(nxt_plan)\n\n moves = []\n # Convert plan to moves\n for i in range(len(nxt_plan) - 1):\n for nxt_node in 
problem.getSuccessors(nxt_plan[i]):\n nxt_pos = nxt_node[0]\n nxt_mv = nxt_node[1]\n if nxt_pos == nxt_plan[i+1]:\n moves.append(nxt_mv)\n break\n \n return moves\n\n \n\n # Calculate the minimum plan cost \n #min_val = float(\"inf\")\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost < min_val:\n # min_val = plan_cost\n\n ## Expand the nodes with minimum plan cost\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost == min_val:\n # plan_step = one_plan[0] \n # # Expand the last node of plan\n # last_node = plan_step[end]\n # for nxt_node in problem.getSuccessors(last_node):\n\n\n\n util.raiseNotDefined()", "def breadth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while (True):\r\n state = fringe[0]\r\n del fringe[0]\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def bft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue \n q.enqueue(starting_vertex) # set enqueue with the starting vertex\n\n while q.size() > 0: # loop if the size is greater than 0\n v = q.dequeue() # dequeue and store \n\n if v not in visited: # if v has not in the set \n visited.add(v) # add v to the set \n print(v) \n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v]: # loop through neighbors \n q.enqueue(neighbor) # add each neighbor to the end of the que ", "def bfs(self, starting_vertex, destination_vertex):\n pass # TODO", "def breadth_first_search(problem):\n fringe = util.Queue()\n return general_search(problem, fringe)", "def breadthFirstSearchPaths(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n #explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def bfs(self, v_start, v_end=None) -> []:\n if not self.contains_vertex(v_start):\n return []\n if v_end is not None:\n if not self.contains_vertex(v_end):\n v_end = None\n\n traversed_vertices = []\n queue = deque([v_start])\n\n while len(queue) != 0:\n current = queue.popleft()\n if current not in traversed_vertices:\n 
traversed_vertices.append(current)\n if (v_end is not None) and (current == v_end):\n return traversed_vertices\n options = sorted(self.adj_list[current])\n for vertex in options:\n queue.append(vertex)\n return traversed_vertices", "def bidirectional_search(self):\n begin = time.time()\n\n initial_node = self.get_node(self.initial_state)\n\n final_node = self.get_node(self.final_state)\n\n queue = [initial_node, final_node]\n\n initial_node.visited_right = True\n \n final_node.visited_left = True\n\n visited_nodes = []\n \n while queue:\n node = queue.pop(0)\n\n if self.is_intersecting(node):\n end = time.time()\n\n method_time = end - begin\n\n copy_node = node\n\n path = []\n\n while node:\n path.append(node)\n\n node = node.parent_right\n\n path.reverse()\n\n del path[-1]\n\n while copy_node:\n path.append(copy_node)\n\n copy_node = copy_node.parent_left\n\n self.add_result('Busca bidirecional', method_time, path, visited_nodes)\n \n return True\n else:\n states = node.neighboring_states()\n\n neighbors = [self.add_node(state) for state in states]\n\n for neighbor in neighbors:\n if node.visited_left and not neighbor.visited_left:\n neighbor.parent_left = node\n\n neighbor.visited_left = True\n\n queue.append(neighbor)\n \n visited_nodes.append(neighbor)\n\n if node.visited_right and not neighbor.visited_right:\n neighbor.parent_right = node\n\n neighbor.visited_right = True\n\n queue.append(neighbor)\n\n visited_nodes.append(neighbor)\n \n end = time.time()\n\n method_time = end - begin\n\n self.add_result('Busca bidirecional', method_time, [], visited_nodes)\n\n return False", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Queue()\n\n # Retrieve the init state\n init = (problem.getStartState(), ['Stop'], 0)\n open.push(init)\n closed = []\n while not open.isEmpty():\n currNode = open.pop()\n currState = currNode[0]\n currPath = currNode[1]\n currCost = currNode[2]\n\n if problem.isGoalState(currState):\n return currPath[1:]\n else:\n if currState not in closed:\n closed.append(currState)\n successors = problem.getSuccessors(currState)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath + [each[1]], currCost + each[2])\n open.push(temp)\n return False" ]
[ "0.7630348", "0.7485004", "0.7482269", "0.746009", "0.73893636", "0.7359191", "0.7313912", "0.72900593", "0.7280733", "0.7267169", "0.72345245", "0.72265714", "0.71806186", "0.71715397", "0.7168454", "0.7154696", "0.713683", "0.7130648", "0.7121028", "0.706812", "0.706548", "0.7055547", "0.7037251", "0.70306826", "0.7026697", "0.7013327", "0.70106715", "0.69872785", "0.69455445", "0.6929407", "0.6925641", "0.6923132", "0.69159734", "0.68879074", "0.6874149", "0.68718076", "0.68676186", "0.68385196", "0.6828939", "0.68195397", "0.6819062", "0.68068194", "0.6799971", "0.6799723", "0.6787852", "0.67864954", "0.67713356", "0.6764945", "0.6752057", "0.67488897", "0.67478883", "0.67428744", "0.6741716", "0.6738471", "0.6730697", "0.6717589", "0.6715251", "0.6704273", "0.67003185", "0.6698588", "0.66968286", "0.66960764", "0.6691086", "0.66729075", "0.6662407", "0.6655687", "0.66460335", "0.6640305", "0.66326714", "0.6630248", "0.6624078", "0.6623577", "0.6623054", "0.6622801", "0.661849", "0.66135883", "0.6601665", "0.6587202", "0.6586892", "0.6584722", "0.6584072", "0.6571239", "0.65491986", "0.65435225", "0.6541816", "0.65394783", "0.65367115", "0.6534377", "0.65270543", "0.65257764", "0.65061873", "0.650513", "0.65048397", "0.64961433", "0.6492572", "0.64888096", "0.6480507", "0.64732534", "0.64694846", "0.6465794", "0.6454147" ]
0.0
-1
A DFS solution that runs in O(N) time and space
def dfs(self, root: TreeNode) -> int:
    if not root:
        return 0

    def dfs(node):
        if not node:
            return float('inf')
        if not node.left and not node.right:
            return 1
        return min(dfs(node.left), dfs(node.right)) + 1

    return dfs(root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def DFS(graph, s, n):\n # Stack implemented using list. list.append() and list.pop() inherently have LIFO structure.\n visited = [False] * n\n stack = []\n \n stack.append(s)\n visited[s] = True\n while(stack):\n v = stack.pop()\n print(v, end= \" \")\n \n for i in range(len(graph[v])):\n if (not visited[graph[v][i]]):\n stack.append( graph[v][i] )\n visited[graph[v][i]] = True", "def dfs(node: TreeNode):\n if not node:\n return\n helper(node, 0, sum)\n dfs(node.left)\n dfs(node.right)", "def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n \n st = Stack()\n mapper = {}\n mapper[problem.getStartState()] = None\n\n st.push(problem.getStartState())\n while not(st.isEmpty()):\n vertex = st.pop()\n \n if (problem.isGoalState(vertex)):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n neigh = problem.getSuccessors(vertex)\n # neigh.reverse()\n # neigh.sort()\n for child in neigh:\n if child[0] not in mapper:\n st.push(child[0])\n mapper[child[0]] = (vertex, child[1])\n # print mapper\n \n # visited = []\n # p = dfsRecursive(problem, problem.getStartState(), st, visited, [])\n # return p\n \n # pathfind = {}\n # st.push(problem.getStartState())\n # iterative approach:\n # while (not st.isEmpty()):\n # point = st.pop() # (x,y)\n # if problem.isGoalState(point):\n # # print point\n # print pathfind\n # # print visited\n # elif (not (point in visited)):\n # visited.append(point)\n # # print pathfind, '\\n'\n # print visited, '\\n'\n # for child in problem.getSuccessors(point):\n # st.push(child[0])\n # pathfind[child[0]] = point #this preemptively adds!\n # util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITH FOR DFS\n \n function graph-search(problem, fringe) retuen a sloution or failure\n \n closed <-- an empty set\n fringe <-- insert (make-node (initial-state [problem]), fringe)\n \n loop do :\n if fringe is empty then return failure\n node <-- Remove-front (fringe)\n if goal-test (problem, state[node]) then return node\n if state[node] is not in closed then \n add STATE[node] to closed\n for child-node in EXPAND(STATE[node],problem) do\n fringe <-- Insert (child-node, fringe)\n end\n end\n \"\"\"\n\n 
templist=[]\n explored = set()\n fringe = util.Stack()\n #print \"the stat node is : \", problem.getStartState()\n\n fringe.push((problem.getStartState(),templist))\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n # print \"Pacman is currently at : \", currentNode\n if problem.isGoalState(currentNode):\n # print \" Goal State Found : \", currentNode\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n # print \"Adding current node to explored\"\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # print \"child node : \", childNode , \" is added \"\n fringe.push((childNode[0],currDir+[childNode[1]]))\n\n return pathToGoal", "def dfs(self, starting_vertex, destination_vertex):\n \"\"\" LIFO\n Create a stack\n Create a set to store visited\n PUSH starting vertex into an array (STACK)\n While the STACK is NOT empty \n get((pop) first PATH vertex\n get Vertex from END of PATH\n check if NOT visited\n mark as visited\n check if vertex is destination_vertex\n If TRUE, return path \n PUSH path to ALL of neighbors\n make copy of current path\n add neighbor to path copy\n PUSH path copy\n \"\"\" \n s = Stack() # Create a stack\n s.push([starting_vertex]) # PUSH starting vertex into an array (STACK)\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the STACK is NOT empty\n path = s.pop() # get(pop) first PATH vertex)\n v = path[-1] # get Vertex from END of PATH \n\n while v not in visited: # check if NOT visited\n visited.add(v) # mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path \n\n for n in self.get_neighbors(v): # PUSH path to ALL of neighbors\n path_c = path[:] # make copy of current path\n # path_c.extend([n]) # add neighbor to path copy\n path_c.append(n) # add neighbor to path copy\n s.push(path_c) # PUSH path copy", "def depthFirstSearch(problem):\n container = util.Stack() \n return depthOrBreadthFirstSearch(problem, container)", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound 
of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def dfs(adj, used, order, x):\n # write your code here\n # Mark as visited\n used[x] = 1\n for v in adj[x]:\n if not used[v]:\n # If not visited, run dfs\n dfs(adj, used, order, v)\n # When no more recursion, add to the order list\n order.append(x)", "def depthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\tfrontera = util.Stack()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def iterativeDeepeningSearch(problem):\n \"*** YOUR CODE HERE FOR TASK 1 ***\"\n\n # Retrieve the init state\n # state model ( (position, depth), path, cost)\n initState = ( (problem.getStartState(), 1) , ['Stop'], 0)\n limit = 1\n while True:\n # Initialization each iteration\n open = util.Stack()\n open.push(initState)\n closed = {}\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0][0]\n currDepth = currState[0][1]\n currPath = currState[1]\n currCost = currState[2]\n\n closed[currPos] = currCost\n if currDepth <= limit:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n nextDepth = currDepth + 1\n for each in successors:\n nextCost = currCost + each[2]\n nextPath = currPath + [each[1]]\n if each[0] not in closed.keys() or nextCost < closed[each[0]]:\n temp = ( (each[0], nextDepth), nextPath, nextCost)\n open.push(temp)\n if problem.isGoalState(temp[0][0]):\n return nextPath[1:]\n limit += 1", "def dfs(n):\n if seen[n]: return seen[n] == 1 \n seen[n] = 1\n if any(dfs(nn) for nn in digraph.get(n, set())): return True \n seen[n] = 2\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state #state of the game\n self.parent = parent #parent of the node\n self.action = action #action that led to that node\n self.pathCost = pathCost #total cost of tha path until that node\n\n def solution(self): #return the path to the goal node\n path = [] #path is a list of actions\n tempNode = self #temp node is the goal node\n while tempNode.state != problem.getStartState(): #until we get to the initial node\n path.insert(0, tempNode.action) #insert at the start of the list\n tempNode = tempNode.parent #go to the parent of the node\n return path #return list of actions\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost #total cost is the total cost of the parent + the cost of the last action\n child = Node(successor, parent, action, pathCost) #create new child node\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0) #create initial node with start state and no parent\n if 
problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Stack() #dfs uses a stack\n frontier.push(initialNode) #insert initial node to the stack\n explored = set() #explored nodes are added to a set\n\n while not frontier.isEmpty(): #while stack is not empty\n nextNode = frontier.pop() #extract the last node entered\n explored.add(nextNode.state) #add the state of the node to the explored set\n for successor, action, stepCost in problem.getSuccessors(nextNode.state): #for every successor create a new child\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list: #if child is not already explored or is not in the stack\n if problem.isGoalState(child.state): # if node is goal node we return the path of actions\n return child.solution()\n frontier.push(child) #insert it into the stack\n\n return [] #if stack is empty\n util.raiseNotDefined()", "def dfs(self, starting_vertex, destination_vertex): # great for if you know the start and end, like a maze with 1 entry/1 exit\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack\n s.push([starting_vertex]) # push the starting vertex to the top of the stack \n\n while s.size() > 0: # loop if the size is greater than 0\n path = s.pop() # pop off the top element of the stack and store \n v = path[-1] # store the vertex from the end of path\n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors\n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n s.push(path_copy) # push the path copy to the Stack", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n Pilha_Caminho = Stack()\n Pilha_Estados = Stack()\n Caminho = []\n Visitados = []\n\n Pilha_Caminho.push(Caminho) # empilha caminho (vazio, no começo)\n Pilha_Estados.push(problem.getStartState()) # empilha estado inicial\n\n while (Pilha_Caminho.isEmpty() == False and Pilha_Estados.isEmpty() == False):\n Caminho_Andado = Pilha_Caminho.pop() # atualiza caminho\n Estado_Atual = Pilha_Estados.pop() # atualiza estado\n if problem.isGoalState(Estado_Atual): # caso estado atual seja o desejado,\n return Caminho_Andado # retorna o caminho total\n if Estado_Atual not in Visitados: # caso estado atual não tenha sido visitado\n Visitados.append(Estado_Atual) # marca estado como visitado\n for Sucessor in problem.getSuccessors(Estado_Atual): # busca sucessores\n if Sucessor[0] not in Visitados: # caso sucessor não tenha sido visitado\n Pilha_Caminho.push(Caminho_Andado + [Sucessor[1]]) # atualiza caminho total na pilha\n Pilha_Estados.push(Sucessor[0]) # atualiza estado\n return", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited_nodes = []\n start_node = problem.getStartState()\n visited_nodes.append(start_node)\n curr_node = start_node\n q = util.Queue()\n directions = util.Queue()\n q.push(curr_node)\n goal_found = problem.isGoalState(curr_node)\n\n while not goal_found:\n nxt_node_list = problem.getSuccessors(curr_node)\n nxt_node_found = False\n\n # Check if a child can be found which has not been visited\n for node in nxt_node_list:\n nxt_node = node[0]\n move = node[1]\n if nxt_node not in 
visited_nodes:\n nxt_node_found = True # mark that a child node has been found\n q.push(nxt_node) # add the node in the tree\n directions.push(move) # add the direction\n visited_nodes.append(nxt_node) # mark the node as visited\n break\n\n # If child not found, go to parent\n if not nxt_node_found:\n q.list.pop(0)\n directions.list.pop(0)\n\n if q.isEmpty(): break\n\n curr_node = q.list[0]\n goal_found = problem.isGoalState(curr_node)\n\n final_moves = []\n while not directions.isEmpty():\n final_moves.append(directions.pop())\n \n return final_moves\n #util.raiseNotDefined()", "def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def dfs(start_node, goal_state, limit = None, iterative = False, graphSearch = False, improved_descendants = False):\t\n\tfringe = [start_node]\n\tnumber_nodes_expanded = 0\n\tnumber_nodes_visited = 0\n\n\tt0 = time.time()\n\n\tif graphSearch:\n\t\tclosed = {} #hash_map\n\n\twhile len(fringe) > 0:\n\t\tnumber_nodes_visited += 1\n\t\tnode = fringe.pop()\n\t\tnode.count = number_nodes_visited\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\tif iterative:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\n\t\tif node.check_solution(goal_state):\n\t\t\t_ = print_solution(node, number_nodes_expanded, goal_state)\n\t\t\tif iterative:\n\t\t\t\treturn True, number_nodes_visited\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\treturn True \n\n\n\t\tif limit == None or node.depth < limit:\n\t\t\tif graphSearch:\n\t\t\t\tnode_hash = node.build_hash()\n\t\t\t\tnode_depth = node.depth\n\t\t\t\t#can also add if it's found i at smaller depth. 
Grants solution every time\n\t\t\t\tif node_hash not in closed or closed[node_hash] > node_depth:\n\t\t\t\t\tclosed[node_hash] = node_depth\n\t\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\t\tfringe.append(child_nodes[i])\n\t\t\telse:\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\tfringe.append(child_nodes[i])\n\t\n\tif iterative:\n\t\treturn False, number_nodes_visited\n\t\t\t\n\treturn False", "def DFS(graph):\n stack = []\n actual_position = '1'\n stack.append(actual_position)\n visited_vertices = []\n\n while True:\n for neighbors in graph.values():\n try:\n neighbors.remove(actual_position) #usun sasiadow o wartosci aktualnej pozycji dla wszystich wierzcholkow grafu\n except ValueError:\n pass\n\n visited_vertices.append(actual_position) #odwiedzone wierzcholki\n\n try:\n actual_position = min(graph[actual_position]) #przejdz do sasiada o najnizszym numerze\n except ValueError:\n stack.remove(actual_position) # sciagamy ze stosu na stos\n if stack == []:\n return visited_vertices\n actual_position = stack.pop(-1) # ustaw z wierzchu stosu pozycje aktualna\n\n stack.append(actual_position) # dajemy na stos aktualna pozycje", "def bruteForceSearch(digraph, start, end, maxTotalDist, maxDistOutdoors):\n return DFS1(digraph, start, end, [], maxTotalDist, maxDistOutdoors)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.Stack()\n start_node = problem.getStartState()\n\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,[]))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n explored.add(node[0])\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1][:]\n actions.append(action)\n new_node = (nextState, actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n #Stack to hold the node that have been visited along with the path taken from the start node to reach that node.\n stack = Stack()\n #Set to hold the node explored.\n explorednode = set()\n #Get the start node.\n startnode = problem.getStartState()\n #Push the starting node on the Stack along with an empty set to know the direction in order to reach the node.\n stack.push((startnode,[]))\n #Loop till the stack is empty\n while stack.isEmpty() is not True:\n #Pop the currentnode and the direction from the stack\n currentnode, direction = stack.pop()\n #We will now add the node to set of explored node.\n explorednode.add(currentnode)\n #If the node is the goal. 
We made it!!\n if problem.isGoalState(currentnode):\n #print currentnode, direction\n #The direction holds the way to reach till the goal from the start node.\n #print direction\n return direction\n #Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n #If the successor(child) is not explored\n if successor not in explorednode:\n #Add the successor to the stack along with the path to reach it.\n stack.push((successor, direction + [action]))", "def _dfs_iteration(self, v):\n stack1 = [v]\n self._visited[v] = True\n while stack1:\n curr = stack1.pop()\n for w in self._G.adj(curr):\n if not self._visited[w]:\n stack1.append(w)\n self._visited[w] = True\n self._pre[w] = curr\n elif self._pre[w] != curr:\n self.cycle = True", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n stack = util.Stack() # stack for searshing the graph\n visited = [] # Keep track of visited nodes\n start =problem.getStartState() # The start node\n stack.push((start, [])) # the sart state and empty path list is pushed to the stack\n \n while stack:\n (vrtx, path) = stack.pop() # Pop tfrom the stack , vrtx: the poped node for expantion.\n if vrtx not in visited: # if the node is visited alraedy \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx):\n stack.push((successor[0], path+[successor]))\n util.raiseNotDefined()", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n ## YOUR CODE HERE\n root_node = Node(problem.getStartState(), [], 0, None, 0)\n frontier = util.Stack()\n frontier.push(root_node)\n explored = []\n\n while not(frontier.isEmpty()):\n node_to_explore = frontier.pop()\n\n if problem.isGoalState(node_to_explore.state):\n return node_to_explore.state\n else:\n copy_state = node_to_explore.state.copy()\n \n if convertStateToHash(copy_state) not in explored:\n\t explored.append(convertStateToHash(copy_state))\n\t successors_state = problem.getSuccessors(copy_state)\n\t if len(successors_state) > 0:\n\t\t for state_action_cost in successors_state:\n\t\t if convertStateToHash(state_action_cost[0]) in explored:\n\t\t continue\n\t\t else:\n\t\t frontier.push(Node(state_action_cost[0], state_action_cost[1], node_to_explore.path_cost + 1, node_to_explore, node_to_explore.depth + 1))\n\n return False\n # util.raiseNotDefined()", "def dfs(graph, root, method='dfs', max_depth=10000):\n \n # Get node object from node ID\n root = graph.getnodes(root)\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n visited = []\n stack = [root.nid]\n depth = 0\n \n while stack or depth == max_depth:\n node = stack.pop(stack_pop)\n \n if node not in visited:\n visited.append(node)\n stack.extend(\n [x for x in node_neighbors(graph, node) if x not in visited])\n depth += 1\n \n return visited", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n from game import Directions\n visited = set() # unique elements\n state = problem.getStartState()\n #returns starting agent's position\n waiting_list = util.Stack()\n # LIFO\n # last in first out\n # parents = 
collections.defaultdict(collections.UserDict)\n parents = {}\n #dictionary\n sequence = []\n #LIFO\n for action in problem.getSuccessors(state):\n # in order to push full-state values\n waiting_list.push(action)\n # enumarating tuple\n\n while not waiting_list.isEmpty():\n state = waiting_list.pop()\n \n visited.add(state[0])\n # node is visited and we wont visit those nodes\n \n for substate in problem.getSuccessors(state[0]):\n # take a look to successors of current node\n \n if substate[0] not in visited:\n # if not in visited \n # saving parents\n parents[substate[0]]={'parent':state} \n # generate new node\n waiting_list.push(substate)\n # push to stack\n if problem.isGoalState(substate[0]): \n target_state = substate \n #finding wayback\n\n\n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]", "def recursive_dft(self, start, visited=[]):\n if start not in visited:\n visited.append(start)\n for i in self.neighbors(start):\n self.recursive_dft(i, visited)\n return visited", "def dfs( self ):\n\n #print self.state; \n #print self.visited;\n SearchProblem.stateVisited= SearchProblem.stateVisited+1 \n \n if self.stop: # check class variable and stop searching...\n return;\n\n for action in self.edges(): # consider each edge leading out of this node\n\n action.destination.path = self.path + str(action.label); \n # get the label associated with the\n # action and append it to the path\n # string\n\n action.destination.visited = self.visited.copy();\n # make copy of source node's visited set\n # and use it as destination node's\n # visited set\n\n action.destination.visited.add( repr(action.destination.state) );\n\n if action.destination.is_target(): \n # check if destination of edge is target node\n action.destination.target_found(); # perform target found action\n if not self.continue_search(): # stop searching if not required\n SearchProblem.stop = True; # set class variable to record that we\n break; # are done\n\n if repr(action.destination.state) in self.visited:\n continue; # skip if we've visited this one before\n\n action.destination.dfs(); # resume recursive search ", "def dfs(starting_vertex):\n s = Stack()\n\n s.push([starting_vertex])\n\n while s.size() > 0:\n p = s.pop()\n l = p[-1]\n\n if l not in new_visited_rooms:\n return p\n neighbors = set(get_neighbors(l))\n \n for n in neighbors:\n new_path = p.copy()\n new_path.append(n)\n s.push(new_path)", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. 
> 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #util.Stack() = LIFO for DFS\n #travel down path until end of line unlike BFS, backtrack until there is another path\n\n visited = []\n\n frontier = util.Stack()\n frontier.push( (problem.getStartState(), []) ) \n\n while not frontier.isEmpty():\n node,actions = frontier.pop()\n\n if problem.isGoalState(node):\n return actions\n\n visited.append(node)\n\n for coord,direction,cost in problem.getSuccessors(node):\n if not coord in visited:\n frontier.push((coord, actions + [direction]))\n\n return []", "def dfs_iter(graph, start):\n # vkladam uzel a index potencialniho naslednika, kterym mam pokracovat\n stack = [(start, 0)]\n time = 1\n graph.discovery_time[start] = time\n graph.visited[start] = True\n\n while stack: # not empty\n u, v = stack.pop()\n\n while v < graph.size and not is_edge(graph, u, v):\n v += 1\n\n if v < graph.size:\n # found successor, u is not yet finished\n stack.append((u, v + 1))\n\n if not graph.visited[v]:\n # we have discovered v\n stack.append((v, 0))\n graph.parent[v] = u\n graph.visited[v] = True\n time += 1\n graph.discovery_time[v] = time\n else:\n # u has no more successors\n time += 1\n graph.finishing_time[u] = time", "def dft_recursive(self, starting_vertex, visited=None):\n \n # for vertex in self.get_neighbors(starting_vertex):\n # if vertex not in visited:\n # visited.add(vertex)\n # self.dft_recursive(vertex, visited)\n # return visited\n if visited == None:\n visited = set()\n print(starting_vertex)\n visited.add(starting_vertex)\n for v in self.get_neighbors(starting_vertex):\n if v not in visited:\n self.dft_recursive(v, visited)", "def dfs(self, start_node, cbfunc):\n visited = set()\n stack = [start_node]\n\n while len(stack) != 0:\n node = stack.pop()\n if node in visited:\n continue\n cbfunc(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n stack.append(neighbor_node)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs(self, starting_vertex, destination_vertex):\n # create an empty stack \n stack = Stack()\n #push the starting vertex ID as list\n stack.push([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n path = stack.pop()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n stack.push(new_path)", "def dfs(self, initialSt, goalSt): # Depth­First Search\n\n self.__reset_all_variables()\n\n start = time.perf_counter()\n\n frontier = deque() # deque will be treated as a stack\n frontier.append(initialSt)\n frontier_U_explored = set()\n frontier_U_explored.add(initialSt) # for fasten up the lookup time\n explored = set()\n\n max_frontier_size = 0\n max_ram_used = psutil.virtual_memory().used\n max_depth = 
initialSt.depth\n\n while len(frontier):\n currentState = frontier.pop()\n explored.add(currentState)\n frontier_U_explored.add(currentState)\n\n max_depth = currentState.depth if currentState.depth > max_depth else max_depth\n\n if goalSt == currentState:\n\n end = time.perf_counter()\n\n self.__success(initialSt,\n currentState,\n len(explored)-1,\n len(frontier),\n max_frontier_size,\n max_depth,\n end-start,\n max_ram_used,\n \"dfs\")\n return True\n\n h = currentState.children()\n h.reverse()\n for child in h:\n if child not in frontier_U_explored:\n frontier.append(child)\n frontier_U_explored.add(child)\n\n max_frontier_size = len(frontier) if len(\n frontier) > max_frontier_size else max_frontier_size\n max_ram_used = psutil.virtual_memory().used if psutil.virtual_memory(\n ).used > max_ram_used else max_ram_used\n\n return False", "def dfs(node):\n nonlocal ans\n if not node: return []\n if node.left is node.right is None: return [0]\n left,right = dfs(node.left), dfs(node.right)\n ans += sum(2 + x + y <= distance for x in left for y in right)\n return [1 + x for x in left + right]", "def depthFirstSearch(problem):\n #print \"Start:\", problem.getStartState()\n #print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n #print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n #created a frontier Stack for DFS\n #Here the stack acts as a LIFO stack\n neighbourNodes = util.Stack()\n #created a list of moves which will be returned in then end\n moves = []\n #pushed the start node and empty moves list, onto the frontier stack\n neighbourNodes.push((problem.getStartState(),moves))\n #this is a set of nodes which have been seen, to avoid adding nodes already visited \n seenNodes = set()\n #condition evaluated based on the existence of elements in the frontier stack\n while not neighbourNodes.isEmpty():\n #last node in the stack is popped and its state and action is stored\n poppedNodeState, poppedNodeAction = neighbourNodes.pop()\n #condition to check if the node is already been visited\n if(poppedNodeState in seenNodes):\n #if yes then it just skips the iteration using the continue statement\n continue\n #condition to check if the current node is the goal node\n if problem.isGoalState(poppedNodeState):\n #if yes then return the action or moves to be performed list\n return poppedNodeAction\n #if not visited before then node is added to the seenNodes set\n seenNodes.add(poppedNodeState)\n #loop to parse the successor nodes and check and add them to the frontier stack\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n #checking if the successor node has already been visited before\n if(state in seenNodes):\n #if yes then it skips that node\n continue\n #else it adds that successor along with it action appeneded with the already existing actions\n neighbourNodes.push((state, poppedNodeAction+[action]))\n #the list of moves if finally returned\n return moves\n #util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Stack()\n\n # Retrieve the init state\n initState = (problem.getStartState(), ['Stop'], 0)\n open.push(initState)\n closed = []\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0]\n currPath = currState[1]\n currCost = currState[2]\n\n if problem.isGoalState(currPos):\n 
return currPath[1:]\n else:\n closed.append(currPos)\n if currState not in closed:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath+[each[1]], currCost+each[2])\n open.push(temp)\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n\n # print(\"Start:\", problem.getStartState())\n # print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n # print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\n # Initialize a frontier, and push the initial state into the frontier\n frontier = util.Stack()\n frontier.push([(problem.getStartState(), 'move', 0)])\n # Initialize a explored set to store the visited nodes\n exploredSet = set()\n\n # Check the content of frontier\n while not frontier.isEmpty():\n stateList = list()\n stateList = frontier.pop()\n # print (stateList)\n # What we focus on is the next state, not the (previous state + next state), so we should take the last element\n nextState = stateList[len(stateList) - 1]\n # Check the current state is goal or not\n if problem.isGoalState(nextState[0]):\n # Initial a path, which is the way to the goal state\n path = list()\n for eachMove in stateList:\n path.append(eachMove[1])\n # If the initial state is the goal state, there's no need to explore other nodes, so that's called special condition\n if len(path) == 1:\n return path[0]\n # This is the normal condition, we should convey the path except the first one, because we haven't define what's \"move\"\n else:\n return path[1:]\n # If this is a state which we don't visit, add it to the explored set(this is called GSA)\n if not nextState[0] in exploredSet:\n exploredSet.add(nextState[0])\n # Give me your child nodes\n for childState in problem.getSuccessors(nextState[0]):\n nextStateList = stateList[:]\n # we focus on the path, so we have to record the every move from the initial state to the current one\n nextStateList.append(childState)\n frontier.push(nextStateList)\n\n # Or maybe there's no way to the goal state\n else:\n return \"There's no way.\"", "def dfs(x):\n if x <= n:\n ans.append(x)\n for xx in range(10): dfs(10*x + xx)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n '''\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState((2,2))\n print \"Start's successors:\", problem.getSuccessors((1,1))\n suc=problem.getSuccessors(problem.getStartState())\n actionList=[]\n stateList=[]\n import random\n randomNum=random.randrange(0,len(suc),1)\n \n \n print len(suc)\n #for i in range(1000):\n while not problem.isGoalState(suc[randomNum][0]):\n\tprint randomNum\n\trandomNum=random.randrange(0,len(suc),1)\n\trandomAction=suc[randomNum][1]\n\t\n \t#print randomNum\n\tif suc[randomNum][0] not in stateList:\n\t\tstateList.append(suc[randomNum][0])\n\t\tactionList.append(randomAction)\n \t\tsuc=problem.getSuccessors(suc[randomNum][0]) \n \n #actionList.append(suc[randomNum][0])\n #if kiki==0:\n print actionList\n \n return actionList\n\n\n #util.raiseNotDefined()\n '''\n return DFS(problem,problem.getStartState(),[])", "def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: 
low[x] = min(low[x], disc[xx])", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n\n frontier = Stack()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n\n start = node(problem.getStartState(),'','')\n frontier.push(start)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n if achou == False:\n successor = node(vertex[0],path.path,vertex[1])\n frontier.push(successor)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad:\n actions.append(successor.action)\n successor = ant\n actions.reverse()\n return actions", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n # YOUR CODE HERE\n frontier = util.Stack()\n explored = set()\n initialState = problem.getStartState()\n frontier.push(initialState)\n while not frontier.isEmpty():\n choice = frontier.pop()\n if convertStateToHash(choice) not in explored:\n if problem.isGoalState(choice):\n return choice\n successors = problem.getSuccessors(choice)\n for successor in successors:\n frontier.push(successor[0])\n explored.add(convertStateToHash(choice))\n # util.raiseNotDefined()", "def DFS(initial_state, check_dict): \r\n \r\n print(\"Implementing DFS...\")\r\n q = deque()\r\n q.append(initial_state)\r\n accomplished = False\r\n \r\n while len(q) != 0:\r\n path = q.pop()\r\n \r\n if is_goal(path[-1][0]):\r\n goal = path\r\n accomplished = True\r\n break\r\n \r\n state_container = next_possible_states(path, check_dict, False)\r\n for i in state_container:\r\n if len(path) <= 1:\r\n temp = list(path)\r\n temp.append(i)\r\n q.append(temp)\r\n else:\r\n if i[0] != path[-2][0]:\r\n temp = list(path)\r\n temp.append(i)\r\n q.append(temp)\r\n\r\n \r\n if accomplished:\r\n print(\"Solved! Number of moves:\", len(goal) - 1)\r\n return goal, True\r\n else:\r\n print(\"Cannot be solved. 
Number of moves:\", len(path) - 1)\r\n print(path)\r\n return path, False", "def dfs(pos, dis):\n global ans\n if pos == e:\n ans = dis - 1 if not ans or dis < ans else ans\n return\n\n # Backtracking\n if ans and dis > ans:\n return\n\n # Check the point visited\n visited[pos[0]][pos[1]] = 1\n for i in range(4):\n ny = pos[0] + dy[i]\n nx = pos[1] + dx[i]\n if 0 <= ny < N and 0 <= nx < N:\n # If the new point is not wall and not visited\n if maze[ny][nx] != 1 and not visited[ny][nx]:\n dfs([ny, nx], dis + 1)\n visited[pos[0]][pos[1]] = 0", "def depth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while(len(fringe) > 0):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while(True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val: #( x, y, z)\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def dfs(self, node):\n self.preOrderNumbers[node] = self.counter\n self.counter = self.counter + 1\n self.stackP.append(node)\n self.stackS.append(node)\n for neighbor_vertex in self.graph.edges[node]:\n if neighbor_vertex not in self.preOrderNumbers:\n self.dfs(neighbor_vertex)\n elif neighbor_vertex in self.notAssignedVertices:\n while self.preOrderNumbers[self.stackP[-1]] > self.preOrderNumbers[neighbor_vertex]:\n self.stackP.pop()\n\n if node == self.stackP[-1]:\n self.stackP.pop()\n component = []\n while node in self.stackS:\n vertex = self.stackS.pop()\n component.append(vertex)\n self.notAssignedVertices.remove(vertex)\n self.scComponents.append(component)", "def idfs(start_node, goal_state, improved_descendants = False):\t\n\tnumber_nodes_expanded = 0\n\tt0 = time.time()\n\n\tfor lim in range(21): #from depth 0 to 20\n\t\tsolution, number_nodes_expanded_iter = dfs(start_node, goal_state, lim, iterative= True, improved_descendants= improved_descendants)\n\t\tnumber_nodes_expanded += number_nodes_expanded_iter\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\treturn False\n\n\t\tif solution:\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\treturn True\n\t\t\n\treturn False", "def depthFirstSearch(problem):\n\n\n no = problem.getStartState()\n if (problem.isGoalState(no)):\n return []\n \n pilha = util.Stack()\n pilha.push((no, []))\n \n explorados = []\n \n while not pilha.isEmpty():\n (no, caminho) = pilha.pop()\n \n if problem.isGoalState(no):\n return caminho\n \n explorados.append(no)\n for filho in problem.getSuccessors(no):\n if (filho[0] not in explorados):\n pilha.push((filho[0], caminho + [filho[1]]))\n\n return []", "def depthFirstSearch(problem):\n stack = Stack()\n\n visited = []\n parent_dict = dict()\n start_state = problem.getStartState()\n stack.push(start_state)\n current_path = []\n actions_dict = dict()\n final_actions = []\n flag = False\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n current_path.append(current_state)\n visited.append(current_state)\n\n if 
problem.isGoalState(current_state):\n break\n\n successors = problem.getSuccessors(current_state)\n\n for s in successors:\n flag = False\n if s[0] not in visited:\n stack.push(s[0])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n flag = True\n\n\n\n if not successors and not stack.isEmpty() or flag is False:\n current_state = stack.pop()\n while current_path[-1] != parent_dict[current_state]:\n current_path.pop()\n stack.push(current_state)\n\n for i in range(len(current_path)-1):\n final_actions.append(actions_dict[current_path[i],current_path[i+1]])\n\n\n return final_actions", "def dfs(i, adj_dict, edges, checked, cycle, start):\n for vertex in adj_dict[i]:\n pos = search_pos(i, vertex, edges, checked)\n if pos != -1:\n checked[pos] = True\n if vertex[0] == start and not (False in checked):\n cycle.append((vertex[0], i, vertex[1]))\n return True\n if dfs(vertex[0], adj_dict, edges, checked, cycle, start):\n cycle.append((vertex[0], i, vertex[1]))\n return True\n checked[pos] = False\n return False", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None:\n visited = set()\n visited.add(starting_vertex)\n print(starting_vertex)\n for neighb_vert in self.vertices[starting_vertex]:\n if neighb_vert not in visited:\n self.dft_recursive(neighb_vert, visited)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # current path stack\n path_stack = util.Stack()\n action_stack = util.Stack()\n path_stack.push(problem.getStartState())\n\n # visited (so don't )\n visited = []\n visited.append(problem.getStartState())\n\n i = 0\n while not path_stack.isEmpty():\n\n # check goal state\n if problem.isGoalState(path_stack.list[-1]): # check if goal\n return action_stack.list\n\n # get next possible state (choose first in list)\n successors = problem.getSuccessors(path_stack.list[-1])\n forward=False\n for successor in successors:\n ss,aa,_ = successor\n if ss not in visited:\n\n path_stack.push(ss)\n action_stack.push(aa)\n visited.append(ss) # you don't pop visited\n forward=True\n break\n\n # backtrack\n if forward==False:\n path_stack.pop()\n action_stack.pop()\n\n i+=1\n #if i==25:\n # import pdb; pdb.set_trace()\n #print(path_stack.list)", "def dfs_traversal(graph, s, goals=[]):\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n if v in goals:\n return visited\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n return visited\n\n \"\"\"\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n if w in goals:\n v = boundary.pop()\n visited += [v]\n return visited\n\"\"\"", "def dfs_recursion(self, tour, sque_v, gain):\n i = len(sque_v) // 2 # step i done already\n if i == self.max_depth:\n return\n dahuitou = (i + 1) % self.submove_size == 0\n v_2i_2, v_2i_1 = sque_v[-2], sque_v[-1]\n # step i+1: search for (v_2i, v_2ip1)\n for v_2i in self.candidates[v_2i_1]:\n if v_2i in sque_v: # disjunctivity criterion\n continue\n new_gain = gain + self.cost_d[v_2i_2, v_2i_1] - self.cost_d[v_2i_1, v_2i]\n if new_gain <= 0:\n continue\n for v_2ip1 in tour.neighbours(v_2i):\n if v_2ip1 in sque_v: # disjunctivity criterion\n continue\n if dahuitou:\n if tour.check_feasible(sque_v + [v_2i, v_2ip1]):\n if new_gain + self.cost_d[v_2i, v_2ip1] - self.cost_d[v_2ip1, sque_v[0]] > 0:\n return tour.k_exchange(sque_v 
+ [v_2i, v_2ip1])\n else:\n result = self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n if result is not None:\n return result\n else: # optional, can be deleted\n continue\n else:\n if new_gain + self.cost_d[v_2i, v_2ip1] - self.cost_d[v_2ip1, sque_v[0]] > 0 and \\\n tour.check_feasible(sque_v + [v_2i, v_2ip1]):\n return tour.k_exchange(sque_v + [v_2i, v_2ip1])\n else:\n result = self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n if result is not None:\n return result\n return", "def DFS(G: List, i: int, U: List) -> List:\n node = G[i]\n node.visited = True\n U.remove(i)\n for adj_node in node.adjacent:\n if not adj_node.visited:\n DFS(G, adj_node.value, U)\n return [G, U]", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n DFS(adjList, s, n)", "def dfs(graph, start):\n dfs_rec(graph, start, 0)", "def depthFirstSearch(problem):\n \n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in reversed(tempSuccList):\n successor.insert(0,succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])", "def dft_recursive(self, starting_vertex, visited = None):\n \"\"\"\n Check if Vertex is in visited\n if NOT visited, add to visited set\n Call dft_recursive on every neighbor \n \n\n \"\"\"\n # 1) base case >> where to stop recursion\n # 2) calls itself from within\n # 3) each iteration approaches base case\n\n # 1) base case >> where to stop recursion\n\n # init a set that persists after recursions loops to save visited\n if visited == None:\n visited = set()\n\n if starting_vertex not in visited: # 1) & 3) Check if vertex has NOT been visited\n visited.add(starting_vertex) # if True, add to visited set\n\n print(starting_vertex)\n\n # perform recursion on neighbor\n for n in self.get_neighbors(starting_vertex):\n self.dft_recursive(n, visited) # 2) ", "def dfs(state):\n\n #if the current state is a goal state, then return it in a list\n if state.is_goal():\n return [state]\n else:\n # else, recurse on the possible next states\n result = []\n \n for s in state.next_states():\n # append all of the s\n result += dfs(s)\n \n return result", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # Frontier stored in a Stack\n frontier = util.Stack()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: 
(current coordinates, [path taken to get there]) \n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n \n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n \n frontier.push((coordinates, pathTaken + [direction]))\n\n\n util.raiseNotDefined()", "def all_nodes_dfs(log_T, initial_state, min_score, sub_info, max_depth=1000000000000000000, maxtraversals=1000000000000000000):\n # default argument for sub_info: empty_sub_info = (np.array([], dtype=int), np.array([], dtype=int), 1000000000000000000)\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n order = np.zeros(log_T.shape, np.int64)\n for i in xrange(order.shape[1]):\n order[i] = (-log_T[i]).argsort()\n n_states = log_T.shape[0]\n node = [order[initial_state, 0]] # most likely first node\n node_idx = [0]\n lengths_dfs = [-1.0]\n nodes_dfs = [[-1, ]]\n for it in xrange(maxtraversals):\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n lengths_dfs.append(-score)\n nodes_dfs.append(list(node))\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n break # end of the generator, can't increase even the root\n else:\n assert False, \"Number of traversals exceeded\"\n\n return lengths_dfs[1:], nodes_dfs[1:]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n startState=problem.getStartState()\n currentLocation = startState \n\n #for GSA implementation\n exploredStates = []\n exploredStates.append(startState)\n \n #To transform the graph to stack for better access in DFS\n frontierStack = util.Stack()\n for frontier in problem.getSuccessors(startState):\n frontierRoute = frontier + (frontier[1],)\n frontierStack.push(frontierRoute)\n\n currentRoute = []\n\n #start DFS\n while not(frontierStack.isEmpty()):\n currentStage = frontierStack.pop()\n currentState = currentStage[0]\n currentRoute = currentStage[3]\n\n if problem.isGoalState(currentState): \n break\n if 
currentState not in exploredStates:\n for frontier in problem.getSuccessors(currentState):\n if frontier[0] not in exploredStates:\n nextRoute = currentRoute + \",\" + frontier[1]\n frontierRoute = frontier + (nextRoute,)\n frontierStack.push(frontierRoute)\n exploredStates.append(currentState)\n \n return currentRoute.split(\",\")\n\n util.raiseNotDefined()", "def dfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n stack = collections.deque()\n stack.append(source)\n while stack:\n vertex = stack.pop()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n visited.add(vertex)\n neighbors = [n for n in get_neighbors(vertex) if n not in visited]\n if neighbors:\n stack.append(vertex)\n stack.append(neighbors[0])\n parents[neighbors[0]] = vertex\n return []", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n mystack = util.Stack()\n startNode = (problem.getStartState(), '', 0, [])\n mystack.push(startNode)\n visited = set()\n while mystack :\n node = mystack.pop()\n state, action, cost, path = node\n if state not in visited :\n visited.add(state)\n if problem.isGoalState(state) :\n path = path + [(state, action)]\n break;\n succNodes = problem.expand(state)\n for succNode in succNodes :\n succState, succAction, succCost = succNode\n newNode = (succState, succAction, cost + succCost, path + [(state, action)])\n mystack.push(newNode)\n actions = [action[1] for action in path]\n del actions[0]\n return actions", "def depthFirstSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n \n visitedlist = []\n st = Stack()\n outputlist = []\n st.push(problem.getStartState())\n visitedlist.append(problem.getStartState())\n recurseDFS(st,problem,visitedlist)\n if st.isEmpty():\n print \"No Path exist\"\n else:\n while not st.isEmpty():\n value = st.pop()\n if len(value) == 2:\n continue\n if value[1] == 'South':\n outputlist.append(s)\n elif value[1] == 'North':\n outputlist.append(n)\n elif value[1] == 'East':\n outputlist.append(e)\n elif value[1] == 'West':\n outputlist.append(w)\n \n return outputlist[::-1]", "def depth_first_search(problem):\n fringe = util.Stack()\n return general_search(problem, fringe)", "def dfsIterative(m, start):\n \n s = [start] # list, use as stack\n visited = {start} # set\n out = []\n \n while len(s) > 0:\n cur = s.pop()\n pr('cur')\n out.append(cur)\n \n for vertex, connected in enumerate(m[cur]):\n # vertex is column in matrix (i)\n # connected is the True/False, 1 or 0 value\n if connected and not vertex in visited:\n s.append(vertex)\n visited.add(vertex)\n return out", "def dfs(self, nums, cur_sum, S):\n ways = 0\n nums_length = len(nums)\n if nums_length == 0:\n if cur_sum == S:\n return 1\n return 0\n\n multiple = [1, -1]\n for value in multiple:\n new_num = nums[nums_length - 1]\n new_nums = nums[0:nums_length - 1]\n new_sum = cur_sum + new_num * value\n ways = ways + self.dfs(new_nums, new_sum, S)\n\n return ways", "def depthFirstSearch(problem):\n #Initializing variables\n fringe = util.Stack()\n #Creating visited list\n visited = []\n #Pushing start state to Stack\n fringe.push((problem.getStartState(), []))\n #Adding start state to visited list\n visited.append(problem.getStartState())\n \n #Popping point from the stack\n while fringe.isEmpty() == False:\n state, actions = fringe.pop()\n #Getting successor nodes\n for next in problem.getSuccessors(state):\n newstate = next[0]\n newdirection = next[1]\n #Pushing successor 
nodes to the stack and appending to visited\n if newstate not in visited:\n if problem.isGoalState(newstate):\n return actions + [newdirection] \n else:\n fringe.push((newstate, actions + [newdirection]))\n visited.append(newstate)\n\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n\n explored = set()\n frontier = []\n start_state = problem.getStartState()\n frontier.append(start_state)\n parent_hash = {}\n parent_hash[start_state] = (None, None)\n\n def get_path(state):\n path_stack = util.Stack()\n actions = []\n current = state\n while parent_hash[current][0] is not None:\n path_stack.push(parent_hash[current][0])\n current = parent_hash[current][1]\n while not path_stack.isEmpty():\n actions.append(path_stack.pop())\n\n return actions\n\n while len(frontier):\n node = frontier.pop()\n if problem.isGoalState(node):\n return get_path(node)\n explored.add(node)\n for state, action, _ in problem.getSuccessors(node):\n if state not in explored and state not in frontier:\n parent_hash[state] = (action, node)\n frontier.append(state)", "def dfs(n, g, val=1):\n if n in vals: return \n vals[n] = val, g\n for nn, w in graph.get(n, []): dfs(nn, g, w*val)", "def DFS(adj): # adj is the list of vertices in graph G\n\n global cc\n global visited\n\n for v in range(len(adj)): # adjacency list has length == number of nodes\n visited[v] = False\n cc = 1\n\n for v in range(len(adj)):\n if not visited[v]:\n explore(v)\n # increment connected component count after each return from explore()\n cc = cc + 1 # only increment for each unvisited node explored here\n return cc", "def dfs(self):\n def add_to_stack(stack, done, src, path):\n for dest in self.edges[src]:\n if dest not in done:\n for step_path in self.edges[src][dest]:\n stack.append((dest, step_path, path))\n done.add(src)\n stack = [] # Stack of steps to take\n done = set() # Nodes we've visited\n # Seed the stack with all edges from the start cell.\n add_to_stack(stack, done, self.start_cell, '')\n while stack:\n (src, step_path, path) = stack.pop()\n path = path + step_path\n if src == self.exit_cell:\n return path\n add_to_stack(stack, done, src, path)\n return '' # No path found.", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (stack y set)\n openNodes = util.Stack()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Metemos el nodo en la pila\n openNodes.push(node)\n\n #Iteramos para cada nodo de la pila\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #Sacamos el nodo de arriba de la pila\n node = openNodes.pop()\n if problem.isGoalState(node.name):\n break\n else: #Expandimos los nodos sucesores del nodo n si no estan en closed\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n #Metemos al sucesor en la pila\n openNodes.push(succNode)\n #Metemos el nodo n en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringeList = util.Stack()\n print \"fringeList\",fringeList\n closedList = {str(problem.getStartState()): ([])} #Hash Map to maintain state to path\n print \"closed list:\", closedList\n isGoalStateArrived = False\n\n # Push start state into fringeList\n fringeList.push((problem.getStartState()))\n\n while not isGoalStateArrived and not fringeList.isEmpty():\n 
currentNode = fringeList.pop()\n print \"currentNode\",currentNode\n currentNodePath = closedList[str(currentNode)]\n print \"currentNodepath:\",currentNodePath\n # Explore children\n childrenOfCurrentNode = problem.getSuccessors(currentNode)\n print \"childrenOfCurrentNode:\",childrenOfCurrentNode\n for childNode in childrenOfCurrentNode:\n if str(childNode[0]) not in closedList:\n path = copy.copy(currentNodePath)\n path.append(childNode[1])\n print \"child [0] %s, child [1] %s\", childNode[0],childNode[1]\n print \"path \", path\n fringeList.push(childNode[0])\n closedList[str(childNode[0])] = path # Put parent node in closed List\n if problem.isGoalState(childNode[0]):\n isGoalStateArrived = True\n goalState = childNode[0]\n break\n\n if isGoalStateArrived:\n #print closedList[str(problem.getStartState())]\n return closedList[str(goalState)]\n \"util.raiseNotDefined()\"", "def DFS1(digraph, start, end, path, maxTotalDist, maxDistOutdoors):\n listP=[]\n path = path + [start]\n #print 'Current dfs path:', printPath(path)\n if start == end:\n listP.append(path)\n for node in digraph.childrenOf(start):\n if node not in path: #avoid cycles\n newPath = DFS1(digraph,node,end,path, maxTotalDist, maxDistOutdoors)\n if newPath != None:\n if outDisAdd(digraph, newPath) <maxDistOutdoors:\n listP.append(newPath)\n if listP==[]:\n raise ValueError\n else:\n return whichShortest(digraph, listP, maxTotalDist )", "def dfs(graph, start):\n\tstack,path = [start],[]\n\twhile stack:\n\t\tele = stack.pop()\n\t\tif ele in path:\n\t\t\tcontinue\n\t\telse:\n\t\t\tpath.append(ele)\n\t\t\tfor neighbours in graph[ele]:\n\t\t\t\tstack.append(neighbours)\n\n\treturn path", "def solve(self):\n self.left -= len(self.nodes)\n \n def depths(x,depth = 0):\n depth+=1\n for y in self.graph[x]:\n if y in self.nodes:\n self.nodes.remove(y)\n depth = depths(y,depth)\n return depth\n \n while len(self.nodes):\n x = self.nodes.pop()\n self.firstGen.append(depths(x))\n #print self.graph\n #print self.nodes\n #print self.firstGen", "def depthFirstSearch(problem):\n marcado = set()\n pilha = util.Stack()\n pilha.push((problem.getStartState(), []))\n while not pilha.isEmpty():\n posicao, movimento = pilha.pop()\n if problem.isGoalState(posicao):\n return movimento\n if posicao in marcado:\n continue\n marcado.add(posicao)\n candidatos = problem.getSuccessors(posicao)\n for candidato, acao, custo in candidatos:\n pilha.push((candidato, movimento + [acao]))\n return []", "def dfs(node):\n nonlocal ans \n if not node: return 0\n lx, rx = dfs(node.left), dfs(node.right) \n if not node.left or node.left.val != node.val: lx = 0\n if not node.right or node.right.val != node.val: rx = 0 \n ans = max(ans, 1 + lx + rx)\n return 1 + max(lx, rx)", "def dft_recursive(self, starting_vertex):\n \n visited = []\n\n def helper(vert, visited):\n visited.append(vert)\n print(vert)\n\n for child in self.vertices[vert]:\n if child not in visited:\n helper(child, visited)\n\n helper(starting_vertex, visited)", "def depthFirstSearch(problem):\n\n\n \"*** YOUR CODE HERE ***\"\n st = util.Stack()\n strt = problem.getStartState()\n st.push(strt) \n visited = []\n came_from ={}\n came_from [strt] =(None,None)\n\n while not st.isEmpty():\n state = st.pop()\n if state in visited :\n continue\n visited.append(state)\n if problem.isGoalState(state) :\n break\n nodes = problem.getSuccessors(state)\n for (successor,action,cost) in nodes:\n if successor not in visited :\n st.push(successor)\n came_from[successor] = (state , action) \n \n # exit while\n actions = 
[]\n while(state != strt) :\n (parent,action) =came_from[state]\n state = parent\n actions.append(action)\n actions.reverse()\n return actions", "def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)", "def DFS(self, nDepth, treenode, state):\n \n visited = []\n visited.insert(0, (state, treenode))\n \n for index in range(0, nDepth-1): \n actions = self.priorProb(state)\n treenode.expansion(actions)\n treenode.updateU_value(actions)\n treenode, action = treenode.selection() \n state = state.do_move(action).copy()\n visited.insert(0, (state, treenode)) \n \n for index in range(0, len(visited)-1): \n if(visited[index][1].isLeaf() == True):\n value = self.leafEvaluation(visited[index][0])\n else: \n value = visited[index][1].backUp(value)\n visited[-1][1].updateQ_value(value)\n visited[-1][1].updateVisits()\n return visited[-1][1]", "def dfs_recursion(self, tour, sque_v, gain):\n i = int(len(sque_v)/2) # step i done already\n dahuitou = (i + 1) % self.max_exchange == 0\n v_2i_2, v_2i_1 = sque_v[-2], sque_v[-1]\n # step i+1: search for (v_2i, v_2ip1)\n for v_2i in self.candidates[v_2i_1]:\n if v_2i in sque_v: # disjunctivity criterion\n continue\n new_gain = gain + self.cost[v_2i_2, v_2i_1] - self.cost[v_2i_1, v_2i]\n if new_gain <= 0:\n continue\n for v_2ip1 in tour.neighbours(v_2i):\n if v_2ip1 in sque_v: # disjunctivity criterion\n continue\n if dahuitou:\n if tour.check_feasible(sque_v+[v_2i, v_2ip1]):\n if new_gain + self.cost[v_2i, v_2ip1] - self.cost[v_2ip1, sque_v[0]] > 0:\n tour.k_exchange(sque_v + [v_2i, v_2ip1])\n return tour\n else:\n return self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n else: # optional, can be deleted\n continue\n else:\n if new_gain + self.cost[v_2i, v_2ip1] - self.cost[v_2ip1, sque_v[0]] > 0 and \\\n tour.check_feasible(sque_v+[v_2i, v_2ip1]):\n tour.k_exchange(sque_v + [v_2i, v_2ip1])\n return tour\n else:\n return self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n # if (i + 1) % self.max_exchange == 0:\n # continue\n # return self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n # gain += - self.cost[v_2i_1, v_2i] + self.cost[v_2i, v_2ip1]\n # if gain - self.cost[v_2ip1, sque_v[0]] > 0:\n # # check feasibility immediately\n # if tour.check_feasible(sque_v+[v_2i, v_2ip1]):\n # tour.k_exchange(sque_v + [v_2i, v_2ip1])\n # return tour\n # # if not feasible, check whether stop or search for next two nodes\n # if (i+1) % self.max_exchange == 0:\n # continue\n # delta_gain = self.cost[sque_v[2 * i - 2], sque_v[2 * i - 1]] - self.cost[sque_v[2 * i - 1], v_2i]\n # return self.dfs_recursion(tour, sque_v+[v_2i, v_2ip1], gain+delta_gain)\n return", "def dfs(self):\n\n stack = [self.root]\n\n while stack:\n node = stack[-1]\n\n if node.goal:\n return True\n\n if not node.visited:\n node.visited = True\n\n for adj_node in 
self.return_adj_nodes(node):\n if adj_node and not adj_node.visited and not adj_node.wall:\n stack.append(adj_node)\n break\n else:\n stack.pop()\n\n return False", "def dfs(node, all_nodes, depth):\r\n node.depth = depth\r\n to_return = [node,]\r\n for subnode in all_nodes:\r\n if subnode.parent and subnode.parent.id == node.id:\r\n to_return.extend(dfs(subnode, all_nodes, depth+1))\r\n return to_return", "def depth_fs(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n frontier = Stack()\n prev_node = dict()\n explored = []\n\n frontier.put(start)\n prev_node[start] = None\n\n while not frontier.empty():\n current = frontier.get()\n \n if current == goal:\n return (reconstruct_path(goal, prev_node), explored[1:]) # [1:] to remove start from list\n\n grid.set_cell(current, Cell(val = CellType.searched))\n explored.append(current)\n\n for neighbor in grid.get_unexplored_neighbors(current):\n prev_node[neighbor] = current\n frontier.put(neighbor)\n\n # grid.set_cell(neighbor, Cell(val = CellType.searched))\n \n # If frontier empty but goal was never reached, no solution was found\n return ([], explored[1:]) # [1:] to remove start from list", "def dfs(graph, v, f, leader_list, visited, leader):\n global time\n visited[v] = True\n leader_list[v] = leader\n for t in graph[v]:\n if not visited[t]:\n dfs(graph, t, f, leader_list, visited, leader)\n time += 1\n f[v] = time" ]
[ "0.7127771", "0.71177804", "0.70137775", "0.6997336", "0.6967545", "0.6946851", "0.6918456", "0.69104403", "0.68680465", "0.68369263", "0.68138105", "0.6810762", "0.6806383", "0.6798006", "0.6774225", "0.675221", "0.673416", "0.67276394", "0.6717568", "0.66977406", "0.6696019", "0.6686872", "0.6671533", "0.6664095", "0.66622514", "0.665985", "0.6647888", "0.6644246", "0.6638335", "0.662451", "0.6600796", "0.65994734", "0.65988034", "0.65865743", "0.6586197", "0.65732634", "0.656958", "0.6564751", "0.65602", "0.65602", "0.65593827", "0.65577507", "0.65513176", "0.6550313", "0.6546543", "0.65446055", "0.65376884", "0.6529837", "0.65246713", "0.65240824", "0.6520974", "0.64963835", "0.64940786", "0.64927", "0.647925", "0.64726645", "0.64720535", "0.64332825", "0.6417803", "0.64082557", "0.64025015", "0.63996965", "0.6398591", "0.6392254", "0.63875735", "0.6386746", "0.63823533", "0.6378848", "0.63745594", "0.6371518", "0.6367102", "0.6365903", "0.63628703", "0.63470185", "0.634329", "0.6343075", "0.6341865", "0.63267297", "0.6323475", "0.6321033", "0.6315681", "0.6312712", "0.6312587", "0.6312237", "0.629309", "0.62827975", "0.628273", "0.6276546", "0.6269654", "0.6269285", "0.62659293", "0.6265789", "0.62617344", "0.6251534", "0.62486887", "0.62462026", "0.6243667", "0.62425727", "0.6240046", "0.62282765", "0.621114" ]
0.0
-1
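The candidate implementations stored in the record above are, almost without exception, variations on one idea: depth-first traversal driven either by recursion or by an explicit stack plus a visited set. As a neutral illustration only (not part of any dataset row), a minimal version of that shared pattern, assuming the graph is given as a plain adjacency dictionary, might look like this:

# Illustrative sketch: the explicit-stack DFS pattern that most of the
# candidate snippets above are variations of. The graph representation
# (adjacency dict) and the function name are assumptions for this example.
def dfs_iterative(graph, start):
    visited = set()
    order = []                 # vertices in the order they are first reached
    stack = [start]
    while stack:
        vertex = stack.pop()   # LIFO: take the deepest frontier vertex first
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        for neighbor in graph.get(vertex, []):
            if neighbor not in visited:
                stack.append(neighbor)
    return order

# Usage example:
# dfs_iterative({"A": ["B", "C"], "B": ["D"], "C": [], "D": []}, "A")
# returns ["A", "C", "B", "D"]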
Given a binary tree, find its minimum depth. The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.
def minDepth(self, root: TreeNode) -> int: return self.bfs(root)
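The stored document above delegates to a self.bfs helper that is not part of the document string itself (a level-order variant appears only among the candidate snippets below). As an illustration only, not part of the dataset row, a self-contained sketch of that BFS approach might look like the following; the TreeNode class and the standalone function name min_depth are assumptions made for the example:

# Illustrative sketch, assuming a standard TreeNode(val, left, right) node type.
from collections import deque
from typing import Optional


class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def min_depth(root: Optional[TreeNode]) -> int:
    # Level-order traversal: the first leaf dequeued lies at the minimum depth.
    if not root:
        return 0
    queue = deque([(root, 1)])
    while queue:
        node, depth = queue.popleft()
        if not node.left and not node.right:
            return depth
        if node.left:
            queue.append((node.left, depth + 1))
        if node.right:
            queue.append((node.right, depth + 1))
    return 0  # unreachable for a non-empty tree

# Usage example:
# min_depth(TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7))))
# returns 2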
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_depth(node):\n if not node:\n return 0\n elif (not node.left) and (not node.right):\n # found leaf\n return 1\n elif not node.left:\n # if the root has only 1 child, this prevents the minimum depth from\n # equaling zero\n return min_depth(node.right) + 1\n elif not node.right:\n return min_depth(node.left) + 1\n return min(min_depth(node.left), min_depth(node.right)) + 1", "def min_depth(t):\n if is_leaf(t):\n return 0\n h = float('inf')\n for b in branches(t):\n # Still works fine!\n h = min(h, 1 + min_depth(b))\n return h", "def max_depth(root):\n # basic case\n if root is None:\n return 0\n\n # breadth-first traversal\n queue = collections.deque([root])\n depth = 0\n while queue:\n queue_size = len(queue)\n for i in range(queue_size):\n curr = queue.popleft()\n if curr.left is not None:\n queue.append(curr.left)\n if curr.right is not None:\n queue.append(curr.right)\n depth += 1\n\n return depth", "def find_depth_tree(root):\n if root is not None:\n max_depth = 0\n if root.branches is None:\n return 1\n else:\n for value in root.branches.values():\n max_depth = max(max_depth, DecisionTree.find_depth_tree(value))\n return 1 + max_depth\n else:\n return 1", "def depth(self):\n\t\tdef helper(tree, d):\n\t\t\tif tree.isLeaf():\n\t\t\t\treturn d\n\t\t\telse:\n\t\t\t\td_left=helper(tree.left, d+1) if tree.hasLeftChild() else 0\n\t\t\t\td_right=helper(tree.right, d+1) if tree.hasRightChild() else 0\n\t\t\t\treturn max(d_left, d_right)\n\n\t\treturn helper(self.root, 1) if not self.isEmpty() else 0", "def get_depth(self):\n if self.root is None:\n return 0\n else:\n node_queue = list()\n node_queue.append(self.root)\n depth = 0\n while len(node_queue):\n q_len = len(node_queue)\n while q_len:\n q_node = node_queue.pop(0)\n q_len = q_len - 1\n if q_node.left is not None:\n node_queue.append(q_node.left)\n if q_node.right is not None:\n node_queue.append(q_node.right)\n depth = depth + 1\n return depth", "def treeLevel(root):\n\n if not root:\n return 0\n else:\n return 1+max(treeLevel(root.left),treeLevel(root.right))", "def max_depth(node):\n if not node:\n return 0\n return max(max_depth(node.left), max_depth(node.right)) + 1", "def tree_depth(tree):\r\n if(tree==None):\r\n return 0\r\n elif(left(tree)!=None):\r\n return 1+tree_depth(left(tree))\r\n elif(right(tree)!=None):\r\n return 1+tree_depth(right(tree))\r\n else:\r\n return 0", "def maxDepth(node):\n\tif node is None: \n\t\treturn 0 \n\telse: \n\t\tlDepth=maxDepth(node.left)\n\t\trDepth=maxDepth(node.right) \n\t\tif lDepth>rDepth: \n\t\t return lDepth+1\n\t\telse: \n\t\t return rDepth+1", "def depth(self):\n L, R = 0,0\n if self.left:\n L = self.left.depth()\n if self.right:\n R = self.right.depth()\n\n return 1 + max(L, R)", "def dfs(self, root: TreeNode) -> int:\n if not root:\n return 0\n\n def dfs(node):\n if not node:\n return float('inf')\n if not node.left and not node.right:\n return 1\n return min(dfs(node.left), dfs(node.right)) + 1\n\n return dfs(root)", "def test_MaxDepth_SimpleTree(self):\n\n root = TreeNode(0)\n root.addLeft(1)\n root.addRight(5)\n root.left.addLeft(2)\n root.left.addRight(3)\n root.left.right.addRight(4)\n root.right.addRight(6)\n\n self.assertEqual(findMaxDepthDFS(root),3)", "def depth(self):\n result = 0\n if self.val is None:\n return result\n return max(self.left.depth(), self.right.depth()) + 1", "def max_depth(self):\n if len(self.children) == 0:\n return 1\n else:\n child_depths = [c.max_depth() for c in self.children]\n return 1 + max(child_depths)", "def min_len(BST):\r\n if 
isinstance(BST,tuple):\r\n return min_len(BST[0]) + min_len(BST[1])\r\n else:\r\n return BST[0]", "def depth(self, node):\n if node is self.root:\n return 0\n return nx.shortest_path_length(self.graph, self.root, node)", "def depth(self, d=0):\n d1 = 0\n d2 = 0\n if self.leftChild:\n d1 = max(self.leftChild.depth(d + 1), d)\n if self.rightChild:\n d2 = max(self.rightChild.depth(d + 1), d)\n return max(d1, d2, d)", "def max_tree_depth(self):\n\n depths = np.array([leaf.tree_depth for leaf in self.leaves])\n\n return depths.max()", "def depth(self):\n left_depth = self.left.depth() if self.left is not None else 0\n right_depth = self.right.depth() if self.right is not None else 0\n return max(left_depth, right_depth) + 1", "def depth(self, node):\n\n if not node:\n return 0\n else:\n l_depth = self.depth(node.left)\n r_depth = self.depth(node.right)\n\n if l_depth > r_depth:\n return l_depth + 1\n else:\n return r_depth + 1", "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def find_min(self) -> TreeNode:\n node = self.root\n while True:\n if not node.left:\n return node\n node = node.left", "def minKeyTree(root):\n try:\n min = None\n if (root is not None):\n if (root['left'] is None):\n min = root\n else:\n min = minKeyTree(root['left'])\n return min\n except Exception as exp:\n error.reraise(exp, 'BST:minKeyNode')", "def max_depth(self) -> int:\n return 0", "def diameterOfBinaryTree(self, root):\n self.max_length = 0\n def maxDepth(root):\n if not root:\n return 0\n left_branch = maxDepth(root.left)\n right_branch = maxDepth(root.right)\n self.max_length = max(self.max_length, left_branch + right_branch)\n return max(left_branch, right_branch) + 1\n maxDepth(root)\n return self.max_length", "def node_depths_recursive(root):\n depth_sums = 0\n depth_sums = sum_node_depths(root, depth_sums, 0)\n return depth_sums", "def get_min_depth(l_k):\n return max(l_k.values())", "def depth(self):\n if self.children is None:\n return 1\n\n max_depth = 0\n for child in self.children:\n if child is None:\n return 1\n child_depth = child.depth()\n if child_depth > max_depth:\n max_depth = child_depth\n\n return max_depth+1", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def depth(self) -> int:\n depth = 0\n node = self\n while node:\n depth += 1\n node = node.parent\n return depth", "def node_depths_recursive_original(root, depth=0):\n # Base case\n if root is None:\n return 0\n\n return depth + node_depths_recursive_original(root.left, depth + 1) + \\\n node_depths_recursive_original(root.right, depth + 1)", "def get_max_depth(clade):\n depths = clade.depths()\n if not max(depths.values()):\n depths = clade.depths(unit_branch_lengths=True)\n return max(depths.values()) * tree_depth / actual_tree_depth", "def bfs(self, root: TreeNode) -> int:\n if not root:\n return 0\n queue = deque([(root, 1)])\n while queue:\n node, level = queue.popleft()\n if not node.left and not node.right:\n return level\n if node.left:\n queue.append((node.left, level + 1))\n if node.right:\n queue.append((node.right, level + 1))\n return -1", "def get_max_depth_node(nodes):\n curr = nodes[0]\n for i in range(0, len(nodes)):\n if nodes[i].depth > curr.depth:\n curr = nodes[i]\n return curr", "def find_smallest(node):\n smallest = node.value\n\n while node.left is not None:\n node = node.left\n smallest = node.value\n\n return smallest", "def nbr_nodes(tree_depth):\n return 
2**(tree_depth+1)-1", "def minimum_spanning_tree(graph):\n mst = [] # initialize a list to record the edges\n weight = 0 # initialize the total weight to zero\n mst.append(0) # add 0 to the ordering of vertices\n while len(mst) != len(graph): # while all vertices have not been added yet\n min2 = float('inf') # initialize to negative infinity\n node_add = 0\n new_w = 0\n for j in mst: # for every node in the graph\n inner_dict = graph[j] # retrieve the inner dictionary\n for k in inner_dict: # for every node in the inner dictionary\n if inner_dict[k] < min2 and k not in mst: # get the minimum edge\n min2 = inner_dict[k]\n new_w = min2\n node_add = k\n mst.append(node_add) # append the next node\n weight += new_w # add the weight to the tally\n return mst, weight # return the final ordering and the total weight", "def minimal_tree(array: list):\n bst = BST()\n def build(l, r):\n if l == r: bst.insert(array[l]); return\n m = (l+r)//2\n # insert into the tree\n bst.insert(array[m])\n # build recursively\n build(l, m)\n build(m+1, r)\n build(0, len(array)-1)\n return bst", "def minDiffInBST(self, root: TreeNode) -> int:\n prev = [-float('inf')]\n curr_min = [float('inf')]\n def inorder_traverse(node, prev, curr_min):\n if node:\n inorder_traverse(node.left, prev, curr_min)\n curr_min[0] = min(curr_min[0], node.val - prev[0])\n prev[0] = node.val\n inorder_traverse(node.right, prev, curr_min)\n \n inorder_traverse(root, prev, curr_min)\n return curr_min[0]", "def depth(self):\n ch = self.children\n return 0 if not ch else 1 + max([c.depth for c in ch])", "def depth(x):\n return max(int(x * depth_multiplier), 8)", "def depth(self):\n return max(n.depth for n in self.iternodes())", "def _calc_tree_depth(refinement_tree):\n if not refinement_tree:\n return 0\n\n depth = 0\n for k, v in refinement_tree.items():\n d = _calc_tree_depth(v)\n if d > depth:\n depth = d\n\n return 1 + depth", "def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:\n q = deque([(root, 1)])\n max_width = 1\n\n while len(q) > 0:\n temp_q = deque()\n local_max_width = float('-inf')\n local_min_width = float('+inf')\n\n for (node, position) in q:\n local_max_width = max(local_max_width, position)\n local_min_width = min(local_min_width, position)\n if node.left:\n temp_q.append((node.left, position * 2 - 1))\n if node.right:\n temp_q.append((node.right, position * 2))\n max_width = max(max_width, local_max_width - local_min_width + 1)\n q.clear()\n q = temp_q\n\n return max_width", "def height(root:Node) -> int:\n current = root.left\n depth = 0\n maxdepth = [0]\n #track the value and whether it has a branchpoint or not (bool)\n seen = dict()\n\n #do the left side first, then the right\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and (current.right is not None):\n seen.update({current.val:True})\n else:\n seen.update({current.val:False})\n depth +=1\n maxdepth.append(depth)\n if current.left is not None:\n current = current.left\n elif current.right is not None:\n current = current.right\n else:\n current = None\n\n print(' maxdepth left so far is {}'.format(maxdepth))\n\n current = root.right\n depth = 0\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and (current.right is not None):\n seen.update({current.val: True})\n else:\n seen.update({current.val: False})\n depth +=1\n maxdepth.append(depth)\n if current.right is not None:\n current = current.right\n elif current.left is not None:\n current = current.left\n else:\n 
current = None\n print(' maxdepth right so far is {}'.format(maxdepth))\n\n return max(maxdepth)", "def get_depth(self, current, n):\n if current is not None:\n return max(self.get_depth(current.left, n + 1), self.get_depth(current.right, n + 1))\n else:\n return n", "def levels(root):\n # if Tree is empty\n if not root:\n return 0 \n\n #if leaf node return 1 (Bcz. leaf node is present at level 1)\n if root.left==None and root.right==None:\n return 1\n\n #recursively compute the levels of left and right subtree \n left_subtree_levels=levels(root.left)\n right_subtree_levels=levels(root.right)\n\n #compute the overall levels of tree\n total_levels =max(left_subtree_levels,right_subtree_levels)+1\n\n return total_levels", "def helper(root):\n if not root or not root.children: return 0\n \n if len(root.children) == 1:\n depth = 1 + helper(root.children[0])\n self.diameter = max(self.diameter, depth)\n return depth\n \n else:\n depths = [1+helper(child) for child in root.children]\n max1, max2 = 0, 0\n for depth in depths:\n if depth >= max1:\n max1, max2 = depth, max1\n elif depth < max1 and depth > max2:\n max2 = depth\n self.diameter = max(self.diameter, max1+max2)\n return max1", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def depth(self):\n if not self.root:\n return None\n else:\n return self.root.balance_number", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def depth(self, p):\n if self.is root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def deep_min(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[0]\r\n return node.keys[0] if node.keys else None", "def min_span_tree(adjacency_matrix, indices_to_connect):\n\n if len(indices_to_connect) > 1:\n Root = indices_to_connect[0]\n M = Prim(adjacency_matrix, Root)\n adjacency_matrix, W, Path, Degree, TreeNbr = M.mst_prim(adjacency_matrix,\n [Root], [], M.degree, M.tree_nbr)\n\n return W, Path, Degree, TreeNbr", "def getDepthToTop(self):\n mindep = 9999999\n for quad in self._quadrilaterals:\n P0, P1, P2, P3 = quad\n depths = np.array([P0.depth, P1.depth, P2.depth, P3.depth])\n if np.min(depths) < mindep:\n mindep = np.min(depths)\n return mindep", "def minDiffInBST2(self, root: TreeNode) -> int:\n def flatten(node, all_nodes):\n if node:\n flatten(node.left, all_nodes)\n all_nodes.append(node.val)\n flatten(node.right, all_nodes)\n \n nodes = []\n flatten(root, nodes)\n diff = nodes[1] - nodes[0]\n for i in range(2, len(nodes)):\n diff = min(diff, nodes[i] - nodes[i-1])\n return diff", "def minimax(node,depth):\n if node.isLeaf():\n return node.evaluate(),None\n elif node.isMax:\n max_score = float(\"-inf\")\n max_path = None\n for C in node.children():\n score,path = minimax(C,depth+1)\n if score > max_score:\n max_score = score\n max_path = C.name,path\n return max_score,max_path\n else:\n min_score = float(\"inf\")\n min_path = None\n for C in node.children():\n score,path = minimax(C,depth+1)\n if score < min_score:\n min_score = score\n min_path = C.name,path\n return min_score,min_path", "def node_depths_while(root):\n sum_of_depths = 0\n stack = [{\"node\": root, \"depth\": 0}]\n\n while len(stack) > 0:\n node_info = stack.pop()\n node, 
depth = node_info[\"node\"], node_info[\"depth\"]\n\n if node is None:\n continue\n\n sum_of_depths += depth\n stack.append({\"node\": node.left, \"depth\": depth + 1})\n stack.append({\"node\": node.right, \"depth\": depth + 1})\n\n return sum_of_depths", "def _tree_depth(self):\n return self._flat_data._tree_depth()", "def test_depth(known_bst):\n assert known_bst[0].depth() == 3", "def depth(self) -> int:\n if len(self.children) == 0:\n return 0\n\n # Regresar la altura máxima de sus hijos más uno\n return max([n.depth() for n in self.children]) + 1", "def get_max_depth(self):\n return self.MAX_DEPTH", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] != -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)", "def depth(self):\n if self.size == 0:\n return 0\n return int(math.log(self.size, 2)) + 1", "def kth_smallest(root, k=1):\n\n node_status = defaultdict(lambda: 'unvisited')\n\n count = 0\n stack = [root] # A list is a stack.\n\n while len(stack) != 0:\n do_increment = False\n current_node = stack[-1] # ie peek\n\n node_status[current_node] = 'visited'\n\n if current_node.left is not None:\n\n left_status = node_status[current_node.left]\n if left_status == 'exited':\n stack.pop()\n do_increment = True\n\n elif left_status == 'unvisited':\n stack.append(current_node.left)\n continue\n else:\n stack.pop()\n do_increment = True\n\n if current_node.right is not None:\n stack.append(current_node.right)\n\n if do_increment:\n node_status[current_node] = 'exited'\n count += 1\n if count == k:\n return current_node.key\n\n raise IndexError(\"Tree is too small! 
Only found: {} nodes\".format(count))", "def depth(self):\n if self.parent is None:\n return 0\n else:\n return self.parent.depth() + 1", "def max_depth(self) -> int:\n return pulumi.get(self, \"max_depth\")", "def lower_binary_tree(self):\n return self.min_linear_extension().binary_search_tree_shape(left_to_right=False)", "def bfs_w_depth(tree):\n visited = []\n frontier = [(0, tree)]\n while frontier:\n depth, tree = frontier.pop(0)\n if tree is not None:\n visited.append((depth, tree[0]))\n frontier.append((depth + 1, tree[1]))\n frontier.append((depth + 1, tree[2]))\n return visited", "def max_depth(self):\r\n lvl = 1\r\n has_lvl_desc = True\r\n while has_lvl_desc:\r\n num_children = len(self.level_n_descendants(lvl))\r\n if num_children==0:\r\n has_lvl_desc = False\r\n else:\r\n lvl+=1\r\n return lvl-1", "def depth(self,p):\n return 0 if self.is_root(p) else 1 + self.depth(self.parent(p))", "def heightTree(root):\n try:\n if (root is None):\n return -1\n else:\n return 1 + max(heightTree(root['left']), heightTree(root['right']))\n except Exception as exp:\n error.reraise(exp, 'RBT:heightTree')", "def deleteMinTree(root):\n try:\n if (root['left'] is None):\n return None\n if ((not isRed(root['left'])) and ((not isRed(root['left']['left'])))):\n root = moveRedLeft(root)\n root['left'] = deleteMinTree(root['left'])\n root = balance(root)\n return root\n\n except Exception as exp:\n error.reraise(exp, 'RBT:deleteMinTree')", "def test_depth_returns_correct_value_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.depth() == 3", "def test_TypicalTree(self):\n\n self.bst.insert(10,1)\n self.bst.insert(10,2)\n \n self.bst.insert(5,2)\n \n self.bst.insert(20,3)\n self.bst.insert(20,4)\n \n self.bst.insert(3,4)\n self.bst.insert(7,5)\n self.bst.insert(15,6)\n self.bst.insert(14,7)\n self.bst.insert(25,8)\n\n self.bst.insert(5,123)\n self.bst.insert(14,456)\n\n self.assertEqual(findMaxDepthDFS(self.bst.root), 3)\n self.assertEqual(findMaxDepthBFS(self.bst.root), 3)", "def max_depth(self): # DirObj.max_depth\n md=self.depth\n if len(self.subdirs.keys()):\n for name, entry in self.subdirs.iteritems():\n if not entry.deleted:\n td = entry.max_depth()\n if td > md:\n md=td\n return md\n elif len(self.files.keys()):\n return md + 1\n else:\n return md", "def _find_smallest(node):\n if node.left:\n return BinarySearchTree._find_smallest(node.left)\n else:\n return node", "def depth(self):\n return self._max_depth", "def DEFAULT_MIN_DEPTH(self): # real signature unknown; restored from __doc__\n pass", "def get_node_depth(dag_graph, root_node, node_interest, path_visited=[]):\n\n shortest_route = None\n path_visited, node_depth = path_visited + [node_interest], 0\n\n # if the node of interest is == to the root node then no depth\n if node_interest == root_node:\n return path_visited, node_depth\n\n # for every incoming node to node of interest, backtrack till root/end\n for node in find_incoming_nodes(dag_graph, node_interest):\n if node not in path_visited:\n # get a new route\n new_route_visited = get_node_depth(dag_graph, root_node,\n node, path_visited)[0]\n if new_route_visited:\n if not shortest_route or \\\n len(new_route_visited) < len(shortest_route):\n shortest_route = new_route_visited\n\n if shortest_route:\n node_depth = len(shortest_route) - 1\n else:\n node_depth = node_depth\n return shortest_route, node_depth", "def calculate_tree_height(tree):\n max_height = 0\n for i in tree.values():\n if i.is_leaf():\n path = i.path_to_root()\n if len(path) > max_height:\n max_height = 
len(path)\n\n return max_height", "def get_tree_depth(attributes):\n depth = 0\n for attribute in attributes:\n if attribute == -1:\n depth = depth + 1\n return depth", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def tree_size(self) -> int:\n Q = Queue()\n count = 0\n Q.put(self.root)\n while not Q.empty():\n node = Q.get()\n count += 1\n for child in node.children.values():\n Q.put(child)\n return count", "def test_minimum_spanning_tree():\n \n # A very simple graph\n g = UndirectedGraph([('A', 'B'), ('B', 'D'), ('D', 'C'), ('A', 'C')], \n weights=[7, 6, 2, 3])\n mst = g.minimum_spanning_tree('A')\n assert mst == UndirectedGraph([('B', 'D'), ('D', 'C'), ('A', 'C')], \n weights=[6, 2, 3])\n \n # A slightly more complicated graph\n g = UndirectedGraph([('A', 'B'), ('B', 'D'), ('D', 'C'), ('A', 'C'),\n ('C', 'B'), ('A', 'D')], \n weights=[7, 6, 2, 3, 2, 1])\n mst = g.minimum_spanning_tree('A')\n assert mst == UndirectedGraph([('D', 'C'), ('C', 'B'), ('A', 'D')], \n weights=[2, 2, 1])", "def dfs(node):\n nonlocal ans \n if not node: return 0\n lx, rx = dfs(node.left), dfs(node.right) \n if not node.left or node.left.val != node.val: lx = 0\n if not node.right or node.right.val != node.val: rx = 0 \n ans = max(ans, 1 + lx + rx)\n return 1 + max(lx, rx)", "def get_min(self):\n if self.root is None: # BC1\n return float('+inf')\n\n current = self.root\n while current.left is not None: # Traverse like a linked-list\n current = current.left\n\n return current.key", "def bst_count_leaves(tree):\n leaves = 0\n\n def _walk(node=None):\n nonlocal leaves\n if node is None:\n return\n\n if node.left is not None:\n _walk(node.left)\n\n if node.left is None and node.right is None:\n leaves += 1\n\n if node.right is not None:\n _walk(node.right)\n\n _walk(tree.root)\n return leaves", "def minimum_spanning_tree(self):\n if self._directed:\n raise Exception('Current implementation of minimum spanning tree does not work for directed graphs')\n vertices = [self._vertex_dict[x].abstract_vertex for x in self._vertex_dict]\n tree = {'vertices': [random.choice(vertices)], 'edges': []}\n while len(tree['vertices']) < len(vertices):\n best_edge_number = None\n best_edge = None\n best_vertex = None\n vertex_names = [vertex.label for vertex in tree['vertices']]\n for vertex in tree['vertices']:\n for edge in vertex.edges:\n if edge not in vertex_names and (vertex.edges[edge] < best_edge_number or best_edge is None):\n best_edge_number = vertex.edges[edge]\n best_edge = self._edge_dict[vertex.label, edge]\n best_vertex = edge\n tree['vertices'].append(self._vertex_dict[best_vertex].abstract_vertex)\n tree['edges'].append(best_edge)\n return tree['edges']", "def _active_depth(self):\n for n_left, n_right in self.graph.dfs():\n if self.node(n_right)['pad'] == 0:\n return self.node(n_right)['level']\n return 0", "def get_tree_size(self, node):\n\n # If the tree has not been created yet.\n if node == None:\n return 0\n n_nodes = 1\n for child in node.children:\n n_nodes += self.get_tree_size(node.children[child])\n return n_nodes", "def _min_node(node):\n if not node:\n return None\n i = node\n while i.left:\n i = i.left\n return i", "def minimumDominationCount(leaf):\n minimumDominationCount = np.nanmin(leaf.calDominationCount())\n return minimumDominationCount", "def min_tree_id(self) -> int:\n\n return min(self.tree_ids) if len(self.tree_ids)>0 else 0", "def test_depth_empty():\n bst = BST()\n assert bst.depth() == 0", "def 
max_single_path(self, root: Optional[TreeNode]) -> int:\n if not root:\n return 0\n\n if root in self.single_path_cache:\n return self.single_path_cache[root]\n\n result = max(0, root.val)\n if root.left:\n left = self.max_single_path(root.left)\n result = max(result, left + root.val)\n if root.right:\n right = self.max_single_path(root.right)\n result = max(result, right + root.val)\n\n self.single_path_cache[root] = result\n return result", "def height(t: Tree):\n if len(t.children) == 0:\n return 1\n else:\n return 1 + max([height(c) for c in t.children])" ]
[ "0.8461286", "0.8181243", "0.71641576", "0.7144373", "0.6993832", "0.6950117", "0.6907654", "0.6858466", "0.68347263", "0.6743698", "0.67391497", "0.6727257", "0.6615435", "0.6594133", "0.657416", "0.6555998", "0.6510495", "0.64694285", "0.646406", "0.6413745", "0.6391296", "0.6391089", "0.6387154", "0.6385762", "0.63813376", "0.63660586", "0.6352175", "0.63458717", "0.6342528", "0.63359886", "0.6331758", "0.63260275", "0.6320206", "0.6306062", "0.6259383", "0.62355256", "0.6215687", "0.62116694", "0.62026435", "0.6201323", "0.62012434", "0.6185807", "0.616986", "0.615571", "0.61433727", "0.61211765", "0.6119394", "0.6075703", "0.6073193", "0.60595673", "0.60595673", "0.60595673", "0.6050707", "0.60245925", "0.60225636", "0.6014162", "0.599122", "0.598637", "0.59851736", "0.59814847", "0.5968966", "0.5960664", "0.59565794", "0.5948049", "0.59384656", "0.59063876", "0.5897928", "0.58915323", "0.5883327", "0.5863568", "0.5861011", "0.5857178", "0.58518475", "0.5846012", "0.5839113", "0.58382064", "0.58340347", "0.5829457", "0.5829133", "0.5818457", "0.5805844", "0.57910454", "0.5788218", "0.5784471", "0.57789516", "0.57715255", "0.5767105", "0.5761262", "0.5750652", "0.57408714", "0.57310224", "0.5694508", "0.56749016", "0.56690013", "0.56666327", "0.566549", "0.56585747", "0.56376106", "0.5627804", "0.56188923" ]
0.86733794
0
Converts the complex number `c` to a string in Fortran format, i.e. (Re c, Im c). If c is iterable, it returns a string of the form [(Re c_1, Im c_1), ...].
def str_complex(c, kindstr=''): if hasattr(c, '__iter__'): return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']' else: c = complex(c) return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_vct_str ( vct , format = '%.5g%-+.5gj' ) :\n try :\n lst = [] \n for c in vct :\n cc = complex ( c )\n item = format % ( cc.real , cc.imag )\n lst.append ( cc ) \n return '[ ' + ', '.join ( lst ) + ' ]' \n except TypeError :\n pass\n return complex_vct_str ( vct , format = '%.5g%-+.5gj' )", "def __str__( self ) :\n\n return( ' '.join( [ \"%g\" % c_l for c_l in self.coefficients ] ) )", "def to_string(inputs, outputs):\n r_val = '# Column 01: frequency\\n'\n r_val += '# 02: hp - real\\n'\n r_val += '# 03: hp - imaginary\\n'\n r_val += '# 04: hc - real\\n'\n r_val += '# 05: hc - imaginary\\n'\n for f_i, hp_i, hc_i in zip(inputs.freqs, outputs.hp, outputs.hc):\n r_val += \"%8.2f %12.5e %12.5e %12.5e %12.5e\\n\" % (f_i, hp_i.real, hp_i.imag, hc_i.real, hc_i.imag)\n return r_val", "def __str__(self):\n return f'{self.real:02} + {self.imaginary:02}i'", "def fortran_c_wrapper(self) -> str:\n result = ''\n for member in self.members:\n result += member.fortran_c_wrapper()\n return result", "def complexinfo(a, str=None):\n\n if str:\n print \n print \"\\t\", str\n re = a.real.copy()\n im = a.imag.copy()\n _log.debug(\"\\t%.2e %.2g = re.sum im.sum\" % (re.sum(), im.sum()))\n _log.debug(\"\\t%.2e %.2g = abs(re).sum abs(im).sum\" % (abs(re).sum(), abs(im).sum()))", "def nice_cubic_polynomial(p):\n tmp = \"\"\n if p[\"a\"] == 1:\n tmp += \" x^3\"\n elif p[\"a\"] != 0:\n tmp += \"%.2fx^3\" % p[\"a\"]\n if p[\"b\"] == 1:\n tmp += \"\\t+ x^2\"\n elif p[\"b\"] != 0:\n tmp += \"\\t+ %.2fx^2\" % p[\"b\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"c\"] == 1:\n tmp += \"\\t+ x\"\n elif p[\"c\"] != 0:\n tmp += \"\\t+ %.2fx\" % p[\"c\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"d\"] != 0:\n tmp += \"\\t+ %.2f\" % p[\"d\"]\n return tmp", "def list_to_str(\n l: list,\n c: str,\n ) -> str:\n\n s = c.join(map(str, l))\n\n return s", "def getComplex(self, base, aspirated=False):\n res = ''\n if base == 'c':\n res = self.useRetroflex and 'ʈ͡ʂ' or 't͡ɕ'\n elif base == 'j':\n res = self.useRetroflex and 'ɖ͡ʐ' or 'd͡ʑ'\n elif base == 'ts':\n res = 't͡s'\n else:\n res = 'd͡z'\n if aspirated:\n res += 'ʰ'\n return res", "def _repr_(self):\n return \"Complex Field with %s bits of precision\"%self._prec", "def build_ascii_fmtstr(pc_):\n fmtstr = []\n for t, cnt in zip(pc_.type, pc_.count):\n if t == 'F':\n fmtstr.extend(['%.10f'] * cnt)\n elif t == 'I':\n fmtstr.extend(['%d'] * cnt)\n elif t == 'U':\n fmtstr.extend(['%u'] * cnt)\n else:\n raise ValueError(\"don't know about type %s\" % t)\n return fmtstr", "def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string", "def order2string(order):\n nparray = np.array(order)\n num_x = np.sum(nparray==0)\n num_y = np.sum(nparray==1)\n num_z = np.sum(nparray==2)\n string_repr = \"$\"\n if num_x == 0 and num_y == 0 and num_z == 0:\n return \"constant\"\n if num_x > 0:\n string_repr += \"x^{{{}}}\".format(num_x)\n if num_y > 0 :\n string_repr += \"y^{{{}}}\".format(num_y)\n if num_z > 0:\n string_repr += \"z^{{{}}}\".format(num_z)\n string_repr += \"$\"\n return string_repr", "def __str__(self):\n #Get an ordered list of the elements strings so it outputs always the same\n #string given a mass function.\n elements = []\n for element in self.focals:\n elements.append((element, str(element)))\n sortedList = sorted(elements, key=lambda x:x[1])\n \n result = \"\"\n first = True\n for t in sortedList:\n if first:\n result += t[1] 
+ \":\" + \"{:.4f}\".format(self.focals[t[0]])\n first = False\n else:\n result += \", \" + t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n return \"{\" + result + \"}\"", "def __str__(self):\n my_str=\"[\"\n for elem in range(self.size):\n x=cArray.cModule.get_element(self.arrayRef,ctypes.c_int(elem))\n my_str+=str(x)+\" \"\n my_str+=\"]\"\n return my_str", "def __repr__(self) -> str:\n\t\treturn \",\".join(\"\".join(str(n) for n in m) for m in self.matrix)", "def float_vct_str ( vct , format = '%.5g' ) :\n try :\n return '[ ' + ', '.join ( [ format % v for v in vct ] ) + ' ]' \n except TypeError :\n pass\n return float_vct_str ( vct , format = '%.5g' )", "def to_cmd(c: Coordinate, pose_flag: Optional[int] = 7):\n txt = (\"{:.{d}f}\".format(i, d=c.digits) if i is not None else \"\" for i in c.values)\n return f'({\",\".join(txt)}) ({pose_flag},0)'", "def __str__(self):\n [r,c],f = self.D, self.F\n lmax = len(str(max(iter(self)))) + 1\n s = '\\n'.join( (' '.join('{0:{l}G}'.format(f(i,j),l=lmax) if isinstance(f(i,j), int) or isinstance(f(i,j), float) else str(f(i,j)) for j in range(c))) for i in range(r))\n return s", "def __str__(self):\n\t\treturn 'f(z) = ' + self.p.coeffString() + ' / ' + self.q.coeffString()", "def complexCompose(self,coefficients,t=1):\n c=coefficients\n N=len(c)//2\n s=lambda t,n:c[n+N]*cmath.exp(1j*n*t)\n a=0\n g=[]\n z=[]\n\n #for i in range(len(c)):\n # if i==0: n=0\n # elif i%2==1: n=(i+1)//2\n # elif i%2==0: n=-i//2\n # pass\n\n #print([a[1] for a in z])\n #z=sorted(z,key=lambda x:1,reverse=True)\n #print([a[1] for a in z])\n #z=[a[0] for a in z]\n\n for n in range(-N,N+1):\n a+=s(t,n)\n g.append((a.real,a.imag))\n\n return g", "def format_sampler(self, val):\n if isinstance(val, numbers.Number):\n val = complex(val)\n return \"%s,%s\" % (val.real, val.imag)\n return val", "def matrix2str(A):\n s = \"\"\n for x in numpy.nditer(A, order='F'):\n s = s + str(x) + \",\"\n\n return s", "def discreteComplexCompose(self,c,n):\n z=self.discreteComplexInverseTransform(c,n)\n return (z.real,z.imag)", "def matrixToString(matrix):\n nRows = len(matrix)\n if nRows == 0:\n return '[0,0](())'\n nCols = len(matrix[0])\n string = '[%d,%d](' % (nRows, nCols)\n for r in range(nRows):\n string += '('\n for c in range(nCols):\n string += str(float(matrix[r][c]))\n if c != nCols - 1:\n string += ','\n string += ')'\n if r != nRows - 1:\n string += ','\n string += ')'\n return string", "def name_circulant(num_vertices, j_value_set):\n\n return f\"Cir [{num_vertices}] [{j_value_set}]\"", "def floatArrayToString(fvalues, prec=3, delem=\",\"):\n\tsvalues = list(map(lambda v : formatFloat(prec, v), fvalues))\n\tdelem = \" \" if delem is None else delem\n\treturn delem.join(svalues)", "def create_string(iteration, dic):\n return str(iteration) + '|' + dic['Year'] + '/' + \\\n get_month_number(dic['Month']) + '/' + \\\n dic['Day'] + '|' + dic['Hour'] + ':' + \\\n dic['Min'] + ':' + dic['Seg'] + '|' + \\\n dic['Energy']", "def __str__(self):\n output = \"\"\n for i in self.values:\n st = []\n output += \"[\"\n for j in i:\n st.append(str(j))\n output += \",\".join(st)+\"]\"\n return str(self.m)+\"x\"+str(self.n)+\" [\" + output + \"]\"", "def str2(self):\n signs = [ ('+' if f >= 0 else '-') for f in self.mVector ]\n vals = [ abs(f) for f in self.mVector ]\n\n return '%s %s %si %s %sj %s %sk' % (self.mScalar, \n signs[0],\n vals[0],\n signs[1],\n vals[1],\n signs[2],\n vals[2])", "def latex_str_expanded(self):\n try:\n len(self.coeff[0])\n coeff_strs = [str_number_array(c, latex=True) 
for c in self.coeff]\n basis_strs = bernstein_basis_simplex_latex(self.r, self.vertices)\n for i in range(len(basis_strs)):\n if len(basis_strs[i]) > 3:\n basis_strs[i] = \"(\" + basis_strs[i] + \")\"\n return str_dot_product(coeff_strs, basis_strs)\n except TypeError:\n coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]\n basis_strs = bernstein_basis_simplex_latex(self.r, self.vertices)\n for i in range(len(basis_strs)):\n if len(basis_strs[i]) > 3:\n basis_strs[i] = \"(\" + basis_strs[i] + \")\"\n return str_dot_product(coeff_strs, basis_strs)", "def obs_to_string(observations):\n str_obs = []\n for obs in observations:\n str_obs.append(obs.reshape(-1).tostring())\n return str_obs", "def i2s(i):\n return \"[%0.3f,%0.3f]\" % (i[0], i[1])", "def complex(real, imag):", "def _complex(real, imag):\n real = np.asarray(real)\n imag = np.asarray(imag)\n cplx = 1j * imag \n return cplx + real", "def rule_str(C: List, fmt: str = \"%.3f\") -> str:\n s = \" \" + \"\\n∨ \".join([\"(%s)\" % (\" ∧ \".join([fatom(a[0], a[1], a[2], fmt=fmt) for a in c])) for c in C])\n return s", "def complex_magnitude(c):\n return (c * c.conjugate()) ** 0.5", "def credits_to_string(amount: int, significant_numbers: int = 3) -> str:\n letter = ''\n divider = 1\n absAmount = abs(amount)\n\n if absAmount >= 10**15:\n letter = 'Q'\n divider = 10**15\n elif absAmount >= 10**12:\n letter = 'T'\n divider = 10**12\n elif absAmount >= 10**9:\n letter = 'B'\n divider = 10**9\n elif absAmount >= 10**6:\n letter = 'M'\n divider = 10**6\n \n if divider == 1:\n return '{:,} C'.format(int(amount))\n if amount >= 10**18:\n return '{:,} {}C'.format(int(amount / divider), letter)\n else:\n power_of_10 = max(0,int(math.floor(math.log10(absAmount))))\n precision = significant_numbers - 1 - (power_of_10 % 3)\n return '{1:.{0}f} {2}C'.format(precision,\n math.floor(amount / 10**(power_of_10 - significant_numbers + 1)) / 10**precision, \n letter)", "def _com_to_string(com):\n return sep.join([str(x) for x in com])", "def c_to_f(temp):\n if type(temp) is list or type(temp) is tuple:\n return [c * 1.8 + 32 for c in temp]\n else:\n return temp * 1.8 + 32.0", "def c_to_f(temp):\n if type(temp) is list or type(temp) is tuple:\n return [c * 1.8 + 32 for c in temp]\n else:\n return temp * 1.8 + 32.0", "def _cmplx_to_complex_ ( s ) :\n return complex ( s.real () , s.imag () )", "def tostr (x):\n if isinstance (x, tuple):\n return tuple ( map (tostr, x))\n if isinstance(x, (float, numpy.float32,numpy.float64)):\n return float_to_str(x)\n return str(x)", "def _cmplx_to_complex_ ( s ) :\n return complex ( s.real , s.imag )", "def fortran_c_wrapper(self) -> str:\n return ''.join([i.fortran_c_wrapper() for i in self.instances])", "def basis_to_txt(basis):\n\n basis_txt = ''\n for atom in basis['atoms']:\n basis_txt += atom['symbol'] + ' 0\\n'\n for shell in atom['shells']:\n # check if shell is pure or cartesian\n if shell['shell_type'].endswith('_'):\n shell_type = shell['shell_type'][:-1]\n else:\n shell_type = shell['shell_type']\n\n basis_txt += '{} {} {}\\n'.format(shell_type.upper(), len(shell['p_exponents']), 1.00)\n for p, c, pc in zip(shell['p_exponents'], shell['con_coefficients'], shell['p_con_coefficients']):\n if shell['shell_type'].upper() in ['SP']:\n basis_txt += '{:15.10e} {:15.10e} {:15.10e} \\n'.format(p, c, pc)\n else:\n basis_txt += '{:15.10e} {:15.10e} \\n'.format(p, c)\n\n basis_txt += '****\\n'\n return basis_txt", "def complex_inverse(c1,cr):", "def _gto_from_ccdata(self):\n\n gbasis = self.ccdata.gbasis\n lines 
= []\n\n for no, basis in enumerate(gbasis):\n lines.append(f\"{no + 1:3d} 0\")\n for prims in basis:\n lines.append(f\"{prims[0].lower():s} {len(prims[1]):5d} 1.00\")\n for prim in prims[1]:\n lines.append(f\"{prim[0]:15.9e} {prim[1]:15.9e}\")\n lines.append('')\n lines.append('')\n return lines", "def toString(self):\n \n if not self.coeff_map:\n raise Exception('no coeffs in constrain %s'%self.name)\n \n if self.result is None:\n raise Exception('result of this constrain is unknown!')\n \n if self.name is None:\n res=\"\"\n else:\n res=self.name+\": \"\n \n res+=coeff_sum(self.coeff_map) \n \n res+=self.op\n res+=\" \"+str(self.result)\n \n return res;", "def cFormal(self):\n if not self.type:\n return self.name # special case for '...'\n else:\n arr = self.array or ''\n pointers = self.pointers or ''\n return \"%s %s%s%s\" % (self.type, pointers, self.name, arr)", "def __str__(self):\n #When b is negative\n if self._imNum < 0:\n return str(self._reNum) + str(self._imNum) + \"i\"\n #When b is zero\n if self._reNum == 0:\n return str(self._imNum) + \"i\"\n return str(self._reNum) + \"+\" + str(self._imNum) + \"i\"", "def __repr__(self):\n return str([(n,c,str(p)) for (n,c,p) in self.frontierpq])", "def matrix_to_str(matrix):\n output_str = \"[\"\n for i in matrix:\n output_str += \"[\"\n for j in i:\n output_str += str(j) + \", \"\n output_str = output_str[:-2] + \"], \"\n output_str = output_str[:-2] + \"]\"\n return output_str", "def get_corporal_comp(imc: float):\n if imc < 18.5:\n return \"Peso inferior al normal\"\n if imc >= 18.5 and imc < 25:\n return \"Normal\"\n if imc >= 25 and imc < 30:\n return \"Peso superior al normal\"\n if imc >= 30:\n return \"Obesidad\"", "def float_array_string(arr: Iterable[float]) -> str:\n return \"[\" + \", \".join([\"{:.4f}\".format(el) for el in arr]) + \"]\"", "def describe(self):\n composition = str()\n for n, comp in enumerate(self.components):\n if self.molefractions[n] > 0.0:\n composition += comp.name\n composition += \" %.2f\" % self.molefractions[n]\n composition += \"; \"\n return composition", "def stringReco(obj):\n name = obj.get_name()\n name = obj._pid if (name is None) else name\n return (\"pdg: \" + name + \" E: \" + str(obj._E)\n + \" px: \" + str(obj._px) + \" py: \" + str(obj._py)\n + \" pz: \"+ str(obj._pz) + \" mass: \" + str(obj._m))", "def latex_str(self):\n k = dimension(self.vertices)\n try:\n len(self.coeff[0])\n coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]\n basis_strs = bernstein_basis_latex_compact(self.r, k)\n return str_dot_product(coeff_strs, basis_strs)\n except TypeError:\n coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]\n basis_strs = bernstein_basis_latex_compact(self.r, k)\n return str_dot_product(coeff_strs, basis_strs)", "def path_to_string(path: Path) -> str:\n assert_continuous(path)\n\n pieces = [\"M {} {}\".format(path[0].p0[0], path[0].p0[1])]\n for curve in iter(path): # iter cast not strictly necessary\n piece = \"C {} {} {} {} {} {}\".format(\n int(round(curve.c0[0])), int(round(curve.c0[1])),\n int(round(curve.c1[0])), int(round(curve.c1[1])),\n int(round(curve.p1[0])), int(round(curve.p1[1]))\n )\n pieces.append(piece)\n\n return \" \".join(pieces)", "def __str__(self):\n from nodepy.utils import array2strings\n\n c = array2strings(self.c,printzeros=True)\n A = array2strings(self.A)\n b = array2strings(self.b,printzeros=True)\n lenmax, colmax = _get_column_widths([A,b,c])\n\n s=self.name+'\\n'+self.info+'\\n'\n for i in range(len(self)):\n 
s+=c[i].ljust(colmax+1)+'|'\n for j in range(len(self)):\n s+=A[i,j].ljust(colmax+1)\n s=s.rstrip()+'\\n'\n s+='_'*(colmax+1)+'|'+('_'*(colmax+1)*len(self))+'\\n'\n s+= ' '*(colmax+1)+'|'\n for j in range(len(self)):\n s+=b[j].ljust(colmax+1)\n return s.rstrip()", "def eigvec2str(eigvec, m, n, nctr, nvis = 6, npc = 6, iws = ' '):\n\n resstr = iws + '{0:<10}{1}\\n'.format('norm', 'principal components')\n # guarantee len(eigvec) = n * m\n for j in range(max(0, nctr - int(nvis / 2) + 1), min(n, nctr + int(nvis / 2) + 1)):\n eabs = [ abs(eigvec[i * n + j]) ** 2 for i in range(m) ]\n norm = sum(eabs)\n # print out the norm\n resstr += iws + '{0:<10.5f}'.format(norm)\n # the wavefunction to print: |nk> = ...\n resstr += '|n={0:>4},k> = '.format(j)\n # Find the npc elements with the largest norm\n indices = heapq.nlargest(npc, range(len(eabs)), key = lambda i : eabs[i])\n for i in indices:\n resstr += '({0:11.3f})|B_{1}> + '.format(eigvec[i * n + j], i)\n # resstr = resstr[:-3] # delete the last +\n resstr += '... \\n'\n return resstr", "def fmtd_str(self,c=False,prefix_symbol=\"\"):\n psym = prefix_symbol\n ms1 = (\n f\"{self.filename_prefix_mono}{self.event_kind:9} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lms1 = len(ms1)+len(\"(\")\n join_mstr = f\",\\n{' '*lms1}\"\n mavs = (\n f\"{self.argvars}\"\n )\n ms = f\"{psym}{ms1}{mavs}\"\n if c:\n aac = argvars_argname_color = \"MAGENTA\"\n ps1 = (\n f\"{self.filename_prefix_poly}{self.color.fore(f'{self.event_kind:9}','KIND')} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lps1 = lms1\n join_pstr = f\",\\n{' '*lps1}\"\n pavs = (\n f\"{self.argvars}\"\n )\n ps = f\"{psym}{ps1}{pavs}\"\n return ps\n return ms", "def _scfconv_from_ccdata(self):\n\n lines = [f\"scf-first 1 THROUGH {len(self.ccdata.scfenergies)}\"]\n\n for scfenergy in self.ccdata.scfenergies:\n lines.append(f\"{scfenergy:15.6f}\")\n\n return lines", "def writeContactPoint(cp):\n return ' '.join(str(v) for v in (cp.x+cp.n+[cp.kFriction]))", "def get_str(nr_rational):\n return f'{get_numarator(nr_rational)} / {get_numitor(nr_rational)}'", "def single(self):\n return u''.join([str(x) for x in self.cpf])", "def _cmplx_conjugate_ ( s ) :\n return complex ( _real_ ( s ) , - _imag_ ( s ) )", "def values_to_string(values, decimals):\n\n res = []\n for value in values:\n if isinstance(value, list):\n tmp = [format_value(val, decimals) for val in value]\n res.append(f'{tmp[0]} +/- {tmp[1]}')\n else:\n res.append(format_value(value, decimals))\n return res", "def test_repr_format(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n assert t.repr_format(\"asfa\") == \"OneHotEncode(Enumerate(asfa))\"", "def __str__(self):\n return \"f(\" + \",\".join([str(p) for p in self.points]) + \")\"", "def mat2str(mat: list, dec: int = 1, sw: int = 3, sh: int = 1, pdw: int = None, pdh: int = None) -> str:\n maxn = maxc1 = 0\n for row in mat:\n maxc1 = max(maxc1, len(('{:.' + str(dec) + 'f}').format(row[0])))\n for n in row:\n maxn = max(maxn, len(('{:.' 
+ str(dec) + 'f}').format(n)))\n pdw = (math.ceil(sw / 2) if len(mat[0]) > 1 else 1) if pdw is None else pdw\n pdh = math.ceil((sh + 0.6) / 2) if pdh is None else pdh\n emptyc = (maxn * (len(mat[0]) - 1) + maxc1 + pdw * 2) + sw * (len(mat[0]) - 1)\n smat = '\\u250C' + '\\u2500' * pdw + (emptyc - pdw * 2) * ' ' + '\\u2500' * pdw +\\\n '\\u2510\\n' + ('\\u2502' + emptyc * ' ' + '\\u2502\\n') * (pdh - 1) if pdh > 0 else ''\n for i in range(len(mat)):\n smat += '\\u2502' + ' ' * pdw\n for j in range(len(mat[i])):\n smat += ('{:' + str(maxc1 if j == 0 else maxn) + '.' + str(dec) + 'f}').format(mat[i][j])\n smat += ' ' * sw if j < len(mat[0]) - 1 else ' ' * pdw\n smat += '\\u2502\\n' + ('\\u2502' + emptyc * ' ' + '\\u2502\\n')\\\n * sh if sh > 0 and i < len(mat)- 1 else '\\u2502\\n'\n smat += ('\\u2502' + emptyc * ' ' + '\\u2502\\n') * (pdh - 1) + '\\u2514' +\\\n '\\u2500' * pdw + (emptyc - pdw * 2) * ' ' + '\\u2500' * pdw + '\\u2518\\n' if pdh > 0 else ''\n return smat", "def __repr__(self):\n result = '\"{0}\"'.format(self._filepath.unexpanded)\n if self.nonlocal is None: result += \", None\"\n else: result += ', \"%s\"' % (self._nonlocal.unexpanded)\n result += \", %f, %f, %f, %f, %f\" % (self.s, self.p, self.d, self.pnl, self.dnl)\n return result", "def _repr_(self):\n s = \"Constellation of length {} and degree {}\".format(self.length(),\n self.degree())\n for i in range(self.length()):\n s += \"\\ng{} {}\".format(i, self._g[i].cycle_string(True))\n return s", "def __repr__(self):\n fmt_str = 'Cityscapes Split: %s\\n' % self.cs_split\n fmt_str += '----Number of images: %d\\n' % len(self.cs)\n fmt_str += 'COCO Split: %s\\n' % self.coco_split\n fmt_str += '----Number of images: %d\\n' % len(self.coco)\n return fmt_str.strip()", "def _matrixToStr(self, name, mat):\n r = []\n r.append(\"\\n\" + name)\n for i in range(len(self.answer['a priori state vector'])):\n r.append(\", \".join([\"{0:=+10.4g}\".format(float(v)) \n for v in mat[:, i]]))\n return \"\\n\".join(r)", "def cam_to_string(cam):\n cam_string = (\"near;{:8f}\\n\").format(cam.nearPlane)\n cam_string += (\"far;{:8f}\\n\").format(cam.farPlane)\n cam_string += (\"focal_length;{:8f}\\n\".format(cam.projectionMatrix[0][0]))\n cam_string += (\"fov;{}\").format(cam.fov)\n return cam_string", "def __str__(self) -> str:\n\t\treturn f\"dim {self.dimM},{self.dimN}\" +\"\\n\" \\\n\t\t\t+ \"\\n\".join(\"\".join(str(n) for n in m) for m in self.matrix)", "def generate_str (obj):\n d = obj.dim(obj)\n units = [ obj( ( [zer]*i + [one] + [zer]*(d-i-1) )) for i in range(d) ]\n table = []\n raw_table = []\n for j in units:\n table.append([])\n raw_table.append([])\n for i in units:\n if DEBUG: raw_table[-1].append(str(j*i))\n table[-1].append(str(obj([c.name_in(s) for c in (j*i).state])))\n if DEBUG: print('{} × {} = {}'.format(j,i,j*i))\n return table", "def __repr__(self):\n modulename = str(type(self).__module__)\n\n ichars = len(str(int(self.max())))\n slen = ichars + casas\n fstr = \"{{:>{}.{}g}}\".format(slen, casas)\n\n if modulename == \"__main__\":\n s = str(type(self).__name__)\n else:\n s = modulename + '.' 
+ str(type(self).__name__)\n\n s += '(['\n s += ', '.join([fstr.format(x) for x in self.elem])\n s += '])'\n\n return s", "def tupleToString(vector):\n string = '[%d](' % len(vector)\n for x in vector[:-1]:\n string += '%f,' % x\n string += '%f)' % vector[-1]\n return string", "def __str__(self):\n # special cases\n if self.is_nan() :\n return \"nan\"\n elif self.coeff == 1 :\n if self.expt == 1 :\n return \"x\"\n else :\n return \"x^\" + str(self.expt)\n elif self.coeff == -1 :\n if self.expt == 1 :\n return \"-x\"\n else :\n return \"-x^\" + str(self.expt)\n \n # str_builder\n if self.expt == 0 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator))\n elif self.expt == 1 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x\"\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x\"\n else :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x^\" + str(self.expt)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x^\" + str(self.expt)", "def real_of_complex(z):\n return np.vstack((np.real(z[:,0]),np.imag(z[:,0]),np.real(z[:,1]),np.imag(z[:,1]))).T", "def vstr(a):\n def _isallvect(foo):\n if not isinstance(foo,list):\n return False\n if len(foo)==1:\n return isinstance(foo[0],list) and \\\n (isvect(foo[0]) or _isallvect(foo[0]))\n else:\n return (isvect(foo[0]) or _isallvect(foo[0])) and \\\n _isallvect(foo[1:])\n def _makestr(foo):\n if len(foo) ==1:\n return vstr(foo[0])\n else:\n return vstr(foo[0]) + \", \" + _makestr(foo[1:])\n # if it's not a list, it's not a vector, line, or poly \n if not isinstance(a,list):\n return str(a) # fall back to default string formatting\n # if it is a vector, format it to leave out extraneous\n # coordinates. 
NOTE: this means that 3 vectors that happen to\n # fall into the z=0 plane will be formatted as though they were 2\n # vectors.\n if isvect(a):\n if abs(a[3]-1.0) > epsilon: # not in w=1\n return \"[{}, {}, {}, {}]\".format(a[0],a[1],a[2],a[3])\n elif abs(a[2]) > epsilon: # not in z=0\n return \"[{}, {}, {}]\".format(a[0],a[1],a[2])\n else: # in x-y plane\n return \"[{}, {}]\".format(a[0],a[1])\n # if it is a list of vectors (line or poly) format it appropriately\n elif len(a)>0 and _isallvect(a):\n return \"[\"+_makestr(a)+\"]\"\n else: #it's none of those things, fall back to default string\n #formatting\n return str(a)", "def intArrayToString(ivalues, prec=3, delem=\",\"):\n\tsvalues = list(map(lambda v : str(v), ivalues))\n\tdelem = \" \" if delem is None else delem\n\treturn delem.join(svalues)", "def __complex__(self):\r\n return eval(str(self))", "def str_fmt(x):\n if isinstance(x, (list, tuple, np.ndarray)):\n return [str_fmt(x) for x in x]\n if x <= 0.1:\n return f'${x:.2f}$'\n return f'${x:.1f}$' if x <= 1 else f'${int(x)}$'", "def shape2str(x):\n return \"[\" + \" x \".join([str(x) for x in x.shape]) + \"]\"", "def format_matrix(x):\n return ' '.join([format_vector(y) for y in x])", "def writeContactPoint(cp):\n return ' '.join([str(v) for v in cp.x+cp.n+[cp.kFriction]])", "def arbitrary_metric_conformal(n):\n str1 = ','.join(n*[n*'# '+'0 0'])\n return ','.join([str1, n*'0 '+'1 0', n*'0 '+'0 -1'])", "def showm():\n def show1(i):\n coeff=[]\n for m in range(5):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.m%d' % (i+1,m+1) ,qmax_)\n coeff.append(a)\n for o in range(3):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.o%d' % (i+1,o+1) ,qmax_)\n coeff.append(a)\n return coeff\n print ' ant m1 m2 m3 m4 m5 o1 o2 o3'\n for i in range(6):\n m = show1(i)\n print ' 00%d %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % (i+1,m[0],m[1],m[2],m[3],m[4],m[5],m[6],m[7])", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. 
Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z" ]
[ "0.7047887", "0.58852553", "0.5754803", "0.5726591", "0.57228047", "0.56523186", "0.56442934", "0.56111693", "0.5576192", "0.55657303", "0.5538955", "0.55268145", "0.5501663", "0.54070234", "0.5394753", "0.53852344", "0.536029", "0.535686", "0.53531355", "0.53468204", "0.53373665", "0.53193367", "0.531241", "0.52965844", "0.52930677", "0.52841514", "0.5272729", "0.52694833", "0.52590585", "0.5258443", "0.52410626", "0.52203363", "0.5206719", "0.5205602", "0.5205466", "0.5190976", "0.5185597", "0.51814306", "0.51695186", "0.51611745", "0.51611745", "0.51457447", "0.514305", "0.51369756", "0.51147795", "0.51109535", "0.51037294", "0.50812507", "0.50725216", "0.50666505", "0.50665855", "0.50577474", "0.50575954", "0.50564986", "0.5054061", "0.50478274", "0.50363046", "0.5025064", "0.5007218", "0.4996881", "0.49927422", "0.4988488", "0.4984126", "0.49726328", "0.4971047", "0.49551523", "0.49514407", "0.49482366", "0.4947891", "0.4946075", "0.4933702", "0.49325508", "0.4927818", "0.49275926", "0.49253", "0.49250996", "0.49227455", "0.4919545", "0.4919544", "0.49155545", "0.49108115", "0.49026898", "0.48952964", "0.48942694", "0.48894924", "0.48884282", "0.48809466", "0.48782042", "0.48773894", "0.4864755", "0.4859298", "0.48546076", "0.48546076", "0.48546076", "0.48546076", "0.48546076", "0.48546076", "0.48546076", "0.48546076", "0.48546076" ]
0.7722821
0
Select PORT update events and notify the observers upon a port update in APPL_DB/CONFIG_DB or an XCVR insertion/removal in STATE_DB
def handle_port_update_event(sel, asic_context, stop_event, logger, port_change_event_handler): if not stop_event.is_set(): (state, _) = sel.select(SELECT_TIMEOUT_MSECS) if state == swsscommon.Select.TIMEOUT: return if state != swsscommon.Select.OBJECT: logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT') return for port_tbl in asic_context.keys(): while True: (key, op, fvp) = port_tbl.pop() if not key: break if not validate_port(key): continue fvp = dict(fvp) if fvp is not None else {} if 'index' not in fvp: fvp['index'] = '-1' port_index = int(fvp['index']) port_change_event = None if op == swsscommon.SET_COMMAND: port_change_event = PortChangeEvent(key, port_index, asic_context[port_tbl], PortChangeEvent.PORT_SET, fvp) elif op == swsscommon.DEL_COMMAND: port_change_event = PortChangeEvent(key, port_index, asic_context[port_tbl], PortChangeEvent.PORT_DEL, fvp) if port_change_event is not None: port_change_event_handler(port_change_event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port_no = msg.desc.port_no\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n reason_dict = {ofproto.OFPPR_ADD: \"added\",\n ofproto.OFPPR_DELETE: \"deleted\",\n ofproto.OFPPR_MODIFY: \"modified\", }\n\n if reason in reason_dict:\n\n print \"switch%d: port %s %s\" % (dpid, reason_dict[reason], port_no)\n else:\n print \"switch%d: Illeagal port state %s %s\" % (port_no, reason)", "def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def update_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.update_port(port)\n except:\n pass", "def process_update_port(self, context, data, result):\n\n orginal_exten = copy.deepcopy(result)\n # Process extension data\n self._find_port_dict_extensions(\n result, None, session=context.session)\n\n port_ext = self._update_port_ext(\n result, data, session=context.session)\n switchports = self._update_switchports(\n result, data, session=context.session)\n self._find_port_dict_extensions(\n result, None, port_ext=port_ext,\n switchports=switchports, session=context.session)\n\n # We only want to commit on a state change\n if orginal_exten.get(\"commit\") != result[\"commit\"]:\n # If we are transitioning to active, validate\n if not orginal_exten.get(\"commit\") and result[\"commit\"]:\n self._validate_port_can_commit(\n result, None, session=context.session)", "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added port=%s\", port)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted port=%s\", port)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified port=%s\", port)\n else:\n self.logger.info(\"Illegal port state port=%s %s\", port, reason)", "def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n 
self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices", "def refreshPorts(self, event):\n logging.debug(\"Refreshing ports.\")\n self.availablePorts = self.controller.getAvailablePorts()\n\n # Delete old dropdown options\n self.portSelector[\"menu\"].delete(0, \"end\")\n for value in self.availablePorts:\n\n def _callback(value=value):\n self.controller.updatePort(value)\n self.serialPortVar.set(value)\n\n self.portSelector[\"menu\"] \\\n .add_command(label=value,\n command=_callback)\n return", "def handle_port_config_change(sel, asic_context, stop_event, port_mapping, logger, port_change_event_handler):\n if not stop_event.is_set():\n (state, _) = sel.select(SELECT_TIMEOUT_MSECS)\n if state == swsscommon.Select.TIMEOUT:\n return\n if state != swsscommon.Select.OBJECT:\n logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')\n return\n\n read_port_config_change(asic_context, port_mapping, logger, port_change_event_handler)", "def port_desc_stats_reply_handler(self, ev):\n msg = ev.msg\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n config_dict = {ofproto.OFPPC_PORT_DOWN: \"Down\",\n ofproto.OFPPC_NO_RECV: \"No Recv\",\n ofproto.OFPPC_NO_FWD: \"No Farward\",\n ofproto.OFPPC_NO_PACKET_IN: \"No Packet-in\"}\n\n state_dict = {ofproto.OFPPS_LINK_DOWN: \"Down\",\n ofproto.OFPPS_BLOCKED: \"Blocked\",\n ofproto.OFPPS_LIVE: \"Live\"}\n\n ports = []\n for p in ev.msg.body:\n ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '\n 'state=0x%08x curr=0x%08x advertised=0x%08x '\n 'supported=0x%08x peer=0x%08x curr_speed=%d '\n 'max_speed=%d' %\n (p.port_no, p.hw_addr,\n p.name, p.config,\n p.state, p.curr, p.advertised,\n p.supported, p.peer, p.curr_speed,\n p.max_speed))\n\n if p.config in config_dict:\n config = config_dict[p.config]\n else:\n config = \"up\"\n\n if p.state in state_dict:\n state = state_dict[p.state]\n else:\n state = \"up\"\n port_feature = (config, state, p.curr_speed)\n self.port_features[dpid][p.port_no] = port_feature", "def treat_devices_added_or_updated(self, details):\n device = details['device']\n LOG.debug(\"Processing port: %s\", device)\n # REVISIT(ivar): this is not a public facing API, we will move to\n # the right method once the redesign is complete.\n port = self.bridge_manager.get_vif_port_by_id(device)\n if port:\n gbp_details = details.get('gbp_details')\n trunk_details = details.get('trunk_details')\n neutron_details = details.get('neutron_details')\n if gbp_details and 'port_id' not in gbp_details:\n # The port is dead\n details.pop('port_id', None)\n if (gbp_details and gbp_details.get('host') and\n gbp_details['host'] != self.host):\n self.port_unbound(device)\n return False\n elif neutron_details and 'port_id' in neutron_details:\n LOG.info(\"Port %(device)s updated. 
Details: %(details)s\",\n {'device': device, 'details': details})\n # Inject GBP/Trunk details\n port.gbp_details = gbp_details\n port.trunk_details = trunk_details\n self.treat_vif_port(port, neutron_details['port_id'],\n neutron_details['network_id'],\n neutron_details['network_type'],\n neutron_details['physical_network'],\n neutron_details['admin_state_up'],\n neutron_details['fixed_ips'],\n neutron_details['device_owner'],\n neutron_details['segmentation_id'])\n # update plugin about port status\n if neutron_details.get('admin_state_up'):\n LOG.debug(\"Setting status for %s to UP\", device)\n self.plugin_rpc.update_device_up(\n self.context, device, self.agent_id, self.host)\n else:\n LOG.debug(\"Setting status for %s to DOWN\", device)\n self.plugin_rpc.update_device_down(\n self.context, device, self.agent_id, self.host)\n LOG.info(\"Configuration for device %s completed.\",\n device)\n else:\n LOG.warn(\"Device %s not defined on plugin\", device)\n if port and port.ofport != -1:\n self.port_unbound(port.vif_id)\n return False\n else:\n # The port disappeared and cannot be processed\n LOG.info(\"Port %s was not found on the integration bridge \"\n \"and will therefore not be processed\", device)\n self.port_unbound(device)\n return False\n return True", "def gpio_edge_listener(port):\n self.schedule_update_ha_state(True)", "def datachange_notification(self, node, val, data):\n \n logger.debug(\"New data change event. node:{}, value:{}\".format(node, val))\n \n # Sorry about these lines of code, but I don't see any nicer way of determining the port number than from \n # the identifier string. Then splitting it up to isolate the port number.\n # Example \"Status.Port_2.Selected\" is split into ['Status', 'Port_2', 'Selected'] then 'Port_2' is split into \n # ['Port', '2'] and then the '2' is turned into an intiger.\n path_list = str(node.nodeid.Identifier).split(\".\")\n\n # We can safely assume that the last term is the tag that updated.\n tag = path_list[-1] \n \n # Figure out the port number\n port_number = None\n if 'Port' in path_list[1]:\n port_number = int(path_list[1].split(\"_\")[-1]) \n \n \"\"\" Switch for each possible tag\"\"\"\n # If the command tag \"Select\" changes go select that port with the instructions saved in the command tag. 
\n if tag == 'Select' and port_number:\n if val == True:\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Instructions\".format(port_number))\n instructions = node.get_value()\n self._pbl.select_port(port_number, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Select\".format(port_number))\n node.set_value(False)\n \n elif tag == 'Deselect' and port_number:\n if val == True:\n self._pbl.deselect_port(port_number, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Deselect\".format(port_number))\n node.set_value(False)\n\n elif tag == 'ContentDisplayName' and port_number:\n self._pbl.set_content_key(port_number,'display_name', str(val))\n elif tag == 'ContentName' and port_number:\n self._pbl.set_content_key(port_number,'name', str(val))\n elif tag == 'ContentDescription' and port_number:\n self._pbl.set_content_key(port_number,'description', str(val))\n elif tag == 'ContentImagePath' and port_number:\n self._pbl.set_content_key(port_number,'image_path', str(val))\n \n elif tag == 'Select' and 'ByContent' in path_list[1]:\n if val == True:\n instructions = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Instructions\").get_value()\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n _, selected_port = self._pbl.select_content(name = name, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Select\")\n node.set_value(False)\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Result\")\n node.set_value(selected_port)\n\n elif tag == 'Deselect' and 'ByContent' in path_list[1]:\n if val == True:\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n self._pbl.deselect_content(name = name, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Deselect\")\n node.set_value(False)", "def db_change_callback(self, table, key, action, value, topic=None):\n if self.USE_CACHE:\n # Update cache\n if action == 'create' or action == 'set':\n if table == 'lport':\n self.cache_logical_port_by_port_id[key] = self.nb_api.get(l2.LogicalPort(id=key))\n if table == 'lrouter':\n self.cache_logical_router_by_dpid[key] = self.nb_api.get(l3.LogicalRouter(id=key))\n if action == 'del':\n if table == 'lport':\n # default if key does not exists is None\n self.cache_logical_port_by_port_id.pop(key, None)\n if table == 'lrouter':\n self.cache_logical_router_by_dpid.pop(key, None)\n\n print(\"L3 App: Received Update for table {} and key {} action {}\".format(table, key, action))\n if action == 'set':\n if table == 'lport':\n if self.USE_CACHE:\n updated_port = self.cache_logical_port_by_port_id[key]\n else:\n updated_port = self.nb_api.get(l2.LogicalPort(id=key))\n\n if len(updated_port.ips) is not 0:\n for ip in updated_port.ips:\n # new ip discovered\n # install route on every datapath\n # only update the other datapaths\n for dpid, datapath in self.cache_datapath_by_dpid.iteritems():\n out_port, new_src_mac, new_dst_mac = self.get_next_hop(dpid, ip)\n if out_port is None:\n continue\n out_port_id = \"{}:{}\".format(dpid, out_port)\n lout_port = self.nb_api.get(l2.LogicalPort(id=out_port_id))\n if ip in lout_port.ips:\n continue\n # else add new ip and install flow\n lout_port.ips.append(ip)\n self.nb_api.update(lout_port)\n # install flow\n print \"L3 IP via pubsub: installing flow on {}: out_port: {} src_mac:\" \\\n \" 
{} dst_mac: {}, ip: {}\".format(datapath.id, out_port, new_src_mac, new_dst_mac, ip)\n self.add_flow_gateway_for_ip(datapath, int(out_port), ip, new_src_mac, new_dst_mac)", "def updateAvailablePorts(self):\n # Build a port list\n device_list_all = comports()\n self.device_choices = list()\n for device in device_list_all:\n self.device_choices.append(device[0])\n\n if len(self.device_choices) < 1:\n tkinter.messagebox.showerror('No Available Serial Ports','No serial ports are available.')", "def process_port_state(self, dp_name, port, state):\n with self._lock:\n device = self._port_device_mapping.setdefault((dp_name, port), DeviceEntry())\n device.port_up = state\n if not state:\n device.assigned = None\n device.vlan = None\n self._send_device_port_event(device)", "def update_ports(self):\n \n # fetch only those ports having\n # VID:PID == a valid (VID, PID) pair in target_vid_pid\n ports = []\n\n for valid_pair in self.target_vid_pid:\n vid_pid = valid_pair[0] + ':' + valid_pair[1]\n ports = ports + [p for p in list_ports.grep(vid_pid)]\n #ports = list_ports.comports()\n \n # add new ports to connected_ports\n # and update new_ports\n new_ports = []\n for p in ports:\n if not p in self.connected_ports:\n self.connected_ports.append(p)\n new_ports.append(p)\n\n # remove missing ports from devices_found\n # and update removed_ports\n removed_ports = []\n for p in self.connected_ports:\n if not p in ports:\n self.connected_ports.remove(p)\n removed_ports.append(p)\n\n return new_ports, removed_ports", "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()", "def update_port_postcommit(self, mech_context):\n LOG.debug(\"update_port_postcommit: called\")", "def db_changed(self):\n self.dbstate.db.connect('person-add', self.update)\n self.dbstate.db.connect('person-delete', self.update)\n self.dbstate.db.connect('person-update', self.update)\n self.dbstate.db.connect('family-add', self.update)\n self.dbstate.db.connect('family-delete', self.update)\n self.dbstate.db.connect('family-update', self.update)", "def check_port_connections(self):\n all_ports = crest.get_all_ports(self.model)\n influences_to_target = {p: [] for p in all_ports}\n updates_to_target = {p: [] for p in all_ports}\n actions_to_target = {p: [] for p in all_ports}\n\n # fill data stores\n for inf in crest.get_all_influences(self.model):\n influences_to_target[inf.target].append(inf)\n\n for up in crest.get_all_updates(self.model):\n updates_to_target[up.target].append(up)\n\n for action in crest.get_all_actions(self.model):\n actions_to_target[action.target].append(action)\n\n for port in all_ports:\n assert not (len(influences_to_target[port]) > 0 and (\n len(updates_to_target[port]) > 0 or len(actions_to_target[port]) > 0)\n ), f\"There are [influences and (updates or actions)] writing to port {port._name} (entity: {port._parent._name})\"\n\n assert len(influences_to_target[port]) < 2, f\"There are two influences writing to {port._name}\"\n\n states = [update.state for update in updates_to_target[port]]\n assert len(states) == len(set(states)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple updates linked to the same state\"\n\n transitions = [action.transition for action in actions_to_target[port]]\n assert len(transitions) == len(set(transitions)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple actions 
linked to the same transition\"", "def servicesChanged(self) -> None:\n ...", "def notify(self, ports):\n if self._state == JobState.PENDING:\n self._process.notify(ports)", "def _port_stats_reply_handler(self, ev):\n body = ev.msg.body\n dpid = ev.msg.datapath.id\n self.stats['port'][dpid] = body\n self.free_bandwidth.setdefault(dpid, {})\n\n for stat in sorted(body, key=attrgetter('port_no')):\n # self.link_loss[dpid][stat.port_no] = [stat.rx_packets,stat.tx_packets]\n port_no = stat.port_no\n if port_no != ofproto_v1_3.OFPP_LOCAL:\n key = (dpid, port_no)\n value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,\n stat.duration_sec, stat.duration_nsec)\n\n self._save_stats(self.port_stats, key, value, 5)\n\n # Get port speed.\n pre = 0\n period = setting.MONITOR_PERIOD\n tmp = self.port_stats[key]\n if len(tmp) > 1:\n pre = tmp[-2][0] + tmp[-2][1]\n period = self._get_period(tmp[-1][3], tmp[-1][4],\n tmp[-2][3], tmp[-2][4])\n\n speed = self._get_speed(\n self.port_stats[key][-1][0] + self.port_stats[key][-1][1],\n pre, period)\n\n self._save_stats(self.port_speed, key, speed, 5)\n self._save_freebandwidth(dpid, port_no, speed)", "def forwarder_state_changed(self, ev):\n\n\n dp = ev.dp\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n\n if ev.enter is True:\n # in plain MAC setup, this should install only ICMP and ARP re-route rules, watchout for hardcoded DP id\n self.on_inner_dp_join(dp)\n\t ##For evry new forwarder we send out discovery ICMP packets out of every port except OFPP_CONTROLLER\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' saying hello to Unifycore Controller, Unifycore warmly welcomes you!')\n for port in dp.ports:\n if port != (ofp.OFPP_CONTROLLER):\n LOG.debug('TOPO MNGR: Controller is sending topology discovery ICMPs to forwarder: ' + str(dp.id) + ', port: ' + str(port))\n _icmp_send(dp,port,DISCOVERY_IP_SRC, DISCOVERY_IP_DST)\n\n ##For evry new forwarder we send out discovery ARP packets out of every port except OFPP_CONTROLLER to find APN\n for apn in APN_POOL:\n if apn.ip_addr != None:\n LOG.debug('TOPO MNGR: Forwarder: '+str(dp.id)+', port: '+ str(port) + ' is looking for APN: ' + str(apn.name) +' at IP: '+str(apn.ip_addr)+' with ARP search source IP: ' + str(apn.arp_origin_ip))\n _arp_send(dp=dp, port_out=port, arp_code=1, ip_target=apn.ip_addr, ip_sender=apn.arp_origin_ip)\n\n\n\n\n\n if ev.enter is False:\n\t ##TODO: We need to scan if any tunnels were affected, and if so, if any PDP COntexts were affected\n ##JUST REMOVING NODE FROM TOPOLOGY ISNT ENOUGH!\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is leaving topology. 
It was a pleasure for us!')\n topo.del_forwarder(dp.id)", "def connect_callbacks(self):\n self.winch_MIDI_controller.connect_midi_processor(self.main.winch_midi_logic)\n\n self.DMX_controller.connect_callback(self.main.dmx_slider_change)\n self.dmxSelect.callback = self.main.dmx.set_and_open_port\n self.dmxSelect.set_items(self.main.dmx.available_ports())\n\n self.winchMidiInputCombo.callback = self.main.winch_midi_listener.open_MIDI_input\n self.winchMidiInputCombo.set_items(self.main.winch_midi_listener.get_midi_port_names())\n\n self.midiOutputCombo.callback = self.main.midi_sender.open_MIDI_output\n self.midiOutputCombo.set_items(self.main.midi_sender.get_midi_port_names())\n\n self.oscListenerConfig.callback = self.main.osc_listener.set_OSC_port\n self.oscSenderConfig.callback = self.main.osc_sender.set_OSC_port\n\n for winch, selector in zip(self.main.winches, self.winchSelects):\n selector.callback = winch.set_and_open_port\n selector.set_items(winch.available_ports())\n\n return", "def loop(self):\n _logger.info(\"Bus.loop listen imbus on db postgres\")\n # PATCH !!\n with odoo.sql_db.db_connect(_get_imbus_db()).cursor() as cr:\n conn = cr._cnx\n cr.execute(\"listen imbus\")\n cr.commit();\n while True:\n if select.select([conn], [], [], TIMEOUT) == ([], [], []):\n pass\n else:\n conn.poll()\n channels = []\n while conn.notifies:\n channels.extend(json.loads(conn.notifies.pop().payload))\n # dispatch to local threads/greenlets\n events = set()\n for channel in channels:\n events.update(self.channels.pop(hashable(channel), []))\n for event in events:\n event.set()", "def _onConnectionEvent(args):\n ctx = current_context()\n pvname = name(args.chid)\n global _cache\n\n if ctx is None and len(_cache.keys()) > 0:\n ctx = list(_cache.keys())[0]\n if ctx not in _cache:\n _cache[ctx] = {}\n\n # search for PV in any context...\n pv_found = False\n for context in _cache:\n if pvname in _cache[context]:\n pv_found = True\n break\n\n if not pv_found:\n _cache[ctx][pvname] = {'conn':False, 'chid': args.chid,\n 'ts':0, 'failures':0, 'value': None,\n 'callbacks': []}\n\n # set connection time, run connection callbacks\n # in all contexts\n for context, cvals in _cache.items():\n if pvname in cvals:\n entry = cvals[pvname]\n ichid = entry['chid']\n if isinstance(entry['chid'], dbr.chid_t):\n ichid = entry['chid'].value\n\n if int(ichid) == int(args.chid):\n conn = (args.op == dbr.OP_CONN_UP)\n chid = args.chid\n entry.update({'chid': chid, 'conn': conn,\n 'ts': time.time(), 'failures': 0})\n for callback in entry.get('callbacks', []):\n poll()\n if hasattr(callback, '__call__'):\n callback(pvname=pvname, chid=chid, conn=conn)\n return", "def notify_observers(self, new_gamestate) -> None:", "def switch_features_handler(self, event):\n\t\tmsg = event.msg\n\t\tdatapath = msg.datapath\n\n\t\tLOG.info(\"Configuring switch %d...\" % datapath.id)\n\n\t\t\"\"\" Set table 0 as stateful \"\"\"\n\t\treq = bebaparser.OFPExpMsgConfigureStatefulTable(\n\t\t\t\tdatapath=datapath,\n\t\t\t\ttable_id=0,\n\t\t\t\tstateful=1)\n\t\tdatapath.send_msg(req)\n\n\t\t\"\"\" Set lookup extractor = {eth_dst} \"\"\"\n\t\treq = bebaparser.OFPExpMsgKeyExtract(datapath=datapath,\n\t\t\t\tcommand=bebaproto.OFPSC_EXP_SET_L_EXTRACTOR,\n\t\t\t\tfields=[ofproto.OXM_OF_ETH_DST],\n\t\t\t\ttable_id=0)\n\t\tdatapath.send_msg(req)\n\n\t\t\"\"\" Set update extractor = {eth_dst} \"\"\"\n\t\treq = 
bebaparser.OFPExpMsgKeyExtract(datapath=datapath,\n\t\t\t\tcommand=bebaproto.OFPSC_EXP_SET_U_EXTRACTOR,\n\t\t\t\tfields=[ofproto.OXM_OF_ETH_DST],\n\t\t\t\ttable_id=0)\n\t\tdatapath.send_msg(req)\n\n\t\t###########################################################################################\n\n\t\t\"\"\" Set GDV[2]=4 \"\"\"\n\t\treq = bebaparser.OFPExpMsgsSetGlobalDataVariable(\n\t\t\t\tdatapath=datapath,\n\t\t\t\ttable_id=0,\n\t\t\t\tglobal_data_variable_id=2,\n\t\t\t\tvalue=4\n\t\t\t)\n\t\tdatapath.send_msg(req)\n\n\t\t\"\"\" Set condition 5: FDV[0] >= GDV[2] (i.e. counter >= 4) \"\"\"\n\t\treq = bebaparser.OFPExpMsgSetCondition(\n\t\t\t\tdatapath=datapath,\n\t\t\t\ttable_id=0,\n\t\t\t\tcondition_id=5,\n\t\t\t\tcondition=bebaproto.CONDITION_GTE,\n\t\t\t\toperand_1_fd_id=0,\n\t\t\t\toperand_2_gd_id=2\n\t\t\t)\n\t\tdatapath.send_msg(req)\n\n\t\t\"\"\" If counter <4 then forward() & Update function: FDV[0] = FDV[0]+1 (i.e. counter = counter+1) \"\"\"\n\t\tmatch = ofparser.OFPMatch(condition5=0)\n\t\tactions = [ofparser.OFPActionOutput(ofproto.OFPP_FLOOD),\n\t\tbebaparser.OFPExpActionSetDataVariable(table_id=0, opcode=bebaproto.OPCODE_SUM, output_fd_id=0, operand_1_fd_id=0, operand_2_cost=1)]\n\t\tself.add_flow(datapath=datapath,\n\t\t\t\ttable_id=0,\n\t\t\t\tpriority=0,\n\t\t\t\tmatch=match,\n\t\t\t\tactions=actions)\n\n\t\t\"\"\" If counter >=4 then drop() \"\"\"\n\t\tmatch = ofparser.OFPMatch(condition5=1)\n\t\tactions = []\n\t\tself.add_flow(datapath=datapath,\n\t\t\t\ttable_id=0,\n\t\t\t\tpriority=0,\n\t\t\t\tmatch=match,\n\t\t\t\tactions=actions)\n\n\t\t\"\"\"\n\t\t$ sudo mn --topo single,4 --switch user --controller remote --mac --arp\n\t\tmininet> h1 ping h2 -c10\n\t\tIt should drop all the packets from the 5-th \n\t\t\"\"\"", "def get_port_list(self):\r\n self.ports = Manager().dict()\r\n self.value = Manager().dict()\r\n self.sensors = dict()\r\n for p in self.device.ports['input']:\r\n if p.enabled:\r\n self.ports[p.number] = p\r\n self.value[p.number] = 'Connexion à la carte'\r\n self.sensors[p.number] = Sensor.get(p._type)", "def pull_port(self, port, update_fields=None):\n import_time = timezone.now()\n imported_port = self.import_port(\n port.vm.backend_id, port.backend_id, save=False\n )\n\n port.refresh_from_db()\n if port.modified < import_time:\n if not update_fields:\n update_fields = models.Port.get_backend_fields()\n\n update_pulled_fields(port, imported_port, update_fields)", "def listen(DSN, channels):\n\n\n\n for channel in channels:\n\n curs.execute(\"LISTEN %s\" % (channel))\n\n \n print \"Waiting for notifications on channel '%s'\" % (channel)\n\n while True:\n if select.select([conn], [], [], 10) == ([], [], []):\n # heartbeat here\n heartbeat()\n \n else:\n \n heartbeat()\n \n conn.poll()\n while conn.notifies:\n notify = conn.notifies.pop(0) #default -1\n #curs.execute(\"notify test,'abc'\")\n #print notify.pid\n #dispatch(notify)\n gevent.spawn(dispatch, notify)\n #gevent.sleep(0.1)", "def device_update_callback(runtime, dname, sname, _):\r\n # evaluate policies and notify the django server\r\n # use a separate thread so that it won't block caller\r\n th = Thread(target=evaluate_policies_and_notify, args=(runtime, dname, sname))\r\n th.start()", "def get_all_port(self, conf, dpid):\n\t\tpass", "def process_port_state(self, dp_name, port, state):\n self._servicer.process_port_state(dp_name, port, state)", "def update_port_postcommit(self, port_context):\n # TODO(ijw): optimisation: the update port may leave the\n # binding state the same as before if someone 
updated\n # something other than the binding on the port, but this\n # way we always send it out and it's the far end's job to\n # ignore it. Doing less work is nevertheless good, so we\n # should in future avoid the send.\n\n if port_context.binding_levels is not None:\n current_bind = port_context.binding_levels[-1]\n if port_context.original_binding_levels is None:\n prev_bind = None\n else:\n prev_bind = port_context.original_binding_levels[-1]\n\n if (current_bind is not None and\n current_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n self.communicator.kick()\n elif (prev_bind is not None and\n prev_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n self.communicator.kick()", "def send_on_output_port_change(self):\n return self._send_on_output_port_change", "def on_events_updated(self, updates, original):\n\n event = deepcopy(original)\n event.update(updates)\n plannings = list(get_resource_service('events').get_plannings_for_event(event))\n\n if not plannings:\n # If this Event has no associated Planning items\n # then there is no need to send notifications\n return\n\n changed_fields = []\n\n for field in ['location', 'event_contact_info', 'files', 'links']:\n if (updates.get(field) or []) != (original.get(field) or []):\n changed_fields.append(field)\n\n if not changed_fields:\n # If no relevant Event fields have changed\n # then there is no need to send notifications\n return\n\n # Add 'assigned_to' details to all the coverages\n get_resource_service('planning').generate_related_assignments(plannings)\n\n for planning in plannings:\n for coverage in planning.get('coverages') or []:\n assigned_to = coverage.get('assigned_to') or {}\n\n slugline = (coverage.get('planning') or {}).get('slugline') or ''\n coverage_type = (coverage.get('planning') or {}).get('g2_content_type') or ''\n\n PlanningNotifications().notify_assignment(\n coverage_status=(coverage.get('assigned_to') or {}).get('state'),\n target_user=assigned_to.get('user'),\n target_desk=assigned_to.get('desk') if not assigned_to.get('user') else None,\n message='assignment_event_metadata_msg',\n slugline=slugline,\n coverage_type=get_coverage_type_name(coverage_type),\n event=event,\n client_url=app.config['CLIENT_URL'],\n no_email=True,\n contact_id=assigned_to.get('contact')\n )", "def send_on_input_port_change(self):\n return self._send_on_input_port_change", "def treat_devices_added(self, devices):\n LOG.info(_(\"treat_devices_added %s\"), devices)\n resync = False\n self.prepare_devices_filter(devices)\n for device in devices:\n LOG.debug(_(\"Port %s added\"), device)\n try:\n details = self.plugin_rpc.get_device_details(self.context,\n device,\n self.agent_id)\n except Exception as e:\n LOG.debug(_(\"Unable to get port details for \"\n \"%(device)s: %(e)s\"),\n {'device': device, 'e': e})\n resync = True\n continue\n if 'port_id' in details:\n LOG.info(_(\"Port %(device)s updated. Details: %(details)s\"),\n {'device': device, 'details': details})\n # If a device has been added but it's not active, don't\n # do anything with it. We'll add it later. 
Otherwise, configure\n # it.\n if details['admin_state_up']:\n # create the networking for the port\n network_type = details.get('network_type')\n if network_type:\n segmentation_id = details.get('segmentation_id')\n else:\n # compatibility with pre-Havana RPC vlan_id encoding\n vlan_id = details.get('vlan_id')\n (network_type,\n segmentation_id) = lconst.interpret_vlan_id(vlan_id)\n\n if self.routing_mgr.add_interface(details['network_id'],\n network_type,\n details['physical_network'],\n segmentation_id,\n details['port_id'],\n details['fixed_ips'],\n details['mac_address']):\n\n # update plugin about port status\n resp = self.plugin_rpc.update_device_up(self.context,\n device,\n self.agent_id,\n cfg.CONF.host)\n else:\n resp = self.plugin_rpc.update_device_down(self.context,\n device,\n self.agent_id,\n cfg.CONF.host)\n LOG.info(_(\"Update device response: %s\"), resp)\n else:\n LOG.info(_(\"Device %s not defined on plugin\"), device)\n return resync", "def port_update_end(self, payload):\n port = DictModel(payload['port'])\n network = self.cache.get_network_by_id(port.network_id)\n if network:\n self.cache.put_port(port)\n self.call_driver('reload_allocations', network)", "def update(self, events):\n events = events", "def inform_listeners(self):\n d = self.get_all_sorted()\n for listener in self.listeners:\n listener.stream_updated(d)", "def modify_ports(self, ports, **kwargs):\n pass", "def notifyObservers(self):", "def updateport(self,event=None):\n self.messages.log('Looking for focuser on '+str(self.port.get())+'...')\n if self.rf is not None and self.rf.ready:\n self.rf.closeconn()\n self.rf = rfserial.RFSerial(str(self.port.get()))\n if self.rf is not None and self.rf.ready:\n self.messages.log('Connected to focuser on '+str(self.port.get()))\n self.positiontext.set(str(self.rf.querypos()))\n pow = self.rf.queryrempow()\n self.pow[0].set(int(pow[0]))\n self.pow[1].set(int(pow[1]))\n self.pow[2].set(int(pow[2]))\n self.pow[3].set(int(pow[3]))\n else:\n self.messages.log(\"Can't connect to focuser on \"+str(self.port.get()))\n self.positiontext.set('Not Connected')", "def probe_ports( self, ):\r\n ports = self.com_driver.list_available()\r\n self.gui.print_info_string( \"\" )\r\n self.gui.print_info_string( \"Reported Ports from driver:\" )\r\n self.gui.print_info_string( \"\" )\r\n if len( ports ) == 0:\r\n self.gui.print_info_string( \"None \\n\" )\r\n else:\r\n for i_port in ports:\r\n self.gui.print_info_string( i_port[0] )\r\n #self.gui.print_info_string( \"\\n\" )\r\n\r\n self.close_driver()\r\n\r\n self.gui.print_info_string( \"\\nProbe Ports from parameters:\\n\" )\r\n ports = self.com_driver.probe_available( self.parameters.port_list )\r\n ix_line = 0 # what is this ??\r\n for i_port in ports:\r\n ix_line += 1\r\n self.gui.print_info_string( str( i_port ) )\r\n if ix_line == 10:\r\n ix_line = 0\r\n self.gui.print_info_string( \"\\n\" )\r\n #logger.log( fll, a_str )\r\n\r\n return", "def state_processing_do(cfg, app, win, events):", "def portconfig():\r\n print('''\\n%s at %s acting as user %s\r\n\\nPort Configuration Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get current port configuration\r\n 2 - Get current port status\r\n 3 - Get current port counters\r\n 4 - Get SFP status\r\n 5 - Change Port Configuration\r\n 6 - Shut Down or Activate Port\r\n 7 - Reset Port Counters\r\n 8 - Back\r\n 9 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a 
valid selection.\", reason)\r\n portconfig()\r\n execute = {1: PACKETMASTER.port_config,\r\n 2: PACKETMASTER.port_info,\r\n 3: PACKETMASTER.port_statistics,\r\n 4: PACKETMASTER.sfp_info,\r\n 5: PACKETMASTER.set_port_config_guided,\r\n 6: PACKETMASTER.port_on_off_guided,\r\n 7: PACKETMASTER.reset_port_counters,\r\n 8: hardwareconfig,\r\n 9: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n portconfig()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n portconfig()", "def save_io_ports(self, *args):\n if args[0] == 'Save':\n title = args[1].title\n text = args[1].text_field.text\n try:\n port = int(text, 16)\n if port < 0 or port > 4095:\n toast('Invalid port number. Valid port numbers [0-4095]')\n else:\n if is_valid_port(port):\n hex_port = convert_to_hex(port, 12)\n if TRAFFIC_LIGHT['menu_title'] in title:\n update_reserved_ports(TRAFFIC_LIGHT,\n TRAFFIC_LIGHT['port'],\n hex_port)\n self.traffic_lights.text = TRAFFIC_LIGHT['menu_title'] + '. Current Port: ' + str(\n TRAFFIC_LIGHT['port'])\n toast_message = f'Changed Traffic Light I/O port number to {port}'\n elif SEVEN_SEGMENT_DISPLAY['menu_title'] in title:\n update_reserved_ports(SEVEN_SEGMENT_DISPLAY,\n SEVEN_SEGMENT_DISPLAY['port'],\n hex_port)\n self.seven_segment.text = SEVEN_SEGMENT_DISPLAY['menu_title'] + '. Current Port: ' + str(\n SEVEN_SEGMENT_DISPLAY['port'])\n toast_message = f'Changed Seven Segment I/O port number to {port}'\n elif ASCII_TABLE['menu_title'] in title:\n if port > 4088:\n toast_message = 'Invalid port for ASCII Table. Valid ports [0-4088]'\n else:\n try:\n update_reserved_ports(ASCII_TABLE,\n ASCII_TABLE['port'],\n hex_port, True)\n self.ascii_table.text = ASCII_TABLE['menu_title'] + '. Current Port: ' + str(\n ASCII_TABLE['port'])\n toast_message = f'Changed ASCII Table I/O port number to {port}'\n except MemoryError as e:\n toast_message = str(e)\n else:\n update_reserved_ports(HEX_KEYBOARD,\n HEX_KEYBOARD['port'],\n hex_port)\n self.hex_keyboard.text = HEX_KEYBOARD['menu_title'] + '. Current Port: ' + str(\n HEX_KEYBOARD['port'])\n toast_message = f'Changed HEX Keyboard I/O port number to {port}'\n toast(toast_message)\n else:\n toast('Invalid input. 
That port is reserved!')\n except ValueError as e:\n toast(f'Not a valid port!')", "def on_event(self, c, e):\n\n # any updates available?\n if EVENT_MCX_UPDATES_AVAILABLE.isSet():\n # gather all updates\n availableUpdates = mcxUpdateThread.getUpdates()\n # for each update group\n for updateGroupId, updateMessageList in availableUpdates.iteritems():\n # and each message within each group\n for updateMessage in updateMessageList:\n # send the message\n self.connection.privmsg(self.channel, updateMessage)\n\n # after all reset event\n EVENT_MCX_UPDATES_AVAILABLE.clear()", "def devices_changed_trigger(result):\n\n # Error handling\n error = extract_error_from(result)\n if error:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"Devices changed trigger got error result: {}\".format(result))\n return\n\n ctx = __context__.setdefault(\"usbutil.devices_changed_trigger\", {})\n\n previous_devices = ctx.get(\"devices\", [])\n current_devices = result[\"values\"]\n\n for dev in previous_devices:\n if dev not in current_devices:\n # Device disconnected\n tag = \"system/usb/{}/{}/disconnected\".format(dev[\"vendor\"], dev[\"product\"])\n __salt__[\"minionutil.trigger_event\"](tag, data={\n \"bus\": dev[\"bus\"],\n \"device\": dev[\"device\"],\n \"name\": dev[\"name\"],\n })\n\n for dev in current_devices:\n if dev not in previous_devices:\n # Device connected\n tag = \"system/usb/{}/{}/connected\".format(dev[\"vendor\"], dev[\"product\"])\n __salt__[\"minionutil.trigger_event\"](tag, data={\n \"bus\": dev[\"bus\"],\n \"device\": dev[\"device\"],\n \"name\": dev[\"name\"],\n })\n\n # Update state\n ctx[\"devices\"] = result[\"values\"]", "def enable_cable_ports(cid):\n\n SQL.execute('''\n SELECT \n guid,\n port\n FROM \n cable_ports \n WHERE\n cid = ?\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall(): \n if not DISABLE_PORT_STATE_CHANGE: \n ib_mgt.enable_port(int(row['guid']), int(row['port']))", "def update_listeners(self, packet):\n\t\tpacket.module_id = self.id\n\t\tself._notify_listeners(packet)", "def handle_updates(self, update):\r\n self.__manage_pump()", "def refresh(self):\n self.ports = list(serial.tools.list_ports.comports())", "def _process_trunk_subport_bindings(self, context, trunk, port_ids):\n updated_ports = []\n trunk_port_id = trunk.port_id\n trunk_port = self.core_plugin.get_port(context, trunk_port_id)\n trunk_host = trunk_port.get(portbindings.HOST_ID)\n migrating_to_host = trunk_port.get(\n portbindings.PROFILE, {}).get('migrating_to')\n if migrating_to_host and trunk_host != migrating_to_host:\n # Trunk is migrating now, so lets update host of the subports\n # to the new host already\n trunk_host = migrating_to_host\n\n # NOTE(status_police) Set the trunk in BUILD state before\n # processing subport bindings. 
The trunk will stay in BUILD\n # state until an attempt has been made to bind all subports\n # passed here and the agent acknowledges the operation was\n # successful.\n self._safe_update_trunk(\n trunk, status=trunk_consts.TRUNK_BUILD_STATUS)\n\n for port_id in port_ids:\n try:\n updated_port = self._handle_port_binding(context, port_id,\n trunk, trunk_host)\n # NOTE(fitoduarte): consider trimming down the content\n # of the port data structure.\n updated_ports.append(updated_port)\n except trunk_exc.SubPortBindingError as e:\n LOG.error(\"Failed to bind subport: %s\", e)\n\n # NOTE(status_police) The subport binding has failed in a\n # manner in which we cannot proceed and the user must take\n # action to bring the trunk back to a sane state.\n self._safe_update_trunk(\n trunk, status=trunk_consts.TRUNK_ERROR_STATUS)\n return []\n except Exception as e:\n msg = (\"Failed to bind subport port %(port)s on trunk \"\n \"%(trunk)s: %(exc)s\")\n LOG.error(msg, {'port': port_id, 'trunk': trunk.id, 'exc': e})\n\n if len(port_ids) != len(updated_ports):\n self._safe_update_trunk(\n trunk, status=trunk_consts.TRUNK_DEGRADED_STATUS)\n\n return updated_ports", "async def _async_process_ports(self, ports: list[ListPortInfo]) -> None:\n for port in ports:\n if port.vid is None and port.pid is None:\n continue\n await self._async_process_discovered_usb_device(usb_device_from_port(port))", "def set_port_pullups(self, port, value):\n if port == 1:\n self.__port_b_pullup = value\n self.__bus.write_byte_data(self.__ioaddress, self.GPPUB, value)\n else:\n self.__port_a_pullup = value\n self.__bus.write_byte_data(self.__ioaddress, self.GPPUA, value)\n return", "async def _notifyUpdate(self):\n for observer in self.__observers:\n await observer.updateSSE()", "def update_port(self, context, port_id, port):\n LOG.debug(_(\"NeutronRestProxyV2: update_port() called\"))\n\n self._warn_on_state_status(port['port'])\n\n # Validate Args\n orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)\n with context.session.begin(subtransactions=True):\n # Update DB\n new_port = super(NeutronRestProxyV2,\n self).update_port(context, port_id, port)\n self._update_extra_dhcp_opts_on_port(context, port_id, port,\n new_port)\n if (portbindings.HOST_ID in port['port']\n and 'id' in new_port):\n host_id = port['port'][portbindings.HOST_ID]\n porttracker_db.put_port_hostid(context, new_port['id'],\n host_id)\n new_port = self._extend_port_dict_binding(context, new_port)\n\n # update on networl ctrl\n mapped_port = self._map_state_and_status(new_port)\n self.servers.rest_update_port(orig_port[\"tenant_id\"],\n orig_port[\"network_id\"],\n mapped_port, port_id)\n\n if (new_port.get(\"device_id\") != orig_port.get(\"device_id\") and\n orig_port.get(\"device_id\")):\n try:\n self.servers.rest_unplug_interface(orig_port[\"tenant_id\"],\n orig_port[\"network_id\"],\n orig_port[\"id\"])\n device_id = new_port.get(\"device_id\")\n if device_id:\n self.rest_plug_interface(new_port[\"tenant_id\"],\n new_port[\"network_id\"],\n new_port, device_id)\n\n except RemoteRestError:\n with excutils.save_and_reraise_exception():\n port_update = {\"port\": {\"status\": \"ERROR\"}}\n super(NeutronRestProxyV2, self).update_port(\n context,\n new_port[\"id\"],\n port_update\n )\n\n # return new_port\n return new_port", "def on_dts_state_change(self, state):\n \n switch = {\n rwdts.State.CONFIG: rwdts.State.INIT,\n rwdts.State.INIT: rwdts.State.REGN_COMPLETE,\n rwdts.State.REGN_COMPLETE: rwdts.State.RUN,\n }\n\n handlers = {\n 
rwdts.State.INIT: self.init,\n rwdts.State.RUN: self.run,\n }\n\n # Transition application to next state\n handler = handlers.get(state, None)\n if handler is not None:\n yield from handler()\n\n # Transition dts to next state\n next_state = switch.get(state, None)\n self.log.info(\"DTS transition from {} -> {}\".format(state, next_state))\n\n if next_state is not None:\n self._dts.handle.set_state(next_state)", "def _update_port_config(port_config):\n\n # Update network config for port node\n _update_network_config(port_config)\n\n # Update network fixed ips config\n _update_fixed_ips_config(port_config)\n\n # Update security groups config for port node\n _update_security_groups_config(port_config)", "def modify_rstp_ports(self, ports, **kwargs):\n pass", "def update_state(self):\n for listener in self.listeners:\n listener['callback']()", "def before_update(self, introspection_data, node_info, **kwargs):\n inventory = utils.get_inventory(introspection_data)\n\n ironic_ports = node_info.ports()\n\n for iface in inventory['interfaces']:\n if iface['name'] not in introspection_data['all_interfaces']:\n continue\n\n mac_address = iface['mac_address']\n port = ironic_ports.get(mac_address)\n if not port:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, matching port not found in Ironic.\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n # Determine the physical network for this port.\n # Port not touched in here.\n physnet = self.get_physnet(port, iface['name'], introspection_data)\n if physnet is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no physical network mapping\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n patch = self._get_physnet_patch(physnet, port)\n if patch is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no update required\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n try:\n node_info.patch_port(port, [patch])\n except exceptions.BadRequestException as e:\n LOG.warning(\"Failed to update port %(uuid)s: %(error)s\",\n {'uuid': port.id, 'error': e},\n node_info=node_info)", "def process_port_learn(self, dp_name, port, mac, vlan):\n with self._lock:\n device = self._port_device_mapping.setdefault((dp_name, port), DeviceEntry())\n device.mac = mac\n device.vlan = vlan\n device.port_up = True\n device.assigned = self._mac_assignments.get(mac)\n self._send_device_port_event(device)", "def setup_logical_port_connectivity(self, context, port_db):\n pass", "def list_ports(state):\n\tstate.report()", "def update_port_precommit(self, port_context):\n # TODO(ijw): optimisation: the update port may leave the\n # binding state the same as before if someone updated\n # something other than the binding on the port, but this\n # way we always send it out and it's the far end's job to\n # ignore it. 
Doing less work is nevertheless good, so we\n # should in future avoid the send.\n\n # unbind port from old host, if already bound\n if port_context.original_binding_levels is not None:\n prev_bind = port_context.original_binding_levels[-1]\n\n if (prev_bind is not None and\n prev_bind.get(api.BOUND_DRIVER) == self.MECH_NAME and\n port_context.host != port_context.original_host):\n\n # Note that we skip this step if the change happens while\n # 'unbinding' and rebinding to the same host - it's probably\n # an update of extraneous detail and not really a request\n # that requires binding.\n\n self.communicator.unbind(port_context._plugin_context.session,\n port_context.original,\n port_context.original_host,\n prev_bind[api.BOUND_SEGMENT]\n )\n\n # (Re)bind port to the new host, if it needs to be bound\n if port_context.binding_levels is not None:\n current_bind = port_context.binding_levels[-1]\n\n if (current_bind is not None and\n current_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n\n binding_type = self.get_vif_type(port_context)\n # Remove port membership from any previously associated\n # security groups for updating remote_security_group_id ACLs\n self.communicator.unbind_port_from_remote_groups(\n port_context._plugin_context.session,\n port_context.original,\n port_context.current)\n\n self.communicator.bind(port_context._plugin_context.session,\n port_context.current,\n current_bind[api.BOUND_SEGMENT],\n port_context.host,\n binding_type)\n\n # TODO(ijW): The agent driver checks for a change of\n # host, but we're oddly seeing that the orig_host is\n # always set. Should confirm if this is a problem or\n # not.\n self._insert_provisioning_block(port_context)", "def __update_observers(self):\n for observer in self.__observers: \n # print(\"hello\")\n observer.update(self)", "def db2_remote_controller(self, update_rate=10, recv_buf_size=5):\n svrsock, clnsock, clnaddr = init_connection(self.ip_addr, self.port)\n self.conn_flag = 1\n #if input('PRESS ENTER TO START REMOTE...\\nor input \"e\" to exit\\n') is 'e':\n # exit(0)\n kb_monitor = KeyboardListen(sock=clnsock)\n recv_maintainer = RecvMaintainer(sock=clnsock, buf_size=recv_buf_size, end_indicator='#')\n keyboard.hook(kb_monitor.kbaction_callback)\n threading.Thread(target=recv_maintainer.recv_maintain).start()\n while True:\n if recv_maintainer.latest_state_data() is None:\n continue\n self.poll_height = recv_maintainer.latest_state_data()[0]\n self.park_state = recv_maintainer.latest_state_data()[1]\n self.battery = recv_maintainer.latest_state_data()[2]\n self.left_encoder = recv_maintainer.latest_state_data()[3]\n self.right_encoder = recv_maintainer.latest_state_data()[4]\n print('STATE INFO:')\n print(' Poll Height:', self.poll_height)\n print(' Park State:', self.park_state)\n print(' Battary:', self.battery)\n print(' Left Encoder:', self.left_encoder)\n print('Right Encoder:', self.right_encoder)\n\n time.sleep(1 / update_rate)\n os.system('clear')", "def old_changes(self):\n from couchdbkit import Consumer\n\n c = Consumer(self.couch_db, backend='gevent')\n while True:\n try:\n c.wait(self.parsing_processor, since=self.since, filter=self.couch_filter,\n heartbeat=WAIT_HEARTBEAT, feed='continuous', timeout=30000, **self.extra_args)\n except Exception, ex:\n pillow_logging.exception(\"Exception in form listener: %s, sleeping and restarting\" % ex)\n gevent.sleep(RETRY_INTERVAL)", "def activate(self, ext_ip, ext_port):\n self.sql_manager.port_update(self.id, external_ip=ext_ip, external_port=ext_port)\n 
self.external_port = ext_port\n self.external_ip = ext_ip", "def test_port_status_matchfb303(self):\n for _pnum, pstate in self.client.getAllPortInfo().items():\n self.assertEqual(pstate.operState,\n self.client.getCounter(\"%s.up\" % pstate.name))", "def update(clients, context, name=None):\n port_id = context['port_id']\n logger.info(\"Taking action port.update {}.\".format(port_id))\n neutron = clients.get_neutron()\n body = {'port': {}}\n if name is not None:\n body['port']['name'] = name\n neutron.update_port(port_id, body=body)", "def startListening(self):\n mgr = self.cxn.manager\n # example of Signal processing:\n # server = self.cxn[self.selectedADR]\n # update_state = lambda c, payload: self.updateInterface()\n # yield server.signal_state_changed(self.ID)\n # yield server.addListener(listener = update_state, source=None,ID=self.ID)\n\n # state update (only if the message is from the correct ADR server)\n update_state = lambda c, (s,payload): self.updateInterface() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(update_state, source=mgr.ID, ID=101)\n yield mgr.subscribe_to_named_message('State Changed', 101, True)\n # log update\n update_log = lambda c, (s,(t,m,a)): self.updateLog(t,m,a) \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(update_log, source=mgr.ID, ID=102)\n yield mgr.subscribe_to_named_message('Log Changed', 102, True)\n # magging up stopped\n mag_stop = lambda c, (s,payload): self.magUpStopped() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(mag_stop, source=mgr.ID, ID=103)\n yield mgr.subscribe_to_named_message('MagUp Stopped', 103, True)\n # regulation stopped\n reg_stop = lambda c, (s,payload): self.regulationStopped() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(reg_stop, source=mgr.ID, ID=104)\n yield mgr.subscribe_to_named_message('Regulation Stopped', 104, True)\n # magging up started\n mag_start = lambda c, (s,payload): self.magUpStarted() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(mag_start, source=mgr.ID, ID=105)\n yield mgr.subscribe_to_named_message('MagUp Started', 105, True)\n # regulation started\n reg_start = lambda c, (s,payload): self.regulationStarted() \\\n if self.correctServer(s) else -1\n self.cxn._cxn.addListener(reg_start, source=mgr.ID, ID=106)\n yield mgr.subscribe_to_named_message('Regulation Started', 106, True)\n # servers starting and stopping\n serv_conn_func = lambda c, (sID, sName): self.serverChanged(sName)\n serv_disconn_func = lambda c, (sID, sName): self.serverChanged(sName)\n self.cxn._cxn.addListener(serv_conn_func, source=mgr.ID, ID=107)\n self.cxn._cxn.addListener(serv_disconn_func, source=mgr.ID, ID=108)\n yield mgr.subscribe_to_named_message('Server Connect', 107, True)\n yield mgr.subscribe_to_named_message('Server Disconnect', 108, True)", "def _processDeviceUpdated(self, action: UpdateAppliedAction) -> List[Tuple]:\n session = self._dbSessionCreator()\n try:\n deviceInfo = (\n session.query(DeviceInfoTuple)\n .filter(DeviceInfoTuple.deviceId == action.deviceId)\n .one()\n )\n\n deviceId = deviceInfo.deviceId\n\n if action.appVersion is not None:\n deviceInfo.appVersion = action.appVersion\n\n if action.updateVersion is not None:\n deviceInfo.updateVersion = action.updateVersion\n\n session.commit()\n\n self._notifierController.notifyDeviceInfo(deviceId=deviceId)\n\n return []\n\n finally:\n session.close()", "def or_conn_status_event(self, event):\r\n pass", "def OnUpdate(self, event):\n # Check remote - TODO\n # Query 
database for status of processing\n # 2018-04-11 13:25:56.914000\n self.controller.checkRemote()\n seriesprocesses = self.controller.db.getActiveProcesses()\n self.m_dataViewListCtrlCloud.DeleteAllItems()\n for series in seriesprocesses:\n # time delta\n t1 = datetime.datetime.strptime(series[4], '%Y-%m-%d %H:%M:%S.%f')\n if series[5] is not None:\n t2 = datetime.datetime.strptime(series[5], '%Y-%m-%d %H:%M:%S.%f')\n else:\n t2 = datetime.datetime.now()\n tdiff = t2 - t1\n # Load to window\n self.m_dataViewListCtrlCloud.AppendItem(\n [False, series[0], series[1], series[2].upper(), self.getStatus(series[3]), str(tdiff)])", "def modify_mstp_ports(self, ports, instance=0, **kwargs):\n pass", "def _set_server_event(servers):\n if not servers:\n return\n\n event = Variable.get_event(with_server=False)\n\n for server in servers:\n server.event = event if event and event['server_ip_and_port'] and event['server_ip_and_port'] == server.ip_and_port else None", "def _set_up_change_notifier(conn, table: str, actions: Set[str]):\n\n # build function to create in the database\n channel = f\"{table}_table_change\"\n func_name = f\"notify_{table}_change()\"\n func = f\"\"\"\n CREATE OR REPLACE FUNCTION {func_name}\n RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('{channel}','changed');\n RETURN NULL;\n END;\n $$ LANGUAGE plpgsql;\n \"\"\"\n\n # build triggers that will run func on each action\n triggers = \"\"\n for action in actions:\n if action.upper() in ServerSockets.DbActions:\n trigger_name = f\"{table}_notify_{action.lower()}\"\n\n triggers += f\"\"\"\n DROP TRIGGER IF EXISTS {trigger_name} ON {table};\n CREATE TRIGGER {trigger_name}\n AFTER {action} ON {table}\n FOR EACH ROW EXECUTE PROCEDURE {func_name};\n \"\"\"\n else:\n raise TypeError(\n \"All actions must be either INSERT, UPDATE or DELETE\")\n\n # insert function and respective triggers into the database\n cur = conn.cursor()\n cur.execute(func)\n if triggers:\n cur.execute(triggers)\n return channel", "def update(self):\n self.events.update()", "def update_port_precommit(self, mech_context):\n LOG.debug(\"update_port_precommit(self: called\")", "def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def update_subport_bindings(self, context, subports):\n el = context.elevated()\n ports_by_trunk_id = collections.defaultdict(list)\n updated_ports = collections.defaultdict(list)\n\n for s in subports:\n ports_by_trunk_id[s['trunk_id']].append(s['port_id'])\n for trunk_id, subport_ids in ports_by_trunk_id.items():\n trunk = trunk_objects.Trunk.get_object(el, id=trunk_id)\n if not trunk:\n LOG.debug(\"Trunk not found. 
id: %s\", trunk_id)\n continue\n\n trunk_updated_ports = self._process_trunk_subport_bindings(\n el, trunk, subport_ids)\n updated_ports[trunk.id].extend(trunk_updated_ports)\n\n return updated_ports", "def update_GPIB(self):\r\n self.check_errors(False, False)\r\n #self.I_source_list = [\r\n # x for x in self.resources\r\n # if (str(self.GPIB.value()) and 'GPIB') in x\r\n # ]\r\n\r\n if not self.errors_exist:\r\n self.I_source = self.rm.open_resource(self.I_source_list[0])\r\n self.I_source.write('*RST; OUTP:RESP SLOW')\r\n self.connected = bool(self.I_source) and self.V_meter_connected\r\n if self.connected:\r\n self.update_source_range_type()\r\n self.update_source_range()\r\n self.update_volt_range()\r\n self.set_compliance_abort()\r\n self.in_buffer = int(self.I_source.query(\"TRAC:POIN:ACT?\"))", "def update_port_ip(self, dpid, port, ip):\n # TODO Connection between mac and ip of host?\n if self.nb_api is None:\n self.nb_api = api_nb.NbApi.get_instance(False)\n\n port_id = \"{}:{}\".format(dpid, port)\n try:\n lport = self.nb_api.get(l2.LogicalPort(id=port_id))\n for ip_addr_obj in lport.ips:\n if str(ip_addr_obj) == ip:\n # already learned\n return\n lport.ips.append(ip)\n self.nb_api.update(lport)\n # TODO: Remove old ips\n except DBKeyNotFound:\n # TODO: Create Port?\n print \"Key not Found!!\"", "def configure_dcbx_app(self, ports, **kwargs):\n pass", "def connect_signals(self):\n # row selection\n selectionModel = self.itemView.selectionModel()\n selectionModel.selectionChanged.connect(self.selection_changed)\n # pause action\n self.pauseBtn.toggled.connect(self.__toggle_pause) \n # open prefs action\n self.prefBtn.clicked.connect(_open_prefs)\n # mode action\n self.modeComboBox.currentTextChanged.connect(self.__set_mode) \n # filter action\n self.filterMenu.triggered.connect(self.__trigger_filter)\n # update actions\n self.act_forceUpdateAll.triggered.connect(localization.forceUpdateAll)\n #self.act_forceUpdateSelectedNodes.triggered.connect(localization.forceUpdateSelectedNodes)\n self.act_forceUpdateSelectedNodes.triggered.connect(self.__force_update_selected)\n self.act_forceUpdateOnDemand .triggered.connect(localization.forceUpdateOnDemand)\n\n # do the signals coming from Nuke\n if IN_NUKE:\n # ugly way to find the action since the hiero module is not available in time for\n # a docked panel in the startup workspace\n for act in nuke.menu('Nuke').findItem('Cache/Localization').action().menu().actions():\n if act.objectName() == 'foundry.project.localcachetoggleNuke':\n pause_action = act\n break\n else:\n pause_action = hiero.ui.findMenuAction('foundry.project.localcachetoggle')\n\n # connect signals for localization state and pause so they can stay in sync\n pause_action.toggled.connect(self.__update_pause_btn)\n nuke.localizationPanelSignals.modeChanged.connect(self.modeComboBox.setCurrentText)\n nuke.localizationPanelSignals.inputUpdated.connect(self.model.knob_cb_updates_row)\n # connect signals for colour preferences so changes are live\n nuke.localizationPanelSignals.new_color_values.connect(self.itemView.update_colours_from_prefs)\n # update panel UI to reflect current state\n self.modeComboBox.setCurrentText(localization.mode().capitalize())\n self.__update_pause_btn(localization.isLocalizationPaused())\n # auto scroll\n self.model.localizing_index.connect(self.__scroll_view)\n \n if IN_NUKE:\n # If we are in Nuke we can run the callbacks now because Nuke instantiates the panel on the fly\n # which means the required objects we want to connect are available.\n # In 
Hiero this is not run until a project has been loaded or a new one created.\n self.add_callbacks()", "def _notify_observers(self):\n for observer in self.observers:\n observer.notify(self.game_state)", "def on_connect(client, userdata, flags, rcdata):\n client.subscribe(\"diy/system/fire\", 1)\n client.subscribe(\"diy/system/panic\", 1)\n client.subscribe(\"diy/system/who\", 1)", "def fusion_api_edit_interconnect_ports(self, body, uri, api=None, param='', headers=None):\n param = '/update-ports%s' % param\n return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)", "def _on_config_changed(self, _):\n self._configure_pod()", "def ports(self, ports):\n\n self._ports = ports", "def _update_handler(self, state):\n self._schedule_remaining_events()", "def make_connections(self):\n try:\n self.datatype.currentIndexChanged.connect(self.set_datatype)\n self.dyad.valueChanged.connect(self.set_dyad)#\n self.vid_or_channel.valueChanged.connect(self.set_channel_or_vid)\n except Exception as e:\n QMessageBox.about(self, str(e))" ]
[ "0.62168676", "0.6123209", "0.60724276", "0.60613334", "0.60579395", "0.6057897", "0.60528636", "0.59052724", "0.58132577", "0.5790811", "0.5715928", "0.5668344", "0.56622344", "0.5567299", "0.55424833", "0.54548216", "0.5438808", "0.5418422", "0.5405286", "0.5364919", "0.5348141", "0.5343965", "0.530156", "0.53005517", "0.5288755", "0.5283231", "0.5280578", "0.52697295", "0.52376074", "0.5187858", "0.51771677", "0.51584345", "0.51573783", "0.5153079", "0.5152539", "0.5137488", "0.51226956", "0.5120204", "0.5120011", "0.5118486", "0.51035494", "0.50778604", "0.50555724", "0.50455064", "0.5041773", "0.5024394", "0.50108564", "0.50092286", "0.5006514", "0.50021404", "0.49932173", "0.49924502", "0.49873984", "0.49785027", "0.4977464", "0.49772465", "0.496331", "0.49560812", "0.49395978", "0.49323404", "0.49301916", "0.49141657", "0.49101132", "0.4909387", "0.49000856", "0.48995557", "0.48956323", "0.4882528", "0.48769084", "0.48695776", "0.48664784", "0.4861033", "0.48544037", "0.4854346", "0.48487645", "0.48385793", "0.48294538", "0.4826938", "0.48207167", "0.48205042", "0.48176914", "0.47984627", "0.47909927", "0.4788805", "0.47831753", "0.47715867", "0.47650555", "0.47640198", "0.47638312", "0.47614422", "0.47599563", "0.47594762", "0.4754448", "0.47484684", "0.47395578", "0.4736122", "0.47317576", "0.4730534", "0.4728067", "0.4724203" ]
0.6788266
0
Select CONFIG_DB PORT table changes, once there is a port configuration add/remove, notify observers
def handle_port_config_change(sel, asic_context, stop_event, port_mapping, logger, port_change_event_handler):
    if not stop_event.is_set():
        (state, _) = sel.select(SELECT_TIMEOUT_MSECS)
        if state == swsscommon.Select.TIMEOUT:
            return
        if state != swsscommon.Select.OBJECT:
            logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')
            return

        read_port_config_change(asic_context, port_mapping, logger, port_change_event_handler)
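A minimal sketch of how a handler like this is typically driven, assuming the swsscommon Python bindings (DBConnector, SubscriberStateTable, Select) behave as in sonic-swss-common; the helper names, the SELECT_TIMEOUT_MSECS value, the single-ASIC asic_context, and the on_port_change callback are illustrative assumptions, not taken from the record above.

from swsscommon import swsscommon

SELECT_TIMEOUT_MSECS = 1000  # assumed value; the handler above only references the constant

def make_port_config_selector():
    # Subscribe to CONFIG_DB PORT table changes (assumed swsscommon API).
    config_db = swsscommon.DBConnector("CONFIG_DB", 0)
    port_tbl = swsscommon.SubscriberStateTable(config_db, "PORT")
    sel = swsscommon.Select()
    sel.addSelectable(port_tbl)
    # Map each subscribed table to its ASIC index (single-ASIC case shown).
    asic_context = {port_tbl: 0}
    return sel, asic_context

def poll_port_config_changes(sel, asic_context, stop_event, port_mapping, logger, on_port_change):
    # Hypothetical driver loop: keep invoking the handler until asked to stop.
    while not stop_event.is_set():
        handle_port_config_change(sel, asic_context, stop_event, port_mapping,
                                  logger, on_port_change)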
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices", "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port_no = msg.desc.port_no\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n reason_dict = {ofproto.OFPPR_ADD: \"added\",\n ofproto.OFPPR_DELETE: \"deleted\",\n ofproto.OFPPR_MODIFY: \"modified\", }\n\n if reason in reason_dict:\n\n print \"switch%d: port %s %s\" % (dpid, reason_dict[reason], port_no)\n else:\n print \"switch%d: Illeagal port state %s %s\" % (port_no, reason)", "def db_change_callback(self, table, key, action, value, topic=None):\n if self.USE_CACHE:\n # Update cache\n if action == 'create' or action == 'set':\n if table == 'lport':\n self.cache_logical_port_by_port_id[key] = self.nb_api.get(l2.LogicalPort(id=key))\n if table == 'lrouter':\n self.cache_logical_router_by_dpid[key] = self.nb_api.get(l3.LogicalRouter(id=key))\n if action == 'del':\n if table == 'lport':\n # default if key does not exists is None\n self.cache_logical_port_by_port_id.pop(key, None)\n if table == 'lrouter':\n self.cache_logical_router_by_dpid.pop(key, None)\n\n print(\"L3 App: Received Update for table {} and key {} action {}\".format(table, key, action))\n if action == 'set':\n if table == 'lport':\n if self.USE_CACHE:\n updated_port = self.cache_logical_port_by_port_id[key]\n else:\n updated_port = self.nb_api.get(l2.LogicalPort(id=key))\n\n if len(updated_port.ips) is not 0:\n for ip in updated_port.ips:\n # new ip discovered\n # install route on every datapath\n # only update the other datapaths\n for dpid, datapath in self.cache_datapath_by_dpid.iteritems():\n out_port, new_src_mac, new_dst_mac = self.get_next_hop(dpid, ip)\n if out_port is None:\n continue\n out_port_id = \"{}:{}\".format(dpid, out_port)\n lout_port = self.nb_api.get(l2.LogicalPort(id=out_port_id))\n if ip in lout_port.ips:\n continue\n # else add new ip and install flow\n lout_port.ips.append(ip)\n self.nb_api.update(lout_port)\n # install flow\n print \"L3 IP via pubsub: installing flow on {}: out_port: {} src_mac:\" \\\n \" {} dst_mac: {}, ip: {}\".format(datapath.id, out_port, new_src_mac, new_dst_mac, ip)\n 
self.add_flow_gateway_for_ip(datapath, int(out_port), ip, new_src_mac, new_dst_mac)", "def update_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.update_port(port)\n except:\n pass", "def db_changed(self):\n self.dbstate.db.connect('person-add', self.update)\n self.dbstate.db.connect('person-delete', self.update)\n self.dbstate.db.connect('person-update', self.update)\n self.dbstate.db.connect('family-add', self.update)\n self.dbstate.db.connect('family-delete', self.update)\n self.dbstate.db.connect('family-update', self.update)", "def datachange_notification(self, node, val, data):\n \n logger.debug(\"New data change event. node:{}, value:{}\".format(node, val))\n \n # Sorry about these lines of code, but I don't see any nicer way of determining the port number than from \n # the identifier string. Then splitting it up to isolate the port number.\n # Example \"Status.Port_2.Selected\" is split into ['Status', 'Port_2', 'Selected'] then 'Port_2' is split into \n # ['Port', '2'] and then the '2' is turned into an intiger.\n path_list = str(node.nodeid.Identifier).split(\".\")\n\n # We can safely assume that the last term is the tag that updated.\n tag = path_list[-1] \n \n # Figure out the port number\n port_number = None\n if 'Port' in path_list[1]:\n port_number = int(path_list[1].split(\"_\")[-1]) \n \n \"\"\" Switch for each possible tag\"\"\"\n # If the command tag \"Select\" changes go select that port with the instructions saved in the command tag. \n if tag == 'Select' and port_number:\n if val == True:\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Instructions\".format(port_number))\n instructions = node.get_value()\n self._pbl.select_port(port_number, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Select\".format(port_number))\n node.set_value(False)\n \n elif tag == 'Deselect' and port_number:\n if val == True:\n self._pbl.deselect_port(port_number, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Deselect\".format(port_number))\n node.set_value(False)\n\n elif tag == 'ContentDisplayName' and port_number:\n self._pbl.set_content_key(port_number,'display_name', str(val))\n elif tag == 'ContentName' and port_number:\n self._pbl.set_content_key(port_number,'name', str(val))\n elif tag == 'ContentDescription' and port_number:\n self._pbl.set_content_key(port_number,'description', str(val))\n elif tag == 'ContentImagePath' and port_number:\n self._pbl.set_content_key(port_number,'image_path', str(val))\n \n elif tag == 'Select' and 'ByContent' in path_list[1]:\n if val == True:\n instructions = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Instructions\").get_value()\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n _, selected_port = self._pbl.select_content(name = name, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Select\")\n node.set_value(False)\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Result\")\n node.set_value(selected_port)\n\n elif tag == 'Deselect' and 'ByContent' in path_list[1]:\n if val == True:\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n self._pbl.deselect_content(name = name, work_finished=True)\n # Reset the select flag\n node = 
self.ua_server.get_node(\"ns=2;s=Command.ByContent.Deselect\")\n node.set_value(False)", "def handle_port_update_event(sel, asic_context, stop_event, logger, port_change_event_handler):\n if not stop_event.is_set():\n (state, _) = sel.select(SELECT_TIMEOUT_MSECS)\n if state == swsscommon.Select.TIMEOUT:\n return\n if state != swsscommon.Select.OBJECT:\n logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')\n return\n for port_tbl in asic_context.keys():\n while True:\n (key, op, fvp) = port_tbl.pop()\n if not key:\n break\n if not validate_port(key):\n continue\n fvp = dict(fvp) if fvp is not None else {}\n if 'index' not in fvp:\n fvp['index'] = '-1'\n port_index = int(fvp['index'])\n port_change_event = None\n if op == swsscommon.SET_COMMAND:\n port_change_event = PortChangeEvent(key,\n port_index,\n asic_context[port_tbl],\n PortChangeEvent.PORT_SET,\n fvp)\n elif op == swsscommon.DEL_COMMAND:\n port_change_event = PortChangeEvent(key,\n port_index,\n asic_context[port_tbl],\n PortChangeEvent.PORT_DEL,\n fvp)\n if port_change_event is not None:\n port_change_event_handler(port_change_event)", "def get_all_port(self, conf, dpid):\n\t\tpass", "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added port=%s\", port)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted port=%s\", port)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified port=%s\", port)\n else:\n self.logger.info(\"Illegal port state port=%s %s\", port, reason)", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)", "def _on_config_changed(self, _):\n self._configure_pod()", "def setup_logical_port_connectivity(self, context, port_db):\n pass", "def portconfig():\r\n print('''\\n%s at %s acting as user %s\r\n\\nPort Configuration Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get current port configuration\r\n 2 - Get current port status\r\n 3 - Get current port counters\r\n 4 - Get SFP status\r\n 5 - Change Port Configuration\r\n 6 - Shut Down or Activate Port\r\n 7 - Reset Port Counters\r\n 8 - Back\r\n 9 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n portconfig()\r\n execute = {1: PACKETMASTER.port_config,\r\n 2: PACKETMASTER.port_info,\r\n 3: PACKETMASTER.port_statistics,\r\n 4: PACKETMASTER.sfp_info,\r\n 5: PACKETMASTER.set_port_config_guided,\r\n 6: PACKETMASTER.port_on_off_guided,\r\n 7: PACKETMASTER.reset_port_counters,\r\n 8: hardwareconfig,\r\n 9: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n portconfig()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n portconfig()", "def 
interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n if poll_interval is not None:\n port_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def config_db():", "def port_configure(self,port,**config):\n if not port in self.ports:\n self.ports[port] = {}\n\n for k,v in config.items():\n self.ports[port][k] = v", "def port_update_end(self, payload):\n port = DictModel(payload['port'])\n network = self.cache.get_network_by_id(port.network_id)\n if network:\n self.cache.put_port(port)\n self.call_driver('reload_allocations', network)", "def servicesChanged(self) -> None:\n ...", "def refreshPorts(self, event):\n logging.debug(\"Refreshing ports.\")\n self.availablePorts = self.controller.getAvailablePorts()\n\n # Delete old dropdown options\n self.portSelector[\"menu\"].delete(0, \"end\")\n for value in self.availablePorts:\n\n def _callback(value=value):\n self.controller.updatePort(value)\n self.serialPortVar.set(value)\n\n self.portSelector[\"menu\"] \\\n .add_command(label=value,\n command=_callback)\n return", "def treat_devices_added_or_updated(self, details):\n device = details['device']\n LOG.debug(\"Processing port: %s\", device)\n # REVISIT(ivar): this is not a public facing API, we will move to\n # the right method once the redesign is complete.\n port = self.bridge_manager.get_vif_port_by_id(device)\n if port:\n gbp_details = details.get('gbp_details')\n trunk_details = details.get('trunk_details')\n neutron_details = details.get('neutron_details')\n if gbp_details and 'port_id' not in gbp_details:\n # The port is dead\n details.pop('port_id', None)\n if (gbp_details and gbp_details.get('host') and\n gbp_details['host'] != self.host):\n self.port_unbound(device)\n return False\n elif neutron_details and 'port_id' in neutron_details:\n LOG.info(\"Port %(device)s updated. 
Details: %(details)s\",\n {'device': device, 'details': details})\n # Inject GBP/Trunk details\n port.gbp_details = gbp_details\n port.trunk_details = trunk_details\n self.treat_vif_port(port, neutron_details['port_id'],\n neutron_details['network_id'],\n neutron_details['network_type'],\n neutron_details['physical_network'],\n neutron_details['admin_state_up'],\n neutron_details['fixed_ips'],\n neutron_details['device_owner'],\n neutron_details['segmentation_id'])\n # update plugin about port status\n if neutron_details.get('admin_state_up'):\n LOG.debug(\"Setting status for %s to UP\", device)\n self.plugin_rpc.update_device_up(\n self.context, device, self.agent_id, self.host)\n else:\n LOG.debug(\"Setting status for %s to DOWN\", device)\n self.plugin_rpc.update_device_down(\n self.context, device, self.agent_id, self.host)\n LOG.info(\"Configuration for device %s completed.\",\n device)\n else:\n LOG.warn(\"Device %s not defined on plugin\", device)\n if port and port.ofport != -1:\n self.port_unbound(port.vif_id)\n return False\n else:\n # The port disappeared and cannot be processed\n LOG.info(\"Port %s was not found on the integration bridge \"\n \"and will therefore not be processed\", device)\n self.port_unbound(device)\n return False\n return True", "def check_port_connections(self):\n all_ports = crest.get_all_ports(self.model)\n influences_to_target = {p: [] for p in all_ports}\n updates_to_target = {p: [] for p in all_ports}\n actions_to_target = {p: [] for p in all_ports}\n\n # fill data stores\n for inf in crest.get_all_influences(self.model):\n influences_to_target[inf.target].append(inf)\n\n for up in crest.get_all_updates(self.model):\n updates_to_target[up.target].append(up)\n\n for action in crest.get_all_actions(self.model):\n actions_to_target[action.target].append(action)\n\n for port in all_ports:\n assert not (len(influences_to_target[port]) > 0 and (\n len(updates_to_target[port]) > 0 or len(actions_to_target[port]) > 0)\n ), f\"There are [influences and (updates or actions)] writing to port {port._name} (entity: {port._parent._name})\"\n\n assert len(influences_to_target[port]) < 2, f\"There are two influences writing to {port._name}\"\n\n states = [update.state for update in updates_to_target[port]]\n assert len(states) == len(set(states)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple updates linked to the same state\"\n\n transitions = [action.transition for action in actions_to_target[port]]\n assert len(transitions) == len(set(transitions)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple actions linked to the same transition\"", "def get_port(self, conf, dpid, port_id):\n\t\tpass", "def enable_cable_ports(cid):\n\n SQL.execute('''\n SELECT \n guid,\n port\n FROM \n cable_ports \n WHERE\n cid = ?\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall(): \n if not DISABLE_PORT_STATE_CHANGE: \n ib_mgt.enable_port(int(row['guid']), int(row['port']))", "def update_ports(self):\n \n # fetch only those ports having\n # VID:PID == a valid (VID, PID) pair in target_vid_pid\n ports = []\n\n for valid_pair in self.target_vid_pid:\n vid_pid = valid_pair[0] + ':' + valid_pair[1]\n ports = ports + [p for p in list_ports.grep(vid_pid)]\n #ports = list_ports.comports()\n \n # add new ports to connected_ports\n # and update new_ports\n new_ports = []\n for p in ports:\n if not p in self.connected_ports:\n self.connected_ports.append(p)\n new_ports.append(p)\n\n # remove missing ports from devices_found\n # and update 
removed_ports\n removed_ports = []\n for p in self.connected_ports:\n if not p in ports:\n self.connected_ports.remove(p)\n removed_ports.append(p)\n\n return new_ports, removed_ports", "def _update_port_config(port_config):\n\n # Update network config for port node\n _update_network_config(port_config)\n\n # Update network fixed ips config\n _update_fixed_ips_config(port_config)\n\n # Update security groups config for port node\n _update_security_groups_config(port_config)", "def get_port_list(self):\r\n self.ports = Manager().dict()\r\n self.value = Manager().dict()\r\n self.sensors = dict()\r\n for p in self.device.ports['input']:\r\n if p.enabled:\r\n self.ports[p.number] = p\r\n self.value[p.number] = 'Connexion à la carte'\r\n self.sensors[p.number] = Sensor.get(p._type)", "def gpio_edge_listener(port):\n self.schedule_update_ha_state(True)", "def updateAvailablePorts(self):\n # Build a port list\n device_list_all = comports()\n self.device_choices = list()\n for device in device_list_all:\n self.device_choices.append(device[0])\n\n if len(self.device_choices) < 1:\n tkinter.messagebox.showerror('No Available Serial Ports','No serial ports are available.')", "def handle_config_change(self, msg):\n self.xmpp.event('groupchat_config_status', msg)\n self.xmpp.event('muc::%s::config_status' % msg['from'].bare , msg)", "def process_update_port(self, context, data, result):\n\n orginal_exten = copy.deepcopy(result)\n # Process extension data\n self._find_port_dict_extensions(\n result, None, session=context.session)\n\n port_ext = self._update_port_ext(\n result, data, session=context.session)\n switchports = self._update_switchports(\n result, data, session=context.session)\n self._find_port_dict_extensions(\n result, None, port_ext=port_ext,\n switchports=switchports, session=context.session)\n\n # We only want to commit on a state change\n if orginal_exten.get(\"commit\") != result[\"commit\"]:\n # If we are transitioning to active, validate\n if not orginal_exten.get(\"commit\") and result[\"commit\"]:\n self._validate_port_can_commit(\n result, None, session=context.session)", "def pull_port(self, port, update_fields=None):\n import_time = timezone.now()\n imported_port = self.import_port(\n port.vm.backend_id, port.backend_id, save=False\n )\n\n port.refresh_from_db()\n if port.modified < import_time:\n if not update_fields:\n update_fields = models.Port.get_backend_fields()\n\n update_pulled_fields(port, imported_port, update_fields)", "def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()", "def testGetConfigPortState(self):\n self.ports.getconfig_port_state(file_name = 'get_port_state.xml', port_ids = portsDict['port_ids'], port_states = portsDict['port_state'])", "def extend_hosting_port_info(self, context, port_db, hosting_info):\n pass", "def platform_config_update(config):\n global remote_port_map\n config[\"port_map\"] = remote_port_map.copy()\n config[\"caps_table_idx\"] = 0", "def update_port_postcommit(self, mech_context):\n LOG.debug(\"update_port_postcommit: called\")", "def activate(self, ext_ip, ext_port):\n self.sql_manager.port_update(self.id, external_ip=ext_ip, external_port=ext_port)\n self.external_port = ext_port\n self.external_ip = ext_ip", "def change(cls, db):\n cls.configs['db'] = db\n\n if cls.conn and cls.conn.open:\n cls.conn.select_db(db)", "def test_connections_updated(self):\n assert self.connection_config.connections == {self.new_connection_id}", "def __set_port_list(self):\n\n self._coms = [str(i.device) for i in 
sorted(self.ports)]", "def extend_port_dict(self, session, model, result):\n\n commit = self._get_port_attr(model, \"commit\")\n trunked = self._get_port_attr(model, \"trunked\")\n hardware_id = self._get_port_attr(model, \"switch:hardware_id\")\n switchports = self._get_port_attr(model, \"switch:ports\")\n if switchports is None:\n switchports = []\n if commit is None:\n commit = False\n\n port_ext = db.get_port_ext(result[\"id\"], session=session)\n if port_ext:\n LOG.info(\"Port %s does not have extension data\"\n % model[\"id\"])\n port_ext = port_ext.as_dict()\n result[\"commit\"] = port_ext[\"commit\"]\n result[\"trunked\"] = port_ext[\"trunked\"]\n if port_ext[\"hardware_id\"]:\n switchports = db.filter_switchports(\n hardware_id=port_ext[\"hardware_id\"],\n session=session)\n switchports = [sp.as_dict() for sp in switchports]\n result[\"switch:hardware_id\"] = port_ext[\"hardware_id\"]\n result[\"switch:ports\"] = switchports\n else:\n result[\"switch:hardware_id\"] = hardware_id\n result[\"commit\"] = commit\n result[\"trunked\"] = trunked\n result[\"switch:ports\"] = switchports", "def treat_devices_added(self, devices):\n LOG.info(_(\"treat_devices_added %s\"), devices)\n resync = False\n self.prepare_devices_filter(devices)\n for device in devices:\n LOG.debug(_(\"Port %s added\"), device)\n try:\n details = self.plugin_rpc.get_device_details(self.context,\n device,\n self.agent_id)\n except Exception as e:\n LOG.debug(_(\"Unable to get port details for \"\n \"%(device)s: %(e)s\"),\n {'device': device, 'e': e})\n resync = True\n continue\n if 'port_id' in details:\n LOG.info(_(\"Port %(device)s updated. Details: %(details)s\"),\n {'device': device, 'details': details})\n # If a device has been added but it's not active, don't\n # do anything with it. We'll add it later. 
Otherwise, configure\n # it.\n if details['admin_state_up']:\n # create the networking for the port\n network_type = details.get('network_type')\n if network_type:\n segmentation_id = details.get('segmentation_id')\n else:\n # compatibility with pre-Havana RPC vlan_id encoding\n vlan_id = details.get('vlan_id')\n (network_type,\n segmentation_id) = lconst.interpret_vlan_id(vlan_id)\n\n if self.routing_mgr.add_interface(details['network_id'],\n network_type,\n details['physical_network'],\n segmentation_id,\n details['port_id'],\n details['fixed_ips'],\n details['mac_address']):\n\n # update plugin about port status\n resp = self.plugin_rpc.update_device_up(self.context,\n device,\n self.agent_id,\n cfg.CONF.host)\n else:\n resp = self.plugin_rpc.update_device_down(self.context,\n device,\n self.agent_id,\n cfg.CONF.host)\n LOG.info(_(\"Update device response: %s\"), resp)\n else:\n LOG.info(_(\"Device %s not defined on plugin\"), device)\n return resync", "def notify_config_changes(self, is_new, data, diff):\n self.event(\n self.EV_CONFIG_CHANGED, {\"object\": self, \"is_new\": is_new, \"config\": data, \"diff\": diff}\n )", "def interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n if poll_interval:\n port_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def sync_with_database(self):\n # learn from db\n lports = self.nb_api.get_all(l2.LogicalPort)\n for lport in lports:\n port_id = \"{}:{}\".format(lport.lswitch.id, lport.id)\n self.cache_logical_port_by_port_id[port_id] = lport\n lrouters = self.nb_api.get_all(l3.LogicalRouter)\n for lrouter in lrouters:\n self.cache_logical_router_by_dpid[lrouter.id] = lrouter", "def on_config_change(self, config, section, key, value):\n \n if section == \"Makesmith Settings\":\n if key == \"COMport\":\n self.data.comport = value\n elif key == 'xPitch':\n print \"xPitch changed\"", "def test_protocols_updated(self):\n assert self.connection_config.protocols == {self.new_protocol_id}", "def port_desc_stats_reply_handler(self, ev):\n msg = ev.msg\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n config_dict = {ofproto.OFPPC_PORT_DOWN: \"Down\",\n ofproto.OFPPC_NO_RECV: \"No Recv\",\n ofproto.OFPPC_NO_FWD: \"No Farward\",\n ofproto.OFPPC_NO_PACKET_IN: \"No Packet-in\"}\n\n state_dict = {ofproto.OFPPS_LINK_DOWN: \"Down\",\n ofproto.OFPPS_BLOCKED: \"Blocked\",\n ofproto.OFPPS_LIVE: \"Live\"}\n\n ports = []\n for p in ev.msg.body:\n ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '\n 'state=0x%08x curr=0x%08x advertised=0x%08x '\n 'supported=0x%08x peer=0x%08x curr_speed=%d '\n 'max_speed=%d' %\n (p.port_no, p.hw_addr,\n p.name, p.config,\n p.state, p.curr, p.advertised,\n p.supported, p.peer, p.curr_speed,\n p.max_speed))\n\n if p.config in config_dict:\n config = config_dict[p.config]\n else:\n config = \"up\"\n\n if p.state in state_dict:\n state = state_dict[p.state]\n else:\n state = \"up\"\n port_feature = (config, state, p.curr_speed)\n self.port_features[dpid][p.port_no] = port_feature", "def probe_ports( self, ):\r\n ports = self.com_driver.list_available()\r\n self.gui.print_info_string( \"\" )\r\n self.gui.print_info_string( \"Reported Ports from driver:\" )\r\n self.gui.print_info_string( \"\" )\r\n if len( ports ) == 0:\r\n self.gui.print_info_string( \"None \\n\" )\r\n else:\r\n for i_port in ports:\r\n self.gui.print_info_string( i_port[0] )\r\n #self.gui.print_info_string( \"\\n\" )\r\n\r\n 
self.close_driver()\r\n\r\n self.gui.print_info_string( \"\\nProbe Ports from parameters:\\n\" )\r\n ports = self.com_driver.probe_available( self.parameters.port_list )\r\n ix_line = 0 # what is this ??\r\n for i_port in ports:\r\n ix_line += 1\r\n self.gui.print_info_string( str( i_port ) )\r\n if ix_line == 10:\r\n ix_line = 0\r\n self.gui.print_info_string( \"\\n\" )\r\n #logger.log( fll, a_str )\r\n\r\n return", "def cbDConfigPort( BoardNum, PortNum, Direction ):\n CHK( cbw.cbDConfigPort( BoardNum, PortNum, Direction ) )", "def process_create_port(self, context, port, result):\n\n # Process extension data\n port_ext = self._create_port_ext(result, port, context=context)\n switchports = self._update_switchports(result, port,\n session=context.session)\n self._find_port_dict_extensions(result, None, port_ext=port_ext,\n switchports=switchports,\n session=context.session)\n\n # Validate we can actually configure this port\n if result[\"commit\"]:\n self._validate_port_can_commit(result, None,\n session=context.session)", "def send_on_output_port_change(self):\n return self._send_on_output_port_change", "def platform_config_update(config):\n\n port_map = {}\n\n for (device, ports, socket_addr) in config[\"device_sockets\"]:\n for port in ports:\n port_map[(device, port)] = socket_addr\n\n # no default configuration for this platform\n\n config[\"port_map\"] = port_map", "def _find_port_dict_extensions(self, port_res, port_db, port_ext=None,\n switchports=None, session=None):\n if not port_ext:\n port_ext = db.get_port_ext(port_res[\"id\"], session=session)\n if not port_ext:\n LOG.error(\"Port %s does not have extension data\"\n % port_db[\"id\"])\n return\n port_ext = port_ext.as_dict()\n\n if not switchports:\n switchports = []\n if port_ext[\"hardware_id\"]:\n switchports = db.filter_switchports(\n hardware_id=port_ext[\"hardware_id\"], session=session)\n switchports = [sp.as_dict() for sp in switchports]\n\n port_res[\"switch:ports\"] = switchports\n port_res[\"switch:hardware_id\"] = port_ext[\"hardware_id\"]\n port_res[\"commit\"] = port_ext[\"commit\"]\n port_res[\"trunked\"] = port_ext[\"trunked\"]", "def testEditConfigCreatePortState(self):\n self.ports.editconfig_create_port_state(file_name = 'editconfig_create_port_label.xml', port_ids = portsDict['port_ids'], port_states = portsDict['port_state'])", "def update_port(self, context, port_id, port):\n LOG.debug(_(\"NeutronRestProxyV2: update_port() called\"))\n\n self._warn_on_state_status(port['port'])\n\n # Validate Args\n orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)\n with context.session.begin(subtransactions=True):\n # Update DB\n new_port = super(NeutronRestProxyV2,\n self).update_port(context, port_id, port)\n self._update_extra_dhcp_opts_on_port(context, port_id, port,\n new_port)\n if (portbindings.HOST_ID in port['port']\n and 'id' in new_port):\n host_id = port['port'][portbindings.HOST_ID]\n porttracker_db.put_port_hostid(context, new_port['id'],\n host_id)\n new_port = self._extend_port_dict_binding(context, new_port)\n\n # update on networl ctrl\n mapped_port = self._map_state_and_status(new_port)\n self.servers.rest_update_port(orig_port[\"tenant_id\"],\n orig_port[\"network_id\"],\n mapped_port, port_id)\n\n if (new_port.get(\"device_id\") != orig_port.get(\"device_id\") and\n orig_port.get(\"device_id\")):\n try:\n self.servers.rest_unplug_interface(orig_port[\"tenant_id\"],\n orig_port[\"network_id\"],\n orig_port[\"id\"])\n device_id = new_port.get(\"device_id\")\n if device_id:\n 
self.rest_plug_interface(new_port[\"tenant_id\"],\n new_port[\"network_id\"],\n new_port, device_id)\n\n except RemoteRestError:\n with excutils.save_and_reraise_exception():\n port_update = {\"port\": {\"status\": \"ERROR\"}}\n super(NeutronRestProxyV2, self).update_port(\n context,\n new_port[\"id\"],\n port_update\n )\n\n # return new_port\n return new_port", "def configure_dcbx_app(self, ports, **kwargs):\n pass", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def display_port(self):\n ports=os.popen(\"sudo netstat -ntlp\").read().strip().splitlines()[2:]\n for port in ports:\n split=re.split('[\\s]+',port)\n self.portDic[\"Protcol\"]=split[0]\n self.portDic[\"Receive Q\"]=split[1]\n self.portDic[\"Send Q\"]=split[2]\n split_port=split[3].split(\":\")\n if split_port[1]==\"\":\n self.portDic[\"port\"]=\"No Port\" \n else:\n self.portDic[\"port\"]=split_port[1]\n self.portDic[\"Foreign Address\"]=split[4]\n self.portDic[\"State\"]=split[5]\n split_ID=split[6].split(\"/\")\n self.portDic[\"PID\"]=split_ID[0]\n self.portDic[\"Programme Name\"]=split_ID[1]\n self.portList.append(self.portDic.copy())\n return self.portList", "def refresh(self):\n self.ports = list(serial.tools.list_ports.comports())", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())", "def populateConfigFromDb(self):\n driverName = self.widget.abstractDb.getType()\n self.configFromDbComboBox.clear()\n self.configFromDbComboBox.addItem(self.tr('Select Stored Config (optional)'))\n if driverName == 'QPSQL':\n self.configFromDbComboBox.setEnabled(True)\n propertyDict = self.widget.abstractDb.getPropertyDict('FieldToolBoxConfig')\n dbVersion = self.widget.abstractDb.getDatabaseVersion()\n if dbVersion in propertyDict.keys():\n self.configFromDbDict = propertyDict[dbVersion]\n nameList = self.configFromDbDict.keys()\n nameList.sort()\n for name in nameList:\n self.configFromDbComboBox.addItem(name)\n else:\n self.configFromDbComboBox.setEnabled(False)\n self.configFromDbDict = dict()", "def determine_ports():\n ports = [config('admin-port'), config('service-port')]\n return list(set(ports))", "def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def test_update_port_group(self):\n pass", "def list_ports(state):\n\tstate.report()", "def setup():\n\tglobal conn_old\n\tconn_old = pdatab.connect_permissions_db(ip=\"localhost\", port=3306, db=\"pcomp_srv_sb\")\n\tglobal conn_new\n\tconn_new = pdatab.connect_permissions_db(ip=\"localhost\", port=3306, db=\"pcomp_srv\")", "def save_io_ports(self, *args):\n if args[0] == 'Save':\n title = args[1].title\n text = args[1].text_field.text\n try:\n port = int(text, 16)\n if port < 0 or port > 4095:\n toast('Invalid port number. 
Valid port numbers [0-4095]')\n else:\n if is_valid_port(port):\n hex_port = convert_to_hex(port, 12)\n if TRAFFIC_LIGHT['menu_title'] in title:\n update_reserved_ports(TRAFFIC_LIGHT,\n TRAFFIC_LIGHT['port'],\n hex_port)\n self.traffic_lights.text = TRAFFIC_LIGHT['menu_title'] + '. Current Port: ' + str(\n TRAFFIC_LIGHT['port'])\n toast_message = f'Changed Traffic Light I/O port number to {port}'\n elif SEVEN_SEGMENT_DISPLAY['menu_title'] in title:\n update_reserved_ports(SEVEN_SEGMENT_DISPLAY,\n SEVEN_SEGMENT_DISPLAY['port'],\n hex_port)\n self.seven_segment.text = SEVEN_SEGMENT_DISPLAY['menu_title'] + '. Current Port: ' + str(\n SEVEN_SEGMENT_DISPLAY['port'])\n toast_message = f'Changed Seven Segment I/O port number to {port}'\n elif ASCII_TABLE['menu_title'] in title:\n if port > 4088:\n toast_message = 'Invalid port for ASCII Table. Valid ports [0-4088]'\n else:\n try:\n update_reserved_ports(ASCII_TABLE,\n ASCII_TABLE['port'],\n hex_port, True)\n self.ascii_table.text = ASCII_TABLE['menu_title'] + '. Current Port: ' + str(\n ASCII_TABLE['port'])\n toast_message = f'Changed ASCII Table I/O port number to {port}'\n except MemoryError as e:\n toast_message = str(e)\n else:\n update_reserved_ports(HEX_KEYBOARD,\n HEX_KEYBOARD['port'],\n hex_port)\n self.hex_keyboard.text = HEX_KEYBOARD['menu_title'] + '. Current Port: ' + str(\n HEX_KEYBOARD['port'])\n toast_message = f'Changed HEX Keyboard I/O port number to {port}'\n toast(toast_message)\n else:\n toast('Invalid input. That port is reserved!')\n except ValueError as e:\n toast(f'Not a valid port!')", "def test_port(self):\n\n\t\tself.assertEqual(self.port, self.tracker.port)", "def config_probe(self, widget, data=None):\n\t\tConfigure.ExcludeServer = (int(self.builder.get_object(\"MasterRadio\").get_active()))\n\t\tConfigure.MaxNodes = \t (int(self.builder.get_object(\"NodeScale\").get_value()))\n\t\tConfigure.LocalhostOnly = (int(self.builder.get_object(\"LocalHostRadio\").get_active()))\n\t\tConfigure.TimeStep = \t (int(self.builder.get_object(\"TimeStepScale\").get_value()))\n\t\tConfigure.Interval = \t (int(self.builder.get_object(\"IntervalScale\").get_value()))\n\n\t\tnomeFile = (str(self.builder.get_object(\"NameText\").get_text()))\n\n\t\tif ('/' not in nomeFile) : Configure.SaveConfig(NewFile=\"./extra/UserOutput/\"+nomeFile)\n\t\telse : Configure.SaveConfig(NewFile = nomeFile)\n\t\t\n\n\t\tprint \"### Sending setup signal to Monitor...\"\n\t\tself.setup_monitor()", "def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def wait_for_port(port, host=\"localhost\", interval=30):\n print('Waiting for database 
connections to be available...')\n good = False\n while not good:\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n good = True\n except socket.error:\n pass\n finally:\n sock.close()\n time.sleep(interval)", "def fusion_api_update_li_port_monitor_configuration(self, body=None, uri=None, api=None, headers=None):\n param = '/port-monitor'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def update_port_precommit(self, port_context):\n # TODO(ijw): optimisation: the update port may leave the\n # binding state the same as before if someone updated\n # something other than the binding on the port, but this\n # way we always send it out and it's the far end's job to\n # ignore it. Doing less work is nevertheless good, so we\n # should in future avoid the send.\n\n # unbind port from old host, if already bound\n if port_context.original_binding_levels is not None:\n prev_bind = port_context.original_binding_levels[-1]\n\n if (prev_bind is not None and\n prev_bind.get(api.BOUND_DRIVER) == self.MECH_NAME and\n port_context.host != port_context.original_host):\n\n # Note that we skip this step if the change happens while\n # 'unbinding' and rebinding to the same host - it's probably\n # an update of extraneous detail and not really a request\n # that requires binding.\n\n self.communicator.unbind(port_context._plugin_context.session,\n port_context.original,\n port_context.original_host,\n prev_bind[api.BOUND_SEGMENT]\n )\n\n # (Re)bind port to the new host, if it needs to be bound\n if port_context.binding_levels is not None:\n current_bind = port_context.binding_levels[-1]\n\n if (current_bind is not None and\n current_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n\n binding_type = self.get_vif_type(port_context)\n # Remove port membership from any previously associated\n # security groups for updating remote_security_group_id ACLs\n self.communicator.unbind_port_from_remote_groups(\n port_context._plugin_context.session,\n port_context.original,\n port_context.current)\n\n self.communicator.bind(port_context._plugin_context.session,\n port_context.current,\n current_bind[api.BOUND_SEGMENT],\n port_context.host,\n binding_type)\n\n # TODO(ijW): The agent driver checks for a change of\n # host, but we're oddly seeing that the orig_host is\n # always set. Should confirm if this is a problem or\n # not.\n self._insert_provisioning_block(port_context)", "def make_connections(self):\n try:\n self.datatype.currentIndexChanged.connect(self.set_datatype)\n self.dyad.valueChanged.connect(self.set_dyad)#\n self.vid_or_channel.valueChanged.connect(self.set_channel_or_vid)\n except Exception as e:\n QMessageBox.about(self, str(e))", "def update_port_precommit(self, mech_context):\n LOG.debug(\"update_port_precommit(self: called\")", "def update_port_postcommit(self, port_context):\n # TODO(ijw): optimisation: the update port may leave the\n # binding state the same as before if someone updated\n # something other than the binding on the port, but this\n # way we always send it out and it's the far end's job to\n # ignore it. 
Doing less work is nevertheless good, so we\n # should in future avoid the send.\n\n if port_context.binding_levels is not None:\n current_bind = port_context.binding_levels[-1]\n if port_context.original_binding_levels is None:\n prev_bind = None\n else:\n prev_bind = port_context.original_binding_levels[-1]\n\n if (current_bind is not None and\n current_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n self.communicator.kick()\n elif (prev_bind is not None and\n prev_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n self.communicator.kick()", "def preferencesChanged(self):\n # do nothing\n pass", "def _assign_port_to_device(self):\n for i in range(0, len(self.stlink_devices)):\n self.stlink_devices[i]['usb_port'] = self.get_port_from_serial(self.stlink_devices[i]['serial'])", "def connections_changed(self, name):\n return {name: str(name)}", "def modify_ports(self, ports, **kwargs):\n pass", "def wait_for_port_bind(self):\n if self.starter_port is not None:\n count = 0\n while count < 10:\n for socket in self.instance.connections():\n if socket.status == \"LISTEN\" and socket.laddr.port == self.starter_port:\n print(\"socket found!\")\n return\n count += 1\n time.sleep(1)\n raise Exception(f\"starter didn't bind {self.starter_port} on time!\")\n print(\"dont know port\")", "def test_connections_updated(self):\n assert self.agent_config.connections == {self.new_connection_id}", "def _update_port_ext(self, original_port, req_port,\n session=None):\n\n commit = self._get_port_attr(req_port, \"commit\")\n trunked = self._get_port_attr(req_port, \"trunked\")\n hardware_id = self._get_port_attr(req_port, \"switch:hardware_id\")\n\n # we cannot allow the trunked flag to change if committed.\n if trunked is not None and (original_port[\"trunked\"] != trunked):\n if original_port[\"commit\"] and (commit is not False):\n msg = \"cannot update trunked flag when commit=true\"\n raise exc.InvalidInput(error_message=msg)\n\n port_ext = db.update_port_ext(\n port_id=original_port[\"id\"],\n trunked=trunked,\n commit=commit,\n hardware_id=hardware_id,\n session=session)\n return port_ext.as_dict()", "def test_connections_updated(self):\n assert self.skill_config.connections == {self.new_connection_id}", "def configPort(self, port, bw=2, delay = '1ms', loss = 0):\n if(bw < 0 or bw > 1000):\n return\n tc = 'tc'\n try:\n intf = self.intfs[port]\n except KeyError:\n error('Port %d does not exist on node %s' % (port, self.name))\n return\n info( intf + '(bw %dMbit, delay %s, loss %d%%)\\n' % (bw, delay, loss) )\n cmds = [\n '%s qdisc del dev %s root',\n '%s qdisc add dev %s root handle 1:0 htb default 1',\n '%s class add dev %s parent 1:0 classid 1:1 htb ' +\n 'rate %dMbit burst 15k' % bw,\n '%s qdisc add dev %s parent 1:1 handle 10:0 netem ' +\n 'delay ' + '%s' % delay + ' loss ' + '%d' % loss\n ]\n # execute all the commands in the container\n map(lambda s: self.cmd(s % (tc, intf)), cmds)", "def view_config_changes(config):\n rev = reverter.Reverter(config)\n rev.recovery_routine()\n rev.view_config_changes()", "def send_on_input_port_change(self):\n return self._send_on_input_port_change", "def onConfigureMessage(self, config):\n for adaptor in config[\"adaptors\"]:\n adtID = adaptor[\"id\"]\n if adtID not in self.devices:\n # Because configure may be re-called if devices are added\n name = adaptor[\"name\"]\n friendly_name = adaptor[\"friendly_name\"]\n logging.debug(\"%s Configure app. 
Adaptor name: %s\", ModuleName, name)\n self.idToName[adtID] = friendly_name.replace(\" \", \"_\")\n self.devices.append(adtID)\n self.dm = DataManager(self.bridge_id)\n self.setState(\"starting\")", "def create_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.create_port(port)\n except:\n pass", "def on_config_changed(self, event):\n unit = self.model.unit", "def _set_up_change_notifier(conn, table: str, actions: Set[str]):\n\n # build function to create in the database\n channel = f\"{table}_table_change\"\n func_name = f\"notify_{table}_change()\"\n func = f\"\"\"\n CREATE OR REPLACE FUNCTION {func_name}\n RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('{channel}','changed');\n RETURN NULL;\n END;\n $$ LANGUAGE plpgsql;\n \"\"\"\n\n # build triggers that will run func on each action\n triggers = \"\"\n for action in actions:\n if action.upper() in ServerSockets.DbActions:\n trigger_name = f\"{table}_notify_{action.lower()}\"\n\n triggers += f\"\"\"\n DROP TRIGGER IF EXISTS {trigger_name} ON {table};\n CREATE TRIGGER {trigger_name}\n AFTER {action} ON {table}\n FOR EACH ROW EXECUTE PROCEDURE {func_name};\n \"\"\"\n else:\n raise TypeError(\n \"All actions must be either INSERT, UPDATE or DELETE\")\n\n # insert function and respective triggers into the database\n cur = conn.cursor()\n cur.execute(func)\n if triggers:\n cur.execute(triggers)\n return channel", "def conf_update(self):\n pass", "def test_restricted_to_protocols_updated(self):\n assert self.connection_config.restricted_to_protocols == {self.new_protocol_id}", "def process_port_state(self, dp_name, port, state):\n with self._lock:\n device = self._port_device_mapping.setdefault((dp_name, port), DeviceEntry())\n device.port_up = state\n if not state:\n device.assigned = None\n device.vlan = None\n self._send_device_port_event(device)", "def test_config_changed_no_relations(\n self,\n ) -> NoReturn:\n\n self.harness.charm.on.config_changed.emit()\n\n # Assertions\n self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)\n self.assertTrue(\n all(\n relation in self.harness.charm.unit.status.message\n for relation in [\"mongodb\", \"kafka\", \"ro\"]\n )\n )", "def _process_trunk_subport_bindings(self, context, trunk, port_ids):\n updated_ports = []\n trunk_port_id = trunk.port_id\n trunk_port = self.core_plugin.get_port(context, trunk_port_id)\n trunk_host = trunk_port.get(portbindings.HOST_ID)\n migrating_to_host = trunk_port.get(\n portbindings.PROFILE, {}).get('migrating_to')\n if migrating_to_host and trunk_host != migrating_to_host:\n # Trunk is migrating now, so lets update host of the subports\n # to the new host already\n trunk_host = migrating_to_host\n\n # NOTE(status_police) Set the trunk in BUILD state before\n # processing subport bindings. 
The trunk will stay in BUILD\n # state until an attempt has been made to bind all subports\n # passed here and the agent acknowledges the operation was\n # successful.\n self._safe_update_trunk(\n trunk, status=trunk_consts.TRUNK_BUILD_STATUS)\n\n for port_id in port_ids:\n try:\n updated_port = self._handle_port_binding(context, port_id,\n trunk, trunk_host)\n # NOTE(fitoduarte): consider trimming down the content\n # of the port data structure.\n updated_ports.append(updated_port)\n except trunk_exc.SubPortBindingError as e:\n LOG.error(\"Failed to bind subport: %s\", e)\n\n # NOTE(status_police) The subport binding has failed in a\n # manner in which we cannot proceed and the user must take\n # action to bring the trunk back to a sane state.\n self._safe_update_trunk(\n trunk, status=trunk_consts.TRUNK_ERROR_STATUS)\n return []\n except Exception as e:\n msg = (\"Failed to bind subport port %(port)s on trunk \"\n \"%(trunk)s: %(exc)s\")\n LOG.error(msg, {'port': port_id, 'trunk': trunk.id, 'exc': e})\n\n if len(port_ids) != len(updated_ports):\n self._safe_update_trunk(\n trunk, status=trunk_consts.TRUNK_DEGRADED_STATUS)\n\n return updated_ports", "def update_port_ip(self, dpid, port, ip):\n # TODO Connection between mac and ip of host?\n if self.nb_api is None:\n self.nb_api = api_nb.NbApi.get_instance(False)\n\n port_id = \"{}:{}\".format(dpid, port)\n try:\n lport = self.nb_api.get(l2.LogicalPort(id=port_id))\n for ip_addr_obj in lport.ips:\n if str(ip_addr_obj) == ip:\n # already learned\n return\n lport.ips.append(ip)\n self.nb_api.update(lport)\n # TODO: Remove old ips\n except DBKeyNotFound:\n # TODO: Create Port?\n print \"Key not Found!!\"" ]
[ "0.60486573", "0.601688", "0.6003362", "0.5990358", "0.5919747", "0.5897559", "0.5824499", "0.5805411", "0.57939553", "0.5755642", "0.5738589", "0.57356256", "0.56507355", "0.5590035", "0.55844575", "0.5562933", "0.5554786", "0.55488783", "0.5544349", "0.5528714", "0.55249166", "0.55130166", "0.5482734", "0.5482587", "0.5469336", "0.5420505", "0.5391734", "0.537468", "0.5369885", "0.53678507", "0.53622025", "0.5353813", "0.5340616", "0.53080255", "0.5302531", "0.5290584", "0.5287836", "0.52817005", "0.5261537", "0.5248487", "0.52237827", "0.5211319", "0.52044797", "0.5200059", "0.51948965", "0.5193738", "0.51874954", "0.51760167", "0.5167808", "0.516244", "0.51613355", "0.5150386", "0.5149746", "0.5129912", "0.5119048", "0.5112081", "0.50938386", "0.5088143", "0.50783944", "0.50768596", "0.50755006", "0.5060814", "0.50579196", "0.50304097", "0.50264215", "0.5023106", "0.5013817", "0.5000496", "0.49865568", "0.4982473", "0.4975868", "0.49746504", "0.49650788", "0.49634963", "0.4961482", "0.4957483", "0.49537987", "0.4952669", "0.4938133", "0.4926281", "0.4922879", "0.49149328", "0.49046332", "0.49029467", "0.49021178", "0.4898403", "0.4894161", "0.48938692", "0.48930955", "0.48897457", "0.48873606", "0.48782986", "0.4876259", "0.4858672", "0.48502213", "0.48417863", "0.48383883", "0.4835218", "0.48274034", "0.4827119" ]
0.6089049
0
Get port mapping from CONFIG_DB
def get_port_mapping(namespaces):
    """Build a PortMapping from the PORT table in CONFIG_DB.

    For each namespace, resolve its ASIC index, connect to that namespace's
    CONFIG_DB, and feed every valid PORT table entry into the mapping as a
    PORT_ADD PortChangeEvent.
    """
    port_mapping = PortMapping()
    for namespace in namespaces:
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        config_db = daemon_base.db_connect("CONFIG_DB", namespace=namespace)
        port_table = swsscommon.Table(config_db, swsscommon.CFG_PORT_TABLE_NAME)
        for key in port_table.getKeys():
            if not validate_port(key):
                continue
            _, port_config = port_table.get(key)
            port_config_dict = dict(port_config)
            port_change_event = PortChangeEvent(key, port_config_dict['index'], asic_id, PortChangeEvent.PORT_ADD)
            port_mapping.handle_port_change_event(port_change_event)
    return port_mapping
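A minimal start-up sketch, assuming the helpers referenced above (multi_asic, daemon_base, swsscommon) are importable from the SONiC Python packages; the get_front_end_namespaces() call is the customary way SONiC daemons enumerate ASIC namespaces, but take it as an assumption rather than something shown in the snippet itself:

from sonic_py_common import multi_asic

# Enumerate the front-end ASIC namespaces (a single default namespace on
# non-multi-ASIC devices) and build the port mapping once at daemon start-up.
namespaces = multi_asic.get_front_end_namespaces()
port_mapping = get_port_mapping(namespaces)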
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDbPort():\n\n if \"DB_PORT\" in controller.CONF.keys():\n return controller.CONF[\"DB_PORT\"]\n\n return basedefs.DB_PORT", "def db_port(self) -> Optional[int]:\n return pulumi.get(self, \"db_port\")", "def get_all_port(self, conf, dpid):\n\t\tpass", "def get_port_binding():\n import docker\n client = docker.from_env()\n return [c.attrs['NetworkSettings']['Ports']['5555/tcp'][0]\n for c in client.containers.list(\n filters={'label': 'org.label-schema.name=profemag/femag'})]", "def get_port(self, conf, dpid, port_id):\n\t\tpass", "def port_extension_map(self):\n return usb_config.CAMBRIONIX_PORT_MAP[self.model]", "def determine_ports():\n ports = [config('admin-port'), config('service-port')]\n return list(set(ports))", "def port_number(self):\n return self._props[\"persistent_identifiers\"].get(self._primary_port_prop)", "def get_ports_mapping(status=psutil.CONN_LISTEN):\n ports = defaultdict(list)\n\n for process in get_processes():\n try:\n connections = process.connections()\n except psutil.Error:\n continue\n\n if connections:\n for conn in connections:\n if conn.status == status:\n ports[process].append(conn.laddr.port)\n\n return ports", "def connection_configuration_mapping(self, value):\n if value == \"Y\":\n return \"0\"\n elif value == \"D\":\n return \"2\"\n elif value == \"Z\":\n return \"5\"\n else:\n raise ValueError(\"Unknown configuration {}\".format(value))", "def get_ports(self, database_name):\n databases = self.list_databases()\n for d in databases:\n if d['name'] == database_name:\n database_id = d['id']\n break\n else:\n raise ClientError('Could not find database, does not exist.')\n end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'ports', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n return resp.json()", "def port(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n return rel.data[rel.app].get(\"port\")", "def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']", "def _get_port(self):\n return self.__port", "def portmap(ctx, verbose):\n table = \"No portmap rules exist\"\n with Spinner('Looking up port mapping rules'):\n data = ctx.obj.vlab_api.get('/api/1/ipam/portmap').json()['content']\n rules = data['ports']\n gateway_ip = data['gateway_ip']\n header = ['Name', 'Type', 'Port', 'Protocol']\n if verbose:\n header.append('Target IP')\n rows = []\n for conn_port, details in rules.items():\n name = details.get('name', 'Error')\n vm_type = details.get('component', 'Unknown')\n vm_port = details.get('target_port', 0)\n protocol = port_to_protocol(vm_type, vm_port)\n target_ip = details.get('target_addr', 'Unknown')\n if verbose:\n row = [name, vm_type, conn_port, protocol, target_ip]\n else:\n row = [name, vm_type, conn_port, protocol]\n rows.append(row)\n table = tabulate(rows, headers=header, tablefmt='presto', numalign=\"center\")\n click.echo('\\nGateway IP: {}'.format(gateway_ip))\n click.echo(table)", "def port():", "def get_ports(self) -> tuple:\n raise NotImplementedError", "def map_port_info(port, nmap_store):\n nmap_store[\"port_id\"] = port.get(\"portid\")\n nmap_store[\"port_protocol\"] = port.get(\"protocol\")\n map_state_info(port, nmap_store)\n map_service_info(port, nmap_store)\n return nmap_store", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def _translate_port(port):\n services = _get_services_mapping()\n if port in services and services[port][\"port\"]:\n 
return services[port][\"port\"][0]\n return port", "def get_port(self):\n return self.port", "def port_list(self):\n return self._port_list", "def port(self) -> int:", "def _get_exposed_ports(debug_port):\n if not debug_port:\n return None\n\n return {\n # container port : host port\n debug_port: debug_port\n }", "def secondary_port_number(self):\n return self._props[\"persistent_identifiers\"].get(self._secondary_port_prop)", "def get_serverport(cobj):\n pass", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def port(self):\n return self._host[CONF_PORT]", "def get_config_connection():\n\n connection = {'send_time': '5',\n 'address': 'localhost',\n 'port': '5672',\n 'flask_port': '500'}\n\n return connection", "def get_config_db():\n\n datab = {'db_name': 'database_name',\n 'db_url': 'database_url'}\n\n return datab", "def GetPostgresPortNumber():\n pattern = r\"^\\s*port\\s*=\\s*(\\d{4,})\\s*\"\n\n match = MatchPattern(POSTGRES_PROPERTIES_PATH, pattern)\n if match:\n port = match[0]\n return port\n\n return None", "def list_occupied_adb_ports():\n out = AdbProxy().forward('--list')\n clean_lines = str(out, 'utf-8').strip().split('\\n')\n used_ports = []\n for line in clean_lines:\n tokens = line.split(' tcp:')\n if len(tokens) != 3:\n continue\n used_ports.append(int(tokens[1]))\n return used_ports", "def get_my_port(self):\n if self.starter_port is not None:\n return self.starter_port\n\n where = -1\n tries = 10\n while where == -1 and tries:\n tries -= 1\n lfcontent = self.get_log_file()\n where = lfcontent.find(\"ArangoDB Starter listening on\")\n if where != -1:\n where = lfcontent.find(\":\", where)\n if where != -1:\n end = lfcontent.find(\" \", where)\n port = lfcontent[where + 1 : end]\n self.starter_port = port\n assert int(port), \"port cannot be converted to int!\"\n return port\n logging.info(\"retrying logfile\")\n time.sleep(1)\n message = \"could not get port form: \" + self.log_file\n logging.error(message)\n raise Exception(message)", "def get_ports(self) -> tuple:\n return self._current_dev_manager.get_ports()", "def port_services(start_port, end_port):\n # Initialize empty dict for (key, value) pairs of (port, service).\n port_service_dict = dict()\n\n # Loop through port range and add to port_service_dict\n for i in range(int(start_port), int(end_port) + 1):\n try:\n port_service_dict[i] = socket.getservbyport(i)\n except OSError:\n port_service_dict[i] = \" \" # Shows that port is not in use\n\n return port_service_dict", "def get_port(project_path):\n\n config = ConfigParser.ConfigParser()\n config_path = os.path.abspath(os.path.join(project_path, 'config.ini'))\n config.read(config_path)\n return config.get('SELENIUMSERVER', 'hub_port')", "def get_switch_port_mapping(self,switch_name):\n switch_list = []\n switch_list = self.__graph_dict[switch_name]\n return switch_list", "def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})", "def get_services(options, db):\n results = []\n port, protocol = options.port\n # swap values if they were passed in wrong order\n if port.isalpha() and protocol.isdigit():\n port, protocol = protocol, port\n results = db.GetPortParents(port, protocol)\n return port, protocol, results", "def get_host_port(self) -> int:\n return self.config_dict.get(\"host_port\", 0)", "def get_port(self):\n return self.__port", "def get_external_db_mapping(self) -> dict:\n external_map_path = 
self.param(\"external_db_map\")\n db_map = dict()\n if external_map_path is None: return db_map\n\n # Load the map\n with open(external_map_path, \"r\") as map_file:\n for line in map_file:\n if line.startswith(\"#\"): continue\n line = re.sub(r'#.*', '', line)\n if re.match(r'^\\s*$', line): continue\n (from_name, to_name, *rest) = line.strip().split(\"\\t\")\n if len(rest) > 0 and rest[0].upper() != \"SEQ_REGION\": continue\n if to_name == \"_IGNORE_\": continue\n db_map[from_name] = to_name\n return db_map", "def setup_logical_port_connectivity(self, context, port_db):\n pass", "def config_db():", "def mac2port(self, mac, context):\n nmeta = self._nmeta\n dpid = self.dpid\n #*** Retrieve first matching record:\n db_result = nmeta.dbidmac.find_one({'dpid': dpid, 'mac': mac,\n 'context': context})\n if db_result:\n if not 'dpid' in db_result:\n self.logger.error(\"DB record didn't have a dpid...???\")\n return PORT_NOT_FOUND\n dpid = db_result['dpid']\n if not 'port' in db_result:\n self.logger.error(\"DB record didn't have a port...???\")\n return PORT_NOT_FOUND\n if not 'context' in db_result:\n self.logger.error(\"DB record didn't have a context...???\")\n return PORT_NOT_FOUND\n if db_result['context'] != context:\n return PORT_NOT_FOUND\n port = db_result['port']\n self.logger.debug(\"Found mac=%s on dpid=%s port=%s context=%s\",\n mac, dpid, port, context)\n return port\n else:\n self.logger.info(\"Unknown mac=%s for dpid=%s context=%s\", mac,\n self.dpid, context)\n return PORT_NOT_FOUND", "def port_usage(port):\n\n global PORT_USES\n\n if PORT_USES is None:\n config = conf.Config()\n config_path = os.path.join(os.path.dirname(__file__), 'ports.cfg')\n\n try:\n config.load(config_path)\n port_uses = {}\n\n for key, value in config.get('port', {}).items():\n if key.isdigit():\n port_uses[int(key)] = value\n elif '-' in key:\n min_port, max_port = key.split('-', 1)\n\n for port_entry in range(int(min_port), int(max_port) + 1):\n port_uses[port_entry] = value\n else:\n raise ValueError(\"'%s' is an invalid key\" % key)\n\n PORT_USES = port_uses\n except Exception as exc:\n log.warn(\"BUG: stem failed to load its internal port descriptions from '%s': %s\" % (config_path, exc))\n\n if not PORT_USES:\n return None\n\n if isinstance(port, str) and port.isdigit():\n port = int(port)\n\n return PORT_USES.get(port)", "def get_port(self) -> int:\n return self._port", "def _get_port_info(self, context):\n port = {}\n data = dict()\n old_host_name = ''\n\n if context.original is not None:\n old_host_name = context.original.get('binding:host_id', '')\n\n context = context._port\n port_id = str(context.get('id', ''))\n data['device_owner'] = str(context.get('device_owner', ''))\n # don't create port \"network:floating_ip\n if data['device_owner'] == \"network:floatingip\":\n return None\n data['host_name'] = str(context.get('binding:host_id', ''))\n if len(context.get('fixed_ips', [])) > 0:\n data['subnet_id'] = str(context['fixed_ips'][0].get('subnet_id', ''))\n data['ip_address'] = str(context['fixed_ips'][0].get('ip_address', ''))\n data['device_id'] = str(context.get('device_id', ''))\n data['mac'] = str(context.get('mac_address', ''))\n data['network_id'] = str(context.get('network_id', ''))\n data['admin_state_up'] = context.get('admin_state_up', '')\n data['port_id'] = port_id\n data['tenant_id'] = str(context.get('tenant_id', ''))\n\n context_str = json.dumps(data, sort_keys=True)\n data['md5sum'] = hashlib.md5(context_str).hexdigest()\n\n data['field_not_in_md5'] = ['md5sum']\n 
data['field_not_in_md5'].append('old_host_name')\n data['old_host_name'] = old_host_name\n\n if data['port_id'] == '':\n LOG.error(_('Get creating port information failed'))\n return None\n\n if port_id != '':\n port[port_id] = data\n return port", "def get_port(self, dst_ip, access_table):\r\n if access_table:\r\n if isinstance(access_table.values()[0], tuple):\r\n for key in access_table.keys():\r\n if dst_ip == access_table[key][0]: # Use the IP address only, not the MAC address. (hmc)\r\n dst_port = key[1]\r\n return dst_port\r\n return None", "def port2(self):\n return self._port2", "def platform_config_update(config):\n\n port_map = {}\n\n for (device, ports, socket_addr) in config[\"device_sockets\"]:\n for port in ports:\n port_map[(device, port)] = socket_addr\n\n # no default configuration for this platform\n\n config[\"port_map\"] = port_map", "def Port(self) -> int:", "def get_port_list(self):\r\n self.ports = Manager().dict()\r\n self.value = Manager().dict()\r\n self.sensors = dict()\r\n for p in self.device.ports['input']:\r\n if p.enabled:\r\n self.ports[p.number] = p\r\n self.value[p.number] = 'Connexion à la carte'\r\n self.sensors[p.number] = Sensor.get(p._type)", "def get_switch_port_map(self,switch_name):\n\n # Now do a sort and return a map having port nos & connected devices\n myswitch_pmap = []\n self.sw_port_mapping[switch_name].sort()\n idx = 1\n for swname in self.sw_port_mapping[switch_name]:\n myswitch_pmap.append( (idx, swname) )\n idx = idx + 1\n return myswitch_pmap", "def parse_connection_string(self, constring):\r\n try:\r\n host, port, db = constring.split(\":\")\r\n port = port if host == \"unix\" else int(port)\r\n db = int(db)\r\n return host, port, db\r\n except (ValueError, TypeError):\r\n raise ImproperlyConfigured(\"Incorrect format '%s'\" % (constring))", "def read_all_ram_ports(self):\n return self.RAM_PORT", "def get_ports(self):\n return self._ports", "def get_port(self):\n \n return self._port", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def _get_nport(self):\n return self.__nport", "def port(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def display_port(self):\n ports=os.popen(\"sudo netstat -ntlp\").read().strip().splitlines()[2:]\n for port in ports:\n split=re.split('[\\s]+',port)\n self.portDic[\"Protcol\"]=split[0]\n self.portDic[\"Receive Q\"]=split[1]\n self.portDic[\"Send Q\"]=split[2]\n split_port=split[3].split(\":\")\n if split_port[1]==\"\":\n self.portDic[\"port\"]=\"No Port\" \n else:\n self.portDic[\"port\"]=split_port[1]\n self.portDic[\"Foreign Address\"]=split[4]\n self.portDic[\"State\"]=split[5]\n split_ID=split[6].split(\"/\")\n self.portDic[\"PID\"]=split_ID[0]\n self.portDic[\"Programme Name\"]=split_ID[1]\n self.portList.append(self.portDic.copy())\n return self.portList", "def port(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"port\")", "def port_configure(self,port,**config):\n if not port in self.ports:\n self.ports[port] = {}\n\n for k,v in config.items():\n self.ports[port][k] = v", "def _find_host_port(ports: Dict[str, Any], container_port: int) -> str:\n 
mappings = ports.get('{}/tcp'.format(container_port), [])\n for mapping in mappings:\n if mapping['HostIp'] == '0.0.0.0':\n return mapping['HostPort']\n else:\n raise ValueError(\n 'No HostPort found for ContainerPort={} (all port mappings: {})'\n .format(container_port, ports))", "def get_database_dsn():\n return getattr(config, f\"POSTGRES_DSN_{config.SERVER_MODE}\")", "def port(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"port\")", "def get_ports(svc_group, db):\n results = []\n for svc in svc_group:\n port = db.GetService(svc)\n results.append((svc, port))\n return results", "def list_port(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/ports.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server, while listing ports.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get port list Failed with status %s\"\n % response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Port List : %s \" % output)\n return output[\"ports\"]", "def get_available_portoffset(target=\"localhost\"):\n target_ip = socket.gethostbyname(target)\n for portoffset in range(10000, 61000, 1000):\n i = portoffset + 873\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((target_ip, i))\n sock.close()\n if result != 0:\n logger.debug(\"port open {0}\".format(portoffset))\n return portoffset\n return None", "def _get_port_number(\n device_dict: _DeviceDictType,\n port_mapping: Optional[Dict[str, int]] = None,\n parent_device_dict: Optional[_DeviceDictType] = None) -> Optional[int]:\n if not port_mapping: # not connected to a usb hub\n return None\n if is_cambrionix(device_dict):\n return None\n if parent_device_dict:\n index = _get_cambrionix_port_using_parent_hub(\n device_dict, parent_device_dict)\n else:\n # get the appropriate digits\n stripped_location_id = _rstrip_location_id(device_dict)\n index = '{}.{}'.format(stripped_location_id[-2], stripped_location_id[-1])\n port = port_mapping[index]\n return port", "def get_res_port():\n return get_port() + 1", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port(self) -> int:\n return pulumi.get(self, \"port\")", "def port1(self):\n return self._port1", "def internal_port(self):\r\n return self._internal_port", "def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('FLASK_DRIVER_PORT'))\n\t\texcept:\n\t\t\treturn 5000", "def parse_port_protocol(nmap_file):\r\n\r\n # use parse() to parse the xml file\r\n nmap_xml = ET.parse(nmap_file)\r\n # start with finding the root\r\n root = nmap_xml.getroot()\r\n # create empty dictionary dictionary_address_ip_and_ports\r\n dictionary_address_ip_and_ports = {}\r\n # find host inside the xml file first\r\n for host in root.iter(\"host\"):\r\n # then locate address inside the host\r\n for address in host.iter(\"address\"):\r\n # create empty dictionary dictionary_portid_and_protocol\r\n dictionary_portid_and_protocol = {}\r\n # finally locate the portid inside the host\r\n for ports in host.iter(\"port\"):\r\n # store portid and protocol into dictionary_portid_and_protocol\r\n 
dictionary_portid_and_protocol[ports.get(\"portid\")] = ports.get(\"protocol\")\r\n # store dictionary_portid_and_protocol into dictionary_address_ip_and_ports\r\n dictionary_address_ip_and_ports[address.get(\"addr\")] = dictionary_portid_and_protocol\r\n # return dictionary_store_ip_and_ports\r\n return dictionary_address_ip_and_ports", "def find_port(addr, user):\n home = pwd.getpwuid(os.getuid()).pw_dir\n for name in os.listdir('%s/.ssh/' % home):\n if name.startswith('unixpipe_%s@%s_' % (user, addr,)):\n return int(name.split('_')[2])", "def get_db_params(self):\n return self.get_section_config('db')", "def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_get_port(self)", "def _get_data(self):\n raw_data = self._get_raw_data()\n if not raw_data:\n return None\n result = {}\n for line in raw_data:\n if 'tcp' in line:\n parts = line.split()\n proto = parts[0]\n local_addr = parts[3]\n state = parts[5]\n ip, port = local_addr.rsplit(':', 1)\n port = str(port)\n result[port] = 1\n if state == 'LISTEN':\n if port not in self.charts['ports']:\n self.charts['ports'].add_dimension([port, port, 'absolute'])\n return result", "def port(self, **kw):\n return self.portType(**kw)", "def get_port_by_ip(cfg_facts, ipaddr):\n if ':' in ipaddr:\n iptype = \"ipv6\"\n else:\n iptype = \"ipv4\"\n\n intf = {}\n intf.update(cfg_facts.get('INTERFACE', {}))\n if \"PORTCHANNEL_INTERFACE\" in cfg_facts:\n intf.update(cfg_facts['PORTCHANNEL_INTERFACE'])\n for a_intf in intf:\n for addrs in intf[a_intf]:\n intf_ip = addrs.split('/')\n if iptype == 'ipv6' and ':' in intf_ip[0] and intf_ip[0].lower() == ipaddr.lower():\n return a_intf\n elif iptype == 'ipv4' and ':' not in intf_ip[0] and intf_ip[0] == ipaddr:\n return a_intf\n\n raise Exception(\"Dod not find port for IP %s\" % ipaddr)", "def port(name):\n\n words = name.upper().split('-', 1)\n\n if len(words) == 1:\n words.append(words[0][1])\n\n return int(f\"{ord(words[0][0])}{ord(words[1][0])}\")", "def get_port_number():\n try:\n return os.environ[\"PORT\"]\n except Exception:\n return None", "def get_ptf_port(duthosts, cfg_facts, tbinfo, dut, dut_port):\n\n # get the index of the frontend node to index into the tbinfo dictionary.\n mg_facts = dut.get_extended_minigraph_facts(tbinfo)\n\n if \"portchannel\" in dut_port.lower():\n pc_cfg = cfg_facts['PORTCHANNEL_MEMBER']\n pc_members = pc_cfg[dut_port]\n logger.info(\"Portchannel members %s: %s\", dut_port, list(pc_members.keys()))\n port_list = list(pc_members.keys())\n else:\n port_list = [dut_port]\n\n ret = []\n for port in port_list:\n ret.append(mg_facts['minigraph_ptf_indices'][port])\n\n return ret", "def get_device_system_ports(cfg_facts):\n\n sys_port_slot_dict = cfg_facts['SYSTEM_PORT']\n merge_dict = {}\n for slot in sys_port_slot_dict:\n for port in sys_port_slot_dict[slot]:\n merge_dict[slot + \"|\" + port] = sys_port_slot_dict[slot][port]\n return merge_dict", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")" ]
[ "0.6964796", "0.6683577", "0.653418", "0.65336806", "0.6416702", "0.628645", "0.6283446", "0.6275742", "0.6270378", "0.6266905", "0.62222916", "0.6177558", "0.60762554", "0.60760283", "0.597032", "0.59239864", "0.5915565", "0.59000075", "0.58872294", "0.5862604", "0.5836161", "0.58315146", "0.58273274", "0.58240616", "0.5812932", "0.5805191", "0.58002853", "0.578504", "0.5781893", "0.5761348", "0.5756276", "0.5742697", "0.5732783", "0.57184", "0.57117814", "0.5711716", "0.570991", "0.5696188", "0.56915236", "0.56689036", "0.56583995", "0.5653101", "0.5637652", "0.5634592", "0.56323385", "0.5629474", "0.56279945", "0.56245947", "0.5621757", "0.5599647", "0.5595443", "0.5595058", "0.55869603", "0.5579661", "0.5556104", "0.5544836", "0.5544667", "0.55425876", "0.55422086", "0.55422086", "0.55422086", "0.5539315", "0.5532436", "0.552895", "0.552895", "0.55227005", "0.5521736", "0.5521736", "0.55129", "0.550972", "0.5508424", "0.550545", "0.54943514", "0.5491643", "0.5482636", "0.5476685", "0.5472697", "0.54721785", "0.54721785", "0.54721785", "0.54721785", "0.54721785", "0.54574925", "0.54558665", "0.54471827", "0.54420626", "0.5436816", "0.5434814", "0.5429773", "0.5429309", "0.5421104", "0.54196864", "0.54191226", "0.54159033", "0.5414789", "0.5410652", "0.54064244", "0.54064244", "0.54064244", "0.54064244" ]
0.6833719
1
Add a class that doesn't descend from Pickleable to the pickle whitelist
def addClassToPickleWhitelist(cls):
    """Register cls in the module-level unpickle whitelist, even though it does not descend from Pickleable."""
    unpickleWhitelist_.add(cls)
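A sketch of how such a whitelist is typically enforced on the loading side. The RestrictedUnpickler below is illustrative only: it assumes unpickleWhitelist_ is a module-level set consulted during unpickling, and its definition is repeated here because the original snippet does not show it.

import io
import pickle

# Module-level whitelist populated via addClassToPickleWhitelist(); assumed here
# since the snippet above only shows the registration helper.
unpickleWhitelist_ = set()


class RestrictedUnpickler(pickle.Unpickler):
    """Resolve only globals whose class object has been whitelisted."""

    def find_class(self, module, name):
        for cls in unpickleWhitelist_:
            if cls.__module__ == module and cls.__name__ == name:
                return cls
        raise pickle.UnpicklingError(
            "global '%s.%s' is not in the unpickle whitelist" % (module, name))


def restricted_loads(data):
    """Deserialize data, allowing only whitelisted classes."""
    return RestrictedUnpickler(io.BytesIO(data)).load()

Classes registered through addClassToPickleWhitelist() then round-trip through restricted_loads(), while any other global raises UnpicklingError.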
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_class(self, module, name):\n raise pickle.UnpicklingError(\"global '%s.%s' is forbidden\" %\n (module, name))", "def test__pickle_unpickle(self):\n pass", "def __reduce__(self) -> NoReturn:\n raise TypeError(\n \"can't pickle {} objects\".format(self.__class__.__name__)\n )", "def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class", "def class_exts(cls):\n return set()", "def record_class_examined(self, cls):\n serialized = self.serialize_type(cls)\n if serialized is not None:\n self.classes_examined.add(serialized)", "def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result", "def __reduce_ex__(self, protocol):\n return (_safe_pickle_load, (self.__module__, self.__class__.__name__, self.name))", "def addBanClass(x:ResidueDict)->ResidueDict:\n banClass:str = run(matchStrandToClass(x.struct,x.strand_id))\n x.banClass = banClass\n return x", "def _record_unpatched_classes():\n # type: () -> Dict[str, type]\n installed_packages = _get_installed_modules()\n\n original_classes = {}\n\n for package, orig_path in CLASSES_TO_INSTRUMENT.items():\n if package in installed_packages:\n try:\n original_cls = _import_by_path(orig_path)\n except (AttributeError, ImportError):\n logger.debug(\"[OTel] Failed to import %s\", orig_path)\n continue\n\n original_classes[package] = original_cls\n\n return original_classes", "def try_pickle_dumps(obj):\n try:\n return cloudpickle.dumps(obj)\n except Exception:\n pass\n\n try:\n return pickle.dumps(obj)\n except Exception:\n raise", "def suppressWarningClass(clazz):\n _enabled.insert(0, (clazz, 0))", "def add_managee(self, **saveable_classes):\n check_compliance(saveable_classes)\n for name in saveable_classes:\n if name in self.__dict__:\n logging.warning(\"Attribute of SavableCollection {} already \"\n \"exists, will be replaced\".format(name))\n\n self.__dict__.update(saveable_classes)", "def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)", "def opaque_class(self, classobj):\n self.restrict_class(classobj, None)", "def register_serializer(cls, *, serializer, deserializer):\n context = ray.worker.global_worker.get_serialization_context()\n context._register_cloudpickle_serializer(cls, serializer, deserializer)", "def add_base_classes(x, newclasses):\n bases = list(x.__class__.__bases__)\n if bases[0] is object:\n bases[0] = x.__class__\n if any(x in bases for x in newclasses):\n raise PermitError(\"Cannot insert duplicate classes.\")\n bases = bases + newclasses\n x.__class__ = type(x.__class__.__name__, tuple(bases), x.__dict__)\n return newclasses", "def load_objects(self):\n \n # Load classifier\n with open('../twitterClass/classifier/classifier.p','r') as f:\n self.classifier = cPickle.load(f)\n \n #Load blocked keywords\n regex_str2 = []\n with open('../twitterClass/twitterMiningClass/private/blocked_keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n print key\n regex_str2.append(key[1])\n # create regex compiler for blocked keyword search\n regex_str2 = map(lambda x: 
x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str2)\n self.blocked_keywords_re = re.compile(r'('+'|'.join(regex_str2)+')',re.IGNORECASE)\n \n # Load keywords\n with open('../twitterClass/twitterMiningClass/private/keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n self.keywords[key[0]] = key[1]\n # create regex compiler for keyword search\n regex_str = []\n for keys,pattern in self.keywords.iteritems():\n regex_str.append(pattern)\n regex_str = map(lambda x: x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str)\n self.keywords_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)", "def __init_subclass__(cls):\n available_storages.append({\n \"name\": cls.__name__,\n \"extensions\": cls.extensions,\n \"storage\": cls,\n })", "def restrict_class(self, classobj, vars=None):\n if vars == None: vars = []\n self.instance_vars[classobj] = vars", "def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class", "def __add__(self, other: '__class__') -> '__class__':", "def register_class(obj):\r\n try:\r\n KnownClass.objects.get(module_name=obj.__module__, class_name=obj.__class__.__name__)\r\n except DoesNotExist:\r\n # Create it\r\n KnownClass(module_name = obj.__module__, class_name = obj.__class__.__name__).save()", "def drop_class(self, cls, ignore_instances=False):\n if ignore_instances:\n self.client.command(\n 'DROP CLASS {} UNSAFE'.format(cls.registry_name))\n else:\n self.client.command(\n 'DROP CLASS {}'.format(cls.registry_name))", "def extension(klass):\n registry.register(klass)\n return klass", "def _register_subclasses(cls):\n cls._format_to_serializer = {}\n cls._extension_to_serializer = {}\n subclasses = collections.deque(cls.__subclasses__())\n while subclasses:\n subclass = subclasses.popleft()\n if subclass.format is not None:\n cls._format_to_serializer[subclass.format] = subclass\n if subclass.extension is not None:\n cls._extension_to_serializer[subclass.extension] = subclass\n subclasses.extend(subclass.__subclasses__())", "def register(cls):\n if not issubclass(cls, Fuzzer):\n raise TypeError(\"Expecting a Fuzzer, not '%s'\" % type(cls))\n _registered.append(cls)", "def register_classes():\n CoaddSplit.register_class()\n CoaddSplit_SG.register_class()", "def test_drop_class(self, excl, value):\n e = exclude(*excl)\n assert e(fields(C).a, value) is False", "def process_class_list(self, module, classes):", "def mix_in(self, object):\n object.__classes.__bases__ += (MolecularDescriptorMixIn,)\n # check to see if you're already there !!", "def set_class_list(self, L):\n\t\tself.class_list = L", "def enableWarningClass(clazz):\n _enabled.insert(0, (clazz, 1))", "def can(obj, file, protocol=2):\n if type(file) is str: f=open(file,'wb')\n else: f=file\n\n cPickle.dump(obj, f, protocol=protocol)\n\n if type(file) is str: f.close()", "def additional_cloning_checks(self):\n pass", "def __class__(self, ???):", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def transparent_serialize(cls):\n return _create_wrapper_cls(cls, store_init_parameters=False)", "def test_drop_class(self, incl, value):\n i = include(*incl)\n assert i(fields(C).a, value) is False", "def add_class(self, name):\n if name is not None and not self.has_class(name):\n self._cached_class.append(name)\n 
self._update_class()", "def test_pickle_save(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)", "def __getstate__(self):\n # construct a list of unpicklable entties and exclude them from pickling\n nope = ['_divisionClassifier', '_assembledObjects']\n d = dict((key, val) for key, val in self.__dict__.items() if key not in nope) # deepcopy needed\n return d", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def register_classes():\n CollectLimits.register_class()\n CollectLimits_SG.register_class()\n CollectStackedLimits_SG.register_class()", "def add_class(wire_version, cls, members):\n memid = 0\n\n sig = loxi_utils.class_signature(members)\n if cls in of_g.unified:\n uc = of_g.unified[cls]\n if wire_version in uc:\n debug(\"Error adding %s to unified. Wire ver %d exists\" %\n (cls, wire_version))\n sys.exit(1)\n uc[wire_version] = {}\n # Check for a matching signature\n for wver in uc:\n if type(wver) != type(0): continue\n if wver == wire_version: continue\n if not \"use_version\" in uc[wver]:\n if sig == loxi_utils.class_signature(uc[wver][\"members\"]):\n log(\"Matched %s, ver %d to ver %d\" % \n (cls, wire_version, wver))\n # have a match with existing version\n uc[wire_version][\"use_version\"] = wver\n # What else to do?\n return\n else: # Haven't seen this entry before\n log(\"Adding %s to unified list, ver %d\" % (cls, wire_version))\n of_g.unified[cls] = dict(union={})\n uc = of_g.unified[cls]\n\n # At this point, need to add members for this version\n uc[wire_version] = dict(members = members)\n\n # Per member processing:\n # Add to union list (I'm sure there's a better way)\n # Check if it's a list\n union = uc[\"union\"]\n if not cls in of_g.ordered_members:\n of_g.ordered_members[cls] = []\n for member in members:\n m_name = member[\"name\"]\n m_type = member[\"m_type\"]\n if m_name.find(\"pad\") == 0:\n continue\n if m_name in union:\n if not m_type == union[m_name][\"m_type\"]:\n debug(\"ERROR: CLASS: %s. VERSION %d. MEMBER: %s. TYPE: %s\" %\n (cls, wire_version, m_name, m_type))\n debug(\" Type conflict adding member to unified set.\")\n debug(\" Current union[%s]:\" % m_name)\n debug(union[m_name])\n sys.exit(1)\n else:\n union[m_name] = dict(m_type=m_type, memid=memid)\n memid += 1\n if not m_name in of_g.ordered_members[cls]:\n of_g.ordered_members[cls].append(m_name)", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def __init_subclass__(cls, **kwargs):\n\n super().__init_subclass__(**kwargs)\n if hasattr(cls, \"suspicion_func_num\"):\n cls.runnable_managers.append(cls)", "def __init__(self):\n self.classes = {}", "def register(cls, class_to_register):\n cls.registered_loaders.append(class_to_register)\n return class_to_register", "def insert_class(self, user_label: str) -> None:\n logger.debug(\"Adds \\\"{}\\\" to the user label class\", user_label)\n\n if self.cluster_k == -1:\n logger.debug(\"Clustering is disabled. 
Hence, just added to the list in set-semantic (current length: {})\",\n len(self.classes))\n final_label_tokens = self.convert_label(user_label=user_label)\n if final_label_tokens not in self.classes:\n self.classes.append(final_label_tokens)\n else:\n logger.debug(\"\\\"{}\\\" was already in the list!\", \" \".join(final_label_tokens))", "def _typechecked_class(cls):\n for name, func in cls.__dict__.items():\n if not name.startswith('__'):\n setattr(cls, name, _typechecked_func(func))\n return cls", "def dumpWithPreobjects(self, preObjects, *obj, **kw):\n\n dis = kw.get(\"dis\")\n try:\n toBeDumped = (preObjects, obj[0] if len(obj) == 1 else obj)\n\n # ensure that the pickler does not touch sys.modules more than required\n with PEP302ImportDetector(raiseOn=kw.get(\"raiseOn\")) as detector:\n sys_modules = dict(sys.modules)\n p = self.pickler.dumps(toBeDumped,\n mangleModuleName=kw.get(\"mangleModuleName\"),\n object_dispatch=kw.get(\"object_dispatch\"))\n sys_modules2 = dict(sys.modules)\n imports = set()\n for n in detector.imports:\n sys_modules2.pop(n, None)\n for i in self.IMPORTS_TO_IGNORE:\n if n.startswith(i):\n break\n else:\n imports.add(n)\n self.assertEqual(sys_modules, sys_modules2)\n self.assertEqual(imports, set())\n\n self.pickler.dis(p, out=StringIO())\n except:\n exinfo = sys.exc_info()\n l = []\n try:\n _sPickle.Pickler(l, 2, object_dispatch=kw.get(\"object_dispatch\")).dump(toBeDumped)\n except Exception:\n try:\n l.append(pickle.STOP)\n pickletools.dis(b\"\".join(l), out=sys.stderr)\n except:\n traceback.print_exc(limit=1, file=sys.stderr)\n raise exinfo[0], exinfo[1], exinfo[2]\n\n if dis is None:\n dis = self.dis\n if dis:\n self.pickler.dis(p)\n print(\"len(pickle): \", len(p))\n return p", "def register(cls, D: DONLOADER_CLASS) -> DONLOADER_CLASS:\r\n ...", "def import_and_add(self, import_str):\n # loaded_classes.clear()\n\n try:\n import_module(import_str)\n except ImportError as e:\n traceback.print_exc()\n logger.warning(\"Tried to import `%s` and failed, ignoring\", import_str)\n logger.warning(\"Error: %s\", e)\n # else:\n # for k in loaded_classes:\n # if k.__module__.startswith(\"dataclay\"):\n # # dataClay contrib classes should not be registered here\n # continue\n # else:\n # self.add_class(k)", "def load(cls, filepath):\n with open(filepath) as filehandler:\n classifier = pickle.load(filehandler)\n\n if not isinstance(classifier, Classifier):\n raise ValueError(\"Pickled object is not a Classifier\")\n\n return classifier", "def _register(registry, cls):\n assert issubclass(cls, Registrable)\n\n reg_attr = f\"_{cls.__name__}_registered\"\n if getattr(cls, reg_attr, False):\n return cls\n\n name = cls.__fieldtype__()\n assert (\n name not in registry\n ), f\"{cls!r} cannot be registered as {name!r}: already used by {registry[name]!r}\"\n\n registry[name] = cls\n setattr(cls, reg_attr, True)\n return cls", "def __post_init__(self) -> None:\n setattr(self, _FROZEN, True)", "def deregister_serializer(cls):\n context = ray.worker.global_worker.get_serialization_context()\n context._unregister_cloudpickle_reducer(cls)", "def test_pickle_load(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)\n\n l = self.plugin.load_data()\n self.assertIn(4, l)", "def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls", "def pickle_nn(clf):\n\n filename = 'nnMLPClass'\n outfile = open(filename, 'wb')\n pickle.dump(clf, outfile)\n outfile.close()", "def save(self, fname):\n\n def is_picklable(obj):\n try:\n 
pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True\n\n with open(fname, 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)", "def test_copy_pickle(self):\n\n # Test that we can pickle and unpickle\n # We force a pattern that contains all custom types:\n # `Selector`, `NullSelector`, `SelectorTag`, `SelectorAttribute`,\n # `SelectorNth`, `SelectorLang`, `SelectorList`, and `Namespaces`\n p1 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}\n )\n sp1 = pickle.dumps(p1)\n pp1 = pickle.loads(sp1)\n self.assertTrue(pp1 == p1)\n\n # Test that we pull the same one from cache\n p2 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}\n )\n self.assertTrue(p1 is p2)\n\n # Test that we compile a new one when providing a different flags\n p3 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}, flags=0x10\n )\n self.assertTrue(p1 is not p3)\n self.assertTrue(p1 != p3)\n\n # Test that the copy is equivalent, but not same.\n p4 = copy.copy(p1)\n self.assertTrue(p4 is not p1)\n self.assertTrue(p4 == p1)\n\n p5 = copy.copy(p3)\n self.assertTrue(p5 is not p3)\n self.assertTrue(p5 == p3)\n self.assertTrue(p5 is not p4)", "def remove_base_class(x, cls):\n bases = list(x.__class__.__bases__)\n original_class = bases[0]\n other_classes = bases[1:]\n if cls in other_classes:\n other_classes.remove(cls)\n else:\n raise PermitError(\"Class {0} not in list of base classes {1}\".format(cls, bases))\n if len(other_classes) == 0:\n x.__class__ = original_class\n else:\n x.__class__ = type(x.__class__.__name__, tuple([original_class] + other_classes), x.__dict__)\n return cls", "def operation_pickling_check(instance, sim):\n pickling_check(instance)\n sim.operations += instance\n sim.run(0)\n pickling_check(instance)", "def pickle(obj):\n return pickletools.optimize(cPickle.dumps(obj))", "def _patch_remaining_classes(original_classes):\n # type: (Dict[str, type]) -> None\n # check which classes have actually been instrumented\n instrumented_classes = {}\n\n for package in list(original_classes.keys()):\n original_path = CLASSES_TO_INSTRUMENT[package]\n\n try:\n cls = _import_by_path(original_path)\n except (AttributeError, ImportError):\n logger.debug(\n \"[OTel] Failed to check if class has been instrumented: %s\",\n original_path,\n )\n del original_classes[package]\n continue\n\n if not cls.__module__.startswith(\"opentelemetry.\"):\n del original_classes[package]\n continue\n\n instrumented_classes[package] = cls\n\n if not instrumented_classes:\n return\n\n # replace occurrences of the original unpatched class in sys.modules\n for module_name, module in sys.modules.copy().items():\n if (\n module_name.startswith(\"sentry_sdk\")\n or module_name in sys.builtin_module_names\n ):\n continue\n\n for package, original_cls in original_classes.items():\n for var_name, var in vars(module).copy().items():\n if var == original_cls:\n logger.debug(\n \"[OTel] Additionally patching %s from %s\",\n original_cls,\n module_name,\n )\n\n setattr(module, var_name, instrumented_classes[package])", "def load(cls, file: str) -> 'Serializable':\n with open(file, \"rb\") as f:\n instance = dill.load(f)\n if not isinstance(instance, cls):\n raise TypeError(f\"Unpickled object is not of type {cls}\")\n return instance", "def __subclasshook__(self, ???):", "def iter_cls(*classes, blacklist=tuple()):\n for bases in permutations(classes):\n if 
bases not in blacklist:\n yield type('_'.join(c.__name__ for c in bases), bases, {})", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def __init__(self):\n self.instantiable = {self: self}\n self.is_generic = False", "def objects():\n subclasses = StorableObject.descendants()\n return {subclass.__name__: subclass for subclass in subclasses\n if not subclass.__module__.startswith(\n 'openpathsampling.experimental.storage'\n )}", "def extend_instance(obj, cls):\n base_cls = obj.__class__\n base_cls_name = obj.__class__.__name__\n obj.__class__ = type(base_cls_name, (base_cls, cls), {})", "def test_dump_load_ignored():\n\n class Config(DumpableAttrs):\n xx = Ignored\n\n # Test dumping\n assert len(attr.fields(Config)) == 0\n cfg = Config()\n s = yaml.dump(cfg)\n assert (\n s\n == \"\"\"\\\n!Config {}\n\"\"\"\n )\n assert yaml.load(s) == cfg\n\n # Test loading\n s = \"\"\"\\\n!Config\nxx: 1\n\"\"\"\n assert yaml.load(s) == Config()", "def add_ignore_module(modules: List[Any]):\n global BUILTIN_LIKELY_MODULES\n for module in modules:\n if module not in BUILTIN_LIKELY_MODULES:\n BUILTIN_LIKELY_MODULES.append(module)", "def test_new_instance_every_time(self):\n registry = ClassRegistry(attr_name='element')\n registry.register(Wartortle)\n\n self.assertIsNot(registry['water'], registry['water'])", "def secure_class(cls): # type: ignore\n return cls", "def serialize_cls(cls):\n return _create_wrapper_cls(cls)", "def deploy_class( self, target, type_name, fti ):\n write = self.stream.write\n tptypes = getToolByName( target, 'portal_types', None )\n\n if tptypes is None:\n write( 'No portal_skins' )\n elif not tptypes.getTypeInfo( type_name ):\n tptypes.addType( type_name, fti[0] )\n write( 'Added type object for %s \\n' % type_name )\n else:\n write( 'Skipping type object for %s (already exists) \\n' % type_name )", "def _copy_obj(src, dest, skip_list = None):\n if type(dest) == bytes:\n # instantiate a new destination class of the specified type name?\n dest = new.classobj(dest, (), {})\n for x in dir(src):\n # skip special and private fields\n if x.startswith(\"__\") and x.endswith(\"__\"):\n continue\n # skip items in the skip list\n if skip_list and x in skip_list:\n continue\n t = getattr(src, x)\n # skip callable\n if callable(t):\n continue\n setattr(dest, x, t)\n return dest", "def pickle(self, pickleFile):\n if pickleFile.find('.gz') < 0:\n pickleFile += '.gz'\n with gzip.open(pickleFile, 'w') as f:\n pickle.dump(self, f)", "def whitelist(self, message):\n user = self.ts.get_user(message)\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n try:\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n holder_list = json.load(f)\n except json.decoder.JSONDecodeError:\n holder_list = []\n if msg_list[1] not in holder_list:\n holder_list.append(msg_list[1])\n with codecs.open('whitelist.json', 'w', 'utf-8') as f:\n json.dump(holder_list, f, ensure_ascii=False)\n self._add_to_whisper_queue(user, '{} has been added to the whitelist'.format(msg_list[1]))\n else:\n self._add_to_whisper_queue(user, '{} is already in the whitelist!'.format(msg_list[1]))", "def register_classes():\n DiffuseCompChain.register_class()\n CatalogCompChain.register_class()\n DiffuseAnalysisChain.register_class()", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def transparent_class(self, classobj):\n del self.instance_vars[classobj]", "def _unpickle_cached_list(cls, *args, **kwargs):\n new_list = cls(*args, 
**kwargs)\n new_list._unpack = True\n return new_list", "def whitelist_file(self, fkey):\n self.whitelist.update([fkey])", "def extend_class(cls):\n return lambda f: (setattr(cls, f.__name__, f) or f)", "def __init__(self, exclusion):\n self.exclusion = exclusion", "def register_classes():\n AnalyzeExtension.register_class()\n AnalyzeExtension_SG.register_class()", "def from_pickle(cls, picklefile):\n return cls(unpickle_file(picklefile))", "def identify_class(self, cls):", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def serialize(self, fout: t.BinaryIO) -> None:\n fout.write(pickle.dumps(self))", "def __init__(self, *args, **kwargs):\n self.whitelist = set(kwargs.pop('whitelist', []))\n self.blacklist = set(kwargs.pop('blacklist', []))\n\n super(MyTestRunner, self).__init__(*args, **kwargs)", "def load_classes(excel_data, slot_count):\n return True", "def set_structures(self, excluded=None):\n self.__excluded = excluded\n if not self.__structures: \n with open('structures.pkl', 'rb') as input:\n structures = pickle.load(input)\n if type(structures)==dict: self.__structures = structures\n else: self.__structures = structures.get_structures()\n for atoms in sorted(self.__structures.items()): atoms[1].exclude=False\n else: pass\n if self.__excluded != None: \n for exc in self.__excluded: self.__structures[exc].exclude = True", "def add_base_class(\n existing_object: Any,\n import_method: Callable[[Any], Any],\n export_method: Callable[[Any], Any],\n):\n existing_object.export_data = types.MethodType(export_method, existing_object)\n existing_object.import_data = types.MethodType(import_method, existing_object)", "def ProxyType(cls):\n PROXY_TYPES[cls.__namespace__] = cls\n for clsName in getattr(cls, '__known_classes__', ()):\n PROXY_TYPES[clsName] = cls\n return cls" ]
[ "0.5848995", "0.5714878", "0.5440713", "0.5331448", "0.53248143", "0.5274852", "0.5263136", "0.5262969", "0.5250038", "0.52005446", "0.5171152", "0.515845", "0.5151149", "0.51253116", "0.51061636", "0.51053673", "0.5073575", "0.5070191", "0.5066985", "0.5048813", "0.50263953", "0.50203323", "0.5012834", "0.5005724", "0.4989039", "0.49778858", "0.49598557", "0.4926569", "0.49202478", "0.49189088", "0.49086687", "0.489354", "0.48860595", "0.4875494", "0.48679614", "0.4857145", "0.48551154", "0.48497707", "0.48490286", "0.48445678", "0.48365763", "0.4829974", "0.4787054", "0.47771528", "0.4755461", "0.47474274", "0.47364247", "0.47351876", "0.47338584", "0.47328186", "0.473226", "0.47287926", "0.4727304", "0.47096038", "0.47095394", "0.4706112", "0.4703039", "0.470136", "0.47010314", "0.4698296", "0.46781763", "0.46730295", "0.4670554", "0.46678746", "0.46619073", "0.4657857", "0.4651515", "0.46502838", "0.46496323", "0.46478432", "0.46435827", "0.46307033", "0.46227902", "0.46224222", "0.46208486", "0.46187028", "0.4617985", "0.4613701", "0.46103182", "0.46074197", "0.46038345", "0.46030125", "0.4594158", "0.45920518", "0.4587662", "0.4586592", "0.45850003", "0.4578942", "0.4576605", "0.45727056", "0.45686892", "0.45517692", "0.45511878", "0.45398438", "0.4536688", "0.45338285", "0.45338234", "0.45281544", "0.45277676", "0.45268655" ]
0.8570413
0
Override the serializer to use 'override' as the identifier for instances of 'cls'. This is primarily to shorten the amount of data in the representation and to allow the representation to remain constant even if classes are moving around or changing names. override may not be a tuple.
def addOverride(cls, override):
    assert cls not in locationTypeOverrides_
    assert not isinstance(override, tuple)
    locationTypeOverrides_[cls] = override
    locationTypes_[override] = cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(cls, *args, **kwargs):\n return serialize_cls(cls)(*args, **kwargs)", "def serialize_cls(cls):\n return _create_wrapper_cls(cls)", "def default_serializer(_cls: Type[Any], obj: Any) -> Any:", "def __call__(\n self, cls: Type, key: Optional[str] = None, override: bool = False, show_deprecation: bool = True\n ) -> Type:\n if key is None:\n key = cls.__name__\n elif not isinstance(key, str):\n raise TypeError(f\"`key` must be a str, found {key}\")\n\n if key not in self or override:\n self[key] = cls\n\n self._deprecation(show_deprecation)\n return cls", "def serialize(\n _cls: Optional[Type[T]] = None,\n rename_all: Optional[str] = None,\n reuse_instances_default: bool = False,\n convert_sets_default: bool = False,\n serializer: Optional[SerializeFunc] = None,\n tagging: Tagging = DefaultTagging,\n type_check: TypeCheck = NoCheck,\n serialize_class_var: bool = False,\n **kwargs: Any,\n) -> Type[T]:\n\n def wrap(cls: Type[T]) -> Type[T]:\n tagging.check()\n\n # If no `dataclass` found in the class, dataclassify it automatically.\n if not is_dataclass(cls):\n dataclass(cls)\n\n g: Dict[str, Any] = {}\n\n # Create a scope storage used by serde.\n # Each class should get own scope. Child classes can not share scope with parent class.\n # That's why we need the \"scope.cls is not cls\" check.\n scope: Optional[Scope] = getattr(cls, SERDE_SCOPE, None)\n if scope is None or scope.cls is not cls:\n scope = Scope(\n cls,\n reuse_instances_default=reuse_instances_default,\n convert_sets_default=convert_sets_default,\n )\n setattr(cls, SERDE_SCOPE, scope)\n\n # Set some globals for all generated functions\n g[\"cls\"] = cls\n g[\"copy\"] = copy\n g[\"serde_scope\"] = scope\n g[\"SerdeError\"] = SerdeError\n g[\"raise_unsupported_type\"] = raise_unsupported_type\n g[\"enum_value\"] = enum_value\n g[\"is_dataclass\"] = is_dataclass\n g[\"typename\"] = typename # used in union functions\n g[\"is_instance\"] = is_instance # used in union functions\n g[\"to_obj\"] = to_obj\n g[\"typing\"] = typing\n g[\"Literal\"] = Literal\n g[\"TypeCheck\"] = TypeCheck\n g[\"NoCheck\"] = NoCheck\n g[\"coerce\"] = coerce\n if serializer:\n g[\"serde_custom_class_serializer\"] = functools.partial(\n serde_custom_class_serializer, custom=serializer\n )\n\n # Collect types used in the generated code.\n for typ in iter_types(cls):\n # When we encounter a dataclass not marked with serialize, then also generate serialize\n # functions for it.\n if is_dataclass_without_se(typ):\n # We call serialize and not wrap to make sure that we will use the default serde\n # configuration for generating the serialization function.\n serialize(typ)\n\n if is_primitive(typ) and not is_enum(typ):\n continue\n g[typename(typ)] = typ\n\n # render all union functions\n for union in iter_unions(cls):\n union_args = list(type_args(union))\n union_key = union_func_name(UNION_SE_PREFIX, union_args)\n add_func(scope, union_key, render_union_func(cls, union_args, tagging), g)\n scope.union_se_args[union_key] = union_args\n\n for f in sefields(cls, serialize_class_var):\n if f.skip_if:\n g[f.skip_if.name] = f.skip_if\n if f.serializer:\n g[f.serializer.name] = f.serializer\n\n add_func(\n scope, TO_ITER, render_to_tuple(cls, serializer, type_check, serialize_class_var), g\n )\n add_func(\n scope,\n TO_DICT,\n render_to_dict(cls, rename_all, serializer, type_check, serialize_class_var),\n g,\n )\n add_func(scope, TYPE_CHECK, render_type_check(cls), g)\n\n logger.debug(f\"{typename(cls)}: {SERDE_SCOPE} {scope}\")\n\n return cls\n\n if _cls 
is None:\n return wrap # type: ignore\n\n if _cls in GENERATION_STACK:\n return _cls\n\n GENERATION_STACK.append(_cls)\n try:\n return wrap(_cls)\n finally:\n GENERATION_STACK.pop()", "def serializer_class(self):", "def _register_subclasses(cls):\n cls._format_to_serializer = {}\n cls._extension_to_serializer = {}\n subclasses = collections.deque(cls.__subclasses__())\n while subclasses:\n subclass = subclasses.popleft()\n if subclass.format is not None:\n cls._format_to_serializer[subclass.format] = subclass\n if subclass.extension is not None:\n cls._extension_to_serializer[subclass.extension] = subclass\n subclasses.extend(subclass.__subclasses__())", "def _make_serialize(\n cls_name: str,\n fields: Iterable[Union[str, Tuple[str, Type[Any]], Tuple[str, Type[Any], Any]]],\n *args: Any,\n rename_all: Optional[str] = None,\n reuse_instances_default: bool = False,\n convert_sets_default: bool = False,\n serializer: Optional[SerializeFunc] = None,\n tagging: Tagging = DefaultTagging,\n type_check: TypeCheck = NoCheck,\n serialize_class_var: bool = False,\n **kwargs: Any,\n) -> Type[Any]:\n C = dataclasses.make_dataclass(cls_name, fields, *args, **kwargs)\n C = serialize(\n C,\n rename_all=rename_all,\n reuse_instances_default=reuse_instances_default,\n convert_sets_default=convert_sets_default,\n serializer=serializer,\n tagging=tagging,\n type_check=type_check,\n serialize_class_var=serialize_class_var,\n **kwargs,\n )\n return C", "def get_serializer_class(self, *args, **kwargs):\n if self.action == 'list':\n return self.serializer_list_class\n else:\n return self.serializer_class", "def identify_class(self, cls):", "def __new__(cls, *args, **kwargs):\n return get_override_class(cls, cls._default_class)(*args, **kwargs)", "def transparent_serialize(cls):\n return _create_wrapper_cls(cls, store_init_parameters=False)", "def __call__(self, cls):\n cls_dict = dict(cls.__dict__)\n\n def wrap_str(w_self):\n return self.pformat(w_self)\n\n cls_dict['__repr__'] = wrap_str\n return type(cls.__name__, cls.__bases__ if hasattr(cls, \"__bases__\") else (), cls_dict)", "def get_serializer_class(self):\n pk_lookup, dataid_lookup = self.lookup_fields\n form_pk = self.kwargs.get(pk_lookup)\n dataid = self.kwargs.get(dataid_lookup)\n fmt = self.kwargs.get(\"format\", self.request.GET.get(\"format\"))\n sort = self.request.GET.get(\"sort\")\n fields = self.request.GET.get(\"fields\")\n if fmt == Attachment.OSM:\n serializer_class = OSMSerializer\n elif fmt == \"geojson\":\n serializer_class = GeoJsonSerializer\n elif fmt == \"xml\":\n serializer_class = DataInstanceXMLSerializer\n elif (\n form_pk is not None\n and dataid is None\n and form_pk != self.public_data_endpoint\n ):\n if sort or fields:\n serializer_class = JsonDataSerializer\n else:\n serializer_class = DataInstanceSerializer\n else:\n serializer_class = super().get_serializer_class()\n\n return serializer_class", "def type(cls):", "def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls", "def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Metaclass, cls).__call__(*args, **kwargs)\n return cls._instance[cls]", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return SillaSerializer\n else:\n return SillaSerializer", "def wrap(cls, orig):\n # hack to give the timestamp this class' specialized methods\n orig.__class__ = cls\n return orig", "def dataclass(self, arg: SeField[Any]) -> str:\n if arg.flatten:\n 
flattened = []\n for f in sefields(arg.type, self.serialize_class_var):\n f.parent = arg # type: ignore\n flattened.append(self.render(f))\n return \", \".join(flattened)\n else:\n return (\n f\"{arg.varname}.{SERDE_SCOPE}.funcs['{self.func}']({arg.varname},\"\n \" reuse_instances=reuse_instances, convert_sets=convert_sets)\"\n )", "def register_serializer(cls, class_type, serializer):\n cls._serializers.update({class_type:serializer})", "def custom_field_serializer(self, arg: SeField[Any]) -> str:\n assert arg.serializer\n return f\"{arg.serializer.name}({arg.varname})\"", "def extend_instance(obj, cls):\n base_cls = obj.__class__\n base_cls_name = obj.__class__.__name__\n obj.__class__ = type(base_cls_name, (base_cls, cls), {})", "def get_serializer_class(self):\n #overide function this is a fun that called to retrive the serailizer class\n #for perticular request\n #this fun are used for wanted to chang the serailzer class for the different action\n #that are available on the recip0e viewset\n if self.action == 'retrieve':\n print('okkkkkkkkkkkkw')\n return serializers.RecipeDetailSerializer\n elif self.action == 'upload_image':\n print('okkkkkkkkkkkkkkkkk')\n return serializers.RecipeImageSerailzer\n\n return self.serializer_class", "def __new__(cls, classname, bases, classdict):\n\n import re\n import keyword\n import inspect\n\n re_mangle = re.compile(r'[A-Za-z][a-z]+|[A-Z]+(?=$|[A-Z0-9])|\\d+')\n re_id = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')\n re_getdoc = re.compile(r'^\\s*[A-Za-z_][A-Za-z0-9_]*\\(self\\)')\n re_setdoc = re.compile(r'^\\s*[A-Za-z_][A-Za-z0-9_]*\\(self,[^,)]+\\)')\n\n #convert ACapitalCasedStringXYZ123 -> a_capital_cased_string_xyz_123\n mangle_name = lambda x: ('_'.join(re_mangle.findall(x))).lower()\n\n get_methods = set()\n set_methods = set()\n swig_setter = classdict.get('__swig_setmethods__', {})\n\n allowed_methods = [\n 'Annotation', \n 'AnnotationString', \n 'Notes', \n 'NotesString', \n ]\n\n #only search for get/set methods\n #we assume that any unset method also has either get or set\n #a small number of set without get cases exist so we can't only\n #search for get methods\n for k, v in classdict.items():\n name = k[3:]\n prefix = k[:3]\n mangled = mangle_name(name)\n if name:\n if callable(v):\n if re_id.match(mangled) and mangled not in keyword.kwlist:\n if prefix == 'get':\n get_methods.add(name)\n elif prefix == 'set':\n set_methods.add(name)\n\n for name in get_methods | set_methods:\n\n mangled = mangle_name(name)\n\n #ListOfFoobars -> foobars\n if mangled.startswith('list_of_'):\n mangled = mangled[8:]\n\n getter = setter = deleter = None\n if name in get_methods:\n getter = classdict['get'+name]\n \n #this is a very dirty way of checking if the get method\n #requires extra arguments (and hence cannot be a property)\n #it should be possible to do this properly in SWIG?\n try:\n argspec = inspect.getargspec(getter)\n numargs = len(argspec.args)\n if numargs > 1 or (numargs == 1 and argspec.args[0] != 'self') \\\n or (argspec.varargs!=None and name not in allowed_methods and not name.startswith('ListOf') ):\n continue\n except:\n continue\n\n #use the c-level get function if the python function\n #only consists of a call to it\n cname = classname + '_get' + name\n #test if function is \"return _libsbml.CLASS_getNAME(__args__)\"\n try:\n if getter.func_code.co_names == ('_libsbml', cname):\n getter = getattr(_libsbml, cname)\n except:\n if getter.__code__.co_names == ('_libsbml', cname):\n getter = getattr(_libsbml, cname)\n \n if name in 
set_methods:\n setter = classdict['set'+name]\n try:\n argspec = inspect.getargspec(getter)\n numargs = len(argspec.args)\n if numargs > 1 and argspec.args[0] == 'self':\n cname = classname + '_set' + name\n try:\n if setter.func_code.co_names == ('_libsbml', cname):\n setter = getattr(_libsbml, cname)\n except:\n if setter.__code__.co_names == ('_libsbml', cname):\n setter = getattr(_libsbml, cname)\n \n #property fget does not get intercepted by __getattr__\n #but fset does, so we implement property setting via\n #the __swig_setmethods__ dict\n swig_setter[mangled] = setter\n continue\n except:\n pass\n \n if 'unset' + name in classdict:\n deleter = classdict['unset'+name]\n\n try:\n argspec = inspect.getargspec(getter)\n numargs = len(argspec.args)\n if numargs == 1 and argspec.args[0] == 'self' and \\\n (argspec.varargs==None or name in allowed_methods):\n cname = classname + '_unset' + name\n try:\n if deleter.func_code.co_names == ('_libsbml', cname):\n deleter = getattr(_libsbml, cname) \n except:\n if deleter.__code__.co_names == ('_libsbml', cname):\n deleter = getattr(_libsbml, cname) \n except:\n pass\n\n if getter or setter or deleter:\n #fset is technically redundant since the method is dispatched\n #via _swig_setattr rather than through the property due to that\n #function not delegating to object.__setattr__ which properly\n #handles properties\n classdict[mangled] = property(fget=getter, fset=setter, fdel=deleter)\n\n def __repr__(self):\n desc = self.__class__.__name__\n if hasattr(self, '__len__'):\n desc += '[%s]' % self.__len__()\n if hasattr(self, 'id') and self.id:\n desc += ' %s' % self.id\n if hasattr(self, 'name') and self.name:\n desc += ' \"%s\"' % self.name\n return '<' + desc + '>'\n \n if classdict.get('__repr__', None) in (_swig_repr, None):\n classdict['__repr__'] = __repr__\n\n\n return type.__new__(cls, classname, bases, classdict)", "def format_yaml_tag(subcls):\n return f'!{subcls.__name__}Class'", "def __new__(metacls, name, bases, classdict):\n # classdict is not always a dict wtf\n if not isinstance(classdict, dict):\n classdict = dict(classdict)\n\n for (key, value) in iteritems(classdict):\n if isinstance(value, TraitType):\n value.name = key\n elif inspect.isclass(value):\n if issubclass(value, TraitType):\n value_inst = value()\n value_inst.name = key\n classdict[key] = value_inst\n\n return super(MetaModel, metacls).__new__(metacls, name, bases, classdict)", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return self.serializer_classes.get('retrieve')\n elif self.action == 'create':\n return self.serializer_classes.get('create')\n elif self.action == 'update':\n return self.serializer_classes.get('update')\n else:\n return self.serializer_classes.get('default')", "def choose_class(self, *args, **kwargs):", "def get_serializer(self, *args, **kwargs):\n kwargs['part_detail'] = True\n kwargs['location_detail'] = True\n kwargs['supplier_part_detail'] = True\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def __init_subclass__(cls, *args, **kwargs):\n super().__init_subclass__(*args, **kwargs)\n if cls._for_code:\n METHODS_MAP_CODE[cls._identifier] = cls\n else:\n METHODS_MAP_DATA[cls._identifier] = cls", "def _cls_repr(self):\n # XXX For now we do not expect any pure non-specialized\n # collection , thus just override in derived classes\n raise NotImplementedError, \"Class %s should override _cls_repr\" \\\n % self.__class__.__name__", "def get_serializer_class(self):\n if 
self.action == 'create':\n return self.serializer_classes.get('create')\n else:\n return self.serializer_classes.get('default')", "def serialize(self):\n raise NotImplementedError(\"Abstract class, implemented in sub class\")", "def __class__(self, ???):", "def get_serializer(self, *args, **kwargs):\n if self.__class__.serializer_class is not None:\n cls = self.__class__.serializer_class\n else:\n if self.action == 'list' and hasattr(self.__class__,\n 'list_serializer_class'):\n cls = self.__class__.list_serializer_class\n elif hasattr(self.__class__, 'detail_serializer_class'):\n cls = self.__class__.detail_serializer_class\n else:\n # error handling\n return super().get_serializer(*args, **kwargs)\n\n # default the context\n kwargs['context'] = self.get_serializer_context()\n\n return cls(*args, **kwargs)", "def overload_classmethod(typ, attr, **kwargs):\n return _overload_method_common(types.TypeRef(typ), attr, **kwargs)", "def get_serializer_class(self):\n return self.serializer_class", "def get_serializer_class(self):\n if self.action in ('retrieve', 'list', 'update', 'partial_update'):\n return ListaPedidoSerializer\n return PedidoSerializer", "def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)", "def register_serializer(cls, *, serializer, deserializer):\n context = ray.worker.global_worker.get_serialization_context()\n context._register_cloudpickle_serializer(cls, serializer, deserializer)", "def get_serializer_class(self):\n if self.action in (\"list\",):\n return serializers.NotesGroupListSerializer\n\n return serializers.NotesGroupDetailSerializer", "def cls_identifier(cls):\n if hasattr(cls, 'get_cls_identifier'):\n return cls.get_cls_identifier()\n return Encoder.default_identifier(cls)", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.ProductDetailSerializer\n\n return self.serializer_class", "def __getattribute__(self,attr):\n if attr in super(BaseTransformer,self).__getattribute__('_overrides'):\n return super(BaseTransformer,self).__getattribute__('_'+attr)\n return super(BaseTransformer,self).__getattribute__(attr)", "def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return UserReadSerializer\n else:\n return UserSerializer", "def get_serializer_class(self, *args, **kwargs):\n\n if self.request.method == 'GET':\n serializer_class = FavoriteModelSerializer\n\n elif self.request.method == 'POST':\n serializer_class = FavoriteCreateSerializer\n\n elif self.action == 'destroy':\n serializer_class = FavoriteDestorySerializer\n\n elif self.action == 'destroy_all':\n serializer_class = FavoriteDestroyAllSerializer\n\n return serializer_class", "def serialize_instance(obj):\n d = {'__classname__': type(obj).__name__}\n d.update(vars(obj))\n return d", "def get_serializer_class(self):\n\n if self.action in ['list', 'retrieve']:\n return OrderListSerializer\n else:\n return OrderSerializer", "def __new__(cls, *args, **kwargs):\n # create the instance by calling the base class __new__\n obj = cls.__base__.__new__(cls)\n # using super() did not work here -- why??\n # set the instance attributes to defaults\n for attr, typ in cls._attrs_to_save.items():\n setattr(obj, attr, 
typ.default)\n return obj", "def withOverrides(overrides):", "def decorate_class(cls, klass: type, decorate_subclasses=False, **setting_kwds) -> None:\n assert isinstance(klass, type) # in \"debug\" mode only\n if not isinstance(klass, type): # in either mode, have the same awareness at the same time\n return\n\n # Filter out builtins.\n if not get_file_of_object(klass):\n return\n\n def _deco_class(kls: type):\n t = cls(**setting_kwds)\n _ = t(kls)\n # assert _ == kls\n\n def _deco_class_rec(kls: type):\n _deco_class(kls)\n for subclass in kls.__subclasses__():\n _deco_class_rec(subclass)\n\n if decorate_subclasses:\n _deco_class_rec(klass)\n else:\n _deco_class(klass)\n # (_deco_class_rec if decorate_subclasses else _deco_class)(klass)", "def serialize_to_python(cls, value):\n cls = type(value)\n cls_name = cls.__name__\n mod_name = cls.__module__\n\n if mod_name.startswith('django.db.models'):\n cls_path = 'models.%s' % cls_name\n else:\n cls_path = '%s.%s' % (mod_name, cls_name)\n\n return '%s.%s' % (cls_path, value._name_)", "def __new__(mcs, name, bases, attrs, **kwargs):\r\n attrs['__fields__'] = set()\r\n attrs['__store_attrs__'] = set()\r\n return super().__new__(mcs, name, bases, attrs, **kwargs)", "def dataclassRepr(obj) -> str:\n attrs = dataclassNonDefaults(obj)\n clsName = obj.__class__.__qualname__\n kwargs = ', '.join(f'{k}={v!r}' for k, v in attrs.items())\n return f'{clsName}({kwargs})'", "def visit_class_def(self, node: ClassDef) -> None:\n self.strip_type_info(node.info)\n node.base_type_exprs.extend(node.removed_base_type_exprs)\n node.removed_base_type_exprs = []\n with self.enter_class(node.info):\n super().visit_class_def(node)", "def override(self, default: Optional[str] = None) -> Optional[str]:\n return self.type_override if self.type_override else default", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.AccountDetailSerializer\n\n return self.serializer_class", "def record_cls(self):\n return self._ELE_CLS", "def get_serializer_class(self, *args, **kwargs):\n\n if self.request.method in ['GET', 'POST']:\n serializer_class = SearchSerialzer\n\n elif self.action == 'destroy':\n serializer_class = SearchNotRequiredSerializer\n\n elif self.action == 'destroy_all':\n serializer_class = SearchDeleteAllSerializer\n\n return serializer_class", "def get_serializer_class(self):\n return {\"create\": ReportFileSerializer, }.get(self.action, ReportFileSerializer)", "def __init__(self, record_cls, **kwargs):\n self._ELE_CLS = record_cls\n super(DataRecords, self).__init__(**kwargs)", "def render(self, arg: SeField[Any]) -> str:\n if arg.serializer and arg.serializer.inner is not default_serializer:\n res = self.custom_field_serializer(arg)\n elif is_dataclass(arg.type):\n res = self.dataclass(arg)\n elif is_opt(arg.type):\n res = self.opt(arg)\n elif is_list(arg.type):\n res = self.list(arg)\n elif is_set(arg.type):\n res = self.set(arg)\n elif is_dict(arg.type):\n res = self.dict(arg)\n elif is_tuple(arg.type):\n res = self.tuple(arg)\n elif is_enum(arg.type):\n res = self.enum(arg)\n elif is_numpy_datetime(arg.type):\n res = serialize_numpy_datetime(arg)\n elif is_numpy_scalar(arg.type):\n res = serialize_numpy_scalar(arg)\n elif is_numpy_array(arg.type):\n res = serialize_numpy_array(arg)\n elif is_primitive(arg.type):\n res = self.primitive(arg)\n elif is_union(arg.type):\n res = self.union_func(arg)\n elif is_str_serializable(arg.type):\n res = f\"{arg.varname} if reuse_instances else {self.string(arg)}\"\n elif 
is_datetime(arg.type):\n res = f\"{arg.varname} if reuse_instances else {arg.varname}.isoformat()\"\n elif is_none(arg.type):\n res = \"None\"\n elif is_any(arg.type) or isinstance(arg.type, TypeVar):\n res = f\"to_obj({arg.varname}, True, False, False, c=typing.Any)\"\n elif is_generic(arg.type):\n origin = get_origin(arg.type)\n assert origin\n arg.type = origin\n res = self.render(arg)\n elif is_literal(arg.type):\n res = self.literal(arg)\n elif is_class_var(arg.type):\n arg.type = type_args(arg.type)[0]\n res = self.render(arg)\n else:\n res = f\"raise_unsupported_type({arg.varname})\"\n\n # Custom field serializer overrides custom class serializer.\n if self.custom and not arg.serializer:\n return (\n \"serde_custom_class_serializer(\"\n f\"{typename(arg.type)}, \"\n f\"{arg.varname}, \"\n f\"default=lambda: {res})\"\n )\n else:\n return res", "def write_protected(cls, **kwargs: Any) -> \"DataSchema[ObjType]\":\n return super().write_protected(**kwargs) # type: ignore", "def get_serializer_class(self):\n return self.serializer_classes.get(self.action,\n self.default_serializer_class)", "def get_serializer_class(self):\n assert self.serializer_class is not None, (\n \"'%s' should either include a `serializer_class` attribute, \"\n \"or override the `get_serializer_class()` method.\"\n % self.__class__.__name__\n )\n\n return self.serializer_class", "def general_serializer(instance):\n # get the serializer for this model\n serializer_class = eval(instance.target_model.__name__ + 'Serializer')\n # use this for create, update and retrieve, since we only need special serialization to display less in list and\n # to communicate with python\n if instance.action in ['retrieve', 'create', 'update']:\n # if it's the detail view, just return the standard serializer\n return serializer_class\n elif instance.action == 'from_python':\n # copy the declared fields from the detail serializer\n PythonSerializer._declared_fields = serializer_class._declared_fields.copy()\n # also the fields\n PythonSerializer.Meta.fields = serializer_class.Meta.fields.copy()\n # get fields\n model_fields = instance.target_model._meta.get_fields()\n # copy the extra_kwargs\n PythonSerializer.Meta.extra_kwargs = serializer_class.Meta.extra_kwargs.copy()\n # and the model\n PythonSerializer.Meta.model = instance.target_model\n # turn the relations into text fields, except the m2m field since the automatic serialization works better\n for fields in model_fields:\n if fields.is_relation:\n\n if (not isinstance(fields, models.ManyToManyField)) and \\\n (not isinstance(fields, models.ManyToManyRel)):\n PythonSerializer._declared_fields[fields.name] = serializers.StringRelatedField()\n else:\n PythonSerializer._declared_fields[fields.name] = serializers.StringRelatedField(many=True)\n\n return PythonSerializer\n else: # if not, modify it to remove unnecessary fields from the list view\n\n # copy the attributes to the generalized serializer\n GeneralSerializer._declared_fields = serializer_class._declared_fields.copy()\n GeneralSerializer.Meta.fields = serializer_class.Meta.fields.copy()\n GeneralSerializer.Meta.extra_kwargs = serializer_class.Meta.extra_kwargs.copy()\n GeneralSerializer.Meta.model = instance.target_model\n # allocate a list of the fields to remove from the list view\n remove_fields = []\n # for all the fields in the serializer\n for fields in GeneralSerializer.Meta.fields:\n\n # remove the id field (since it's not in declared_fields)\n if fields in ['id', 'slug']:\n # eliminate the field from the serializer\n 
remove_fields.append(fields)\n continue\n if instance.target_model.__name__ != 'Mouse' and fields == 'mouse':\n # remove the current mouse extra_kwargs so it displays\n del GeneralSerializer.Meta.extra_kwargs[fields]\n continue\n # remove the fields that have been assigned as read only\n if (fields in serializer_class._declared_fields.keys()) and \\\n (('read_only=True' in str(GeneralSerializer._declared_fields[fields])) or\n ('ReadOnly' in str(GeneralSerializer._declared_fields[fields]))):\n\n # eliminate the field from the serializer\n remove_fields.append(fields)\n # remove the field from declared fields\n del GeneralSerializer._declared_fields[fields]\n continue\n\n GeneralSerializer.Meta.extra_kwargs[fields] = {'write_only': True}\n\n # remove the read only fields\n GeneralSerializer.Meta.fields = [el for el in GeneralSerializer.Meta.fields if el not in remove_fields]\n # overwrite url kwargs, since it is set by default to read only\n GeneralSerializer.Meta.extra_kwargs['url'] = {'lookup_field': instance.lookup_field}\n # put the mouse entry at the top\n if 'mouse' in GeneralSerializer.Meta.fields:\n GeneralSerializer.Meta.fields.remove('mouse')\n GeneralSerializer.Meta.fields = ['mouse'] + GeneralSerializer.Meta.fields\n return GeneralSerializer", "def update_derived_class_records():\n derive_class_hierarchy()", "def serializable(\n attrs: Optional[List[str]] = None,\n without: Optional[List[str]] = None,\n inherit: Optional[bool] = True,\n inheritable: Optional[bool] = True,\n) -> Callable[[T], T]:\n\n def rs_decorator(cls: T) -> T:\n recursive_serde_register(\n cls,\n serialize_attrs=attrs,\n exclude_attrs=without,\n inherit_attrs=inherit,\n inheritable_attrs=inheritable,\n )\n return cls\n\n return rs_decorator", "def get_deserialization_instance(cls):\n if cls.__orig__ is None:\n return cls()\n else:\n return cls.__orig__()", "def __new__(cls, name: str, bases: tuple, dct: Dict) -> Any:\n\n members = dct.get(f\"_{name}__keys\")\n if members:\n dct[\"members\"] = {key: key for key in members}\n else:\n dct[\"members\"] = {}\n new_cls = type.__new__(cls, name, bases, dct)\n return new_cls", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.OperationDetailSerializer\n\n return self.serializer_class", "def get_serializer_class(self):\n try:\n return self.serializer_action_classes[self.action]\n except (KeyError, AttributeError):\n return super(\n MultiSerializerViewSetMixin, self).get_serializer_class()", "def serialize_to_python(cls, value):\n cls_path, args, kwargs = cls._deconstruct_object(value)\n module_path, cls_name = cls_path.rsplit('.', 1)\n\n if cls_path.startswith('django.db.models'):\n cls_name = 'models.%s' % cls_name\n\n all_args = []\n\n if args:\n all_args += [\n serialize_to_python(_arg)\n for _arg in args\n ]\n\n if kwargs:\n all_args += [\n '%s=%s' % (_key, serialize_to_python(_value))\n for _key, _value in sorted(six.iteritems(kwargs),\n key=lambda pair: pair[0])\n ]\n\n return '%s(%s)' % (cls_name, ', '.join(all_args))", "def uidfy(self, cls):\n return \"{0}.{1}\".format(cls.__module__, cls.__name__)", "def uidfy(self, cls):\n return \"{0}.{1}\".format(cls.__module__, cls.__name__)", "def get_serialization_instance(cls, value):\n\n # if the instance is a list, convert it to a cls instance.\n # this is only useful when deserializing method arguments for a client\n # request which is the only time when the member order is not arbitrary\n # (as the members are declared and passed around as sequences of\n # arguments, unlike 
dictionaries in a regular class definition).\n if isinstance(value, list) or isinstance(value, tuple):\n assert len(value) <= len(cls._type_info)\n\n cls_orig = cls\n if cls.__orig__ is not None:\n cls_orig = cls.__orig__\n inst = cls_orig()\n\n keys = cls._type_info.keys()\n for i in range(len(value)):\n setattr(inst, keys[i], value[i])\n\n elif isinstance(value, dict):\n inst = cls()\n\n for k in cls._type_info:\n setattr(inst, k, value.get(k, None))\n\n else:\n inst = value\n\n return inst", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.BookDetailSerializer\n elif self.action == 'upload_image':\n return serializers.BookImageSerializer\n\n return self.serializer_class", "def json_save(cls):\n # make sure this is decorating a class object\n if type(cls) is not type:\n raise TypeError(\"json_save can only be used on classes\")\n\n # find the saveable attributes\n # these will the attributes that get saved and reconstructed from json.\n # each class object gets its own dict\n attr_dict = vars(cls)\n cls._attrs_to_save = {}\n for key, attr in attr_dict.items():\n if isinstance(attr, Saveable):\n cls._attrs_to_save[key] = attr\n if not cls._attrs_to_save:\n raise TypeError(f\"{cls.__name__} class has no saveable attributes.\\n\"\n \" Note that Savable attributes must be instances\")\n # register this class so we can re-construct instances.\n Saveable.ALL_SAVEABLES[cls.__qualname__] = cls\n\n # add the methods:\n cls.__new__ = __new__\n cls.to_json_compat = _to_json_compat\n cls.__eq__ = __eq__\n cls.from_json_dict = _from_json_dict\n cls.to_json = _to_json\n\n return cls", "def getSerializer():", "def test_inherited_class_override():\n class TestClass10(object):\n arg1 = None # type: int\n\n class TestClass11(TestClass10):\n arg1 = None # type: str\n\n assert get_type_hints(TestClass11) == {\n 'arg1': str\n }", "def _create_class_proxy(cls, theclass):\n\n def make_method(name):\n def method(self, *args, **kw):\n if not object.__getattribute__(self, \"_track_on\")[0]:\n return getattr(\n object.__getattribute__(self, \"_obj\"), name)(*args,\n **kw)\n object.__getattribute__(self, \"_track_on\")[0] = False\n args_value = copy_and_placehold_data(args,\n object.__getattribute__(\n self, \"_track_on\"))\n args_value_copy = copy_call_data(args_value)\n kwargs_value = copy_and_placehold_data(kw,\n object.__getattribute__(\n self, \"_track_on\"))\n kwargs_value_copy = copy_call_data(kwargs_value)\n output = getattr(object.__getattribute__(self, \"_obj\"),\n name)(*args_value, **kwargs_value)\n output_value = copy_and_placehold_data(output,\n object.__getattribute__(\n self, \"_track_on\"))\n output_value_copy = copy_call_data(output_value)\n object.__getattribute__(self, \"_special_data\").append(\n SPECIAL_ATTR_DATA(name, args_value_copy, kwargs_value_copy,\n output_value_copy))\n object.__getattribute__(self, \"_track_on\")[0] = True\n return output_value\n\n return method\n\n namespace = {}\n for name in cls._special_names:\n if hasattr(theclass, name):\n namespace[name] = make_method(name)\n return type(\"%s(%s)\" % (cls.__name__, theclass.__name__), (cls, ),\n namespace)", "def __init_subclass__(cls) -> None:\n super().__init_subclass__()\n dataclass(cls)", "def find_dump(cls):\n cls_attrs = dir(cls)\n if \"to_json\" in cls_attrs:\n return cls.to_json\n if \"json\" in cls_attrs:\n return lambda o: o.json\n if is_dataclass(cls):\n return asdict\n raise ValueError(f\"Cannot find a 
dumper method for {cls}\")", "def serialize_forstorage(cls, obj):\n return misc.serialize_forstorage(obj)", "def get_serializer_class(self):\n if self.action == 'login':\n return UserLoginSerializer\n if self.action == 'signup':\n return UserSignUpSerializer\n if self.action == 'remember_code':\n return RememberCodeSerializer\n return UserModelSerializer", "def serializer(*args, **kwargs):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available serializers.\n\n :param type cls: serializer class.\n\n :returns: serializer class.\n :rtype: type\n \"\"\"\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls\n\n return decorator", "def _serialize(\n self, value: typing.Any, attr: str | None, obj: typing.Any, **kwargs\n ):\n return value", "def serialize(self, obj):\n pass", "def create_default(cls):\n raise NotImplementedError(common.OVERRIDE_MESSAGE)", "def parameterize_class_name(cls: Type, idx: int, input_dicts: Mapping[Any, Any]):\n suffix = \"_\".join(f\"{k}_{v}\" for k, v in input_dicts.items())\n return f\"{cls.__name__}_{suffix}\"", "def __init__(\n self, base: BaseModel, override: Dict[str, Any], static: Optional[Dict[str, Any]] = None\n ):\n self.__base = base\n self.__override = override\n self.__static = static", "def default(self, obj):\n if dataclasses.is_dataclass(obj):\n return {\n **dict(__dataclass__=obj.__class__.__name__),\n **{field.name: self.default(getattr(obj, field.name)) for field in dataclasses.fields(obj)},\n }\n elif type(obj) in JSON_TYPES:\n return obj\n super(ExtendedJsonEncoder, self).default(obj)", "def class_casting(obj: object, cls: type):\n orig_cls = obj.__class__\n obj.__class__ = cls\n yield\n obj.__class__ = orig_cls", "def get_serializer_class(self):\n return self.serializers.get(self.action,\n self.serializers['default'])", "def get_serializer_class(self):\n serializer_map = {\n \"witness\": WitnessServiceSerializer,\n \"review\": ReviewServiceSerializer,\n \"certificate_provider\": LPACertificateServiceSerializer,\n }\n\n return serializer_map[self.kwargs.get(\"service_type\", \"witness\")]", "def _decorate(cls):\n global_validators = [session_required, catch_typeerror]\n # Cheat methods _hosts_name_label\n # -------------\n # Methods that have a trivial implementation for all classes.\n # 1. 
get_by_uuid == getting by ref, so just return uuid for\n # all get_by_uuid() methods.\n \n for api_cls in classes.keys():\n # We'll let the autoplug classes implement these functions\n # themselves - its much cleaner to do it in the base class\n \n get_by_uuid = '%s_get_by_uuid' % api_cls\n get_uuid = '%s_get_uuid' % api_cls\n get_all_records = '%s_get_all_records' % api_cls \n\n def _get_by_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def _get_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def unpack(v):\n return v.get('Value')\n\n def _get_all_records(_api_cls):\n return lambda s, session: \\\n xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\\\n for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))\n\n setattr(cls, get_by_uuid, _get_by_uuid)\n setattr(cls, get_uuid, _get_uuid)\n setattr(cls, get_all_records, _get_all_records(api_cls))\n\n # Autoplugging classes\n # --------------------\n # These have all of their methods grabbed out from the implementation\n # class, and wrapped up to be compatible with the Xen-API.\n\n# def getter(ref, type):\n# return XendAPIStore.get(ref, type)\n\n def wrap_method(name, new_f):\n try:\n f = getattr(cls, name)\n wrapped_f = (lambda * args: new_f(f, *args))\n wrapped_f.api = f.api\n wrapped_f.async = f.async\n setattr(cls, name, wrapped_f)\n except AttributeError:\n # Logged below (API call: %s not found)\n pass\n\n\n def setter_event_wrapper(api_cls, attr_name):\n setter_name = '%s_set_%s' % (api_cls, attr_name)\n wrap_method(\n setter_name,\n lambda setter, s, session, ref, *args:\n _setter_event_dispatch(s, setter, api_cls, attr_name,\n session, ref, args))\n\n\n def ctor_event_wrapper(api_cls):\n ctor_name = '%s_create' % api_cls\n wrap_method(\n ctor_name,\n lambda ctor, s, session, *args:\n _ctor_event_dispatch(s, ctor, api_cls, session, args))\n\n\n def dtor_event_wrapper(api_cls):\n dtor_name = '%s_destroy' % api_cls\n wrap_method(\n dtor_name,\n lambda dtor, s, session, ref, *args:\n _dtor_event_dispatch(s, dtor, api_cls, session, ref, args))\n\n\n # Wrapping validators around XMLRPC calls\n # ---------------------------------------\n for api_cls, validator in classes.items():\n def doit(n, takes_instance, async_support=False,\n return_type=None):\n n_ = n.replace('.', '_')\n try:\n f = getattr(cls, n_)\n if n not in argcounts:\n argcounts[n] = f.func_code.co_argcount - 1\n \n validators = takes_instance and validator and \\\n [validator] or []\n \n validators += global_validators\n for v in validators:\n f = v(f)\n f.api = n\n f.async = async_support\n if return_type:\n f.return_type = return_type\n \n setattr(cls, n_, f)\n except AttributeError:\n log.warn(\"API call: %s not found\" % n)\n\n \n ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \\\n + cls.Base_attr_ro\n rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \\\n + cls.Base_attr_rw\n methods = getattr(cls, '%s_methods' % api_cls, []) \\\n + cls.Base_methods\n funcs = getattr(cls, '%s_funcs' % api_cls, []) \\\n + cls.Base_funcs\n\n # wrap validators around readable class attributes\n for attr_name in ro_attrs + rw_attrs:\n doit('%s.get_%s' % (api_cls, attr_name), True,\n async_support=False)\n\n # wrap validators around writable class attrributes\n for attr_name in rw_attrs:\n doit('%s.set_%s' % (api_cls, attr_name), True,\n async_support=False)\n setter_event_wrapper(api_cls, attr_name)\n\n # wrap validators around methods\n for method_name, return_type in methods:\n doit('%s.%s' % (api_cls, 
method_name), True,\n async_support=True)\n\n # wrap validators around class functions\n for func_name, return_type in funcs:\n \n doit('%s.%s' % (api_cls, func_name), False,\n async_support=True,\n return_type=return_type)\n \n ctor_event_wrapper(api_cls)\n dtor_event_wrapper(api_cls)", "def _replicate_class(self, **kwargs):\n return Posterior(**kwargs)", "def override_str_factory(obj):\n\n def new_str_method(self):\n return \": \".join([str(self.description), str(self.value)])\n\n # This used to use type create a new class, along these lines:\n # https://stackoverflow.com/questions/5918003/python-override-str-in-an-exception-instance\n #\n # That no longer seems to work, because using the class of the widget from type give a traitlets\n # object not an ipywidgets object, and the value is no longer related to the UI setting.\n #\n # This new way works, but changes the __str__ for every widget type it touches.\n # This whole thing really needs a re-design.\n\n original_class = type(obj)\n original_class.__str__ = new_str_method\n return obj" ]
[ "0.6116893", "0.6029395", "0.5879258", "0.5812591", "0.5711086", "0.55567217", "0.55458784", "0.5513836", "0.5463761", "0.5428988", "0.5375427", "0.5358711", "0.5357488", "0.5266866", "0.5182065", "0.5126839", "0.51204216", "0.5094683", "0.5066752", "0.5049762", "0.5035309", "0.50038934", "0.49935597", "0.4990867", "0.49795017", "0.49777964", "0.49760428", "0.49717852", "0.49625415", "0.49424505", "0.49377286", "0.4927788", "0.49247456", "0.49179888", "0.48988461", "0.4897987", "0.48815972", "0.4873019", "0.48723048", "0.4854591", "0.48523173", "0.48449546", "0.48366618", "0.48349828", "0.482552", "0.48219678", "0.48096249", "0.4801188", "0.4788461", "0.47863382", "0.47815964", "0.47784504", "0.47776046", "0.47705942", "0.47683015", "0.47644684", "0.47531024", "0.47486988", "0.4747808", "0.47436276", "0.47249073", "0.4717994", "0.47140083", "0.47107768", "0.4701211", "0.46961308", "0.4695739", "0.4693631", "0.46888062", "0.46884018", "0.46824214", "0.4676286", "0.46684235", "0.46623042", "0.46556148", "0.46556148", "0.46556094", "0.46555954", "0.46551055", "0.46550927", "0.46538064", "0.46534228", "0.46520644", "0.46512043", "0.46507937", "0.46488023", "0.46392816", "0.46310502", "0.46276242", "0.46253845", "0.46217778", "0.4619856", "0.461972", "0.46194056", "0.46144584", "0.4613696", "0.46057308", "0.45978263", "0.45965406", "0.4592478" ]
0.5931994
2
Convert 'x' from a simplified form to the full CG form.
def toComplex(simpleObject):
    if simpleObject[0] == ENCODING_SIMPLE_PYTHON:
        return simpleObject[1]
    if simpleObject[0] == ENCODING_INT:
        return int(simpleObject[1])
    if simpleObject[0] == ENCODING_UNICODE:
        return unicode(simpleObject[1], 'utf-8')
    if simpleObject[0] == ENCODING_LONG:
        return long(simpleObject[1])
    if simpleObject[0] == ENCODING_TUPLE:
        return tuple([toComplex(x) for x in simpleObject[1]])
    if simpleObject[0] == ENCODING_LIST:
        return [toComplex(x) for x in simpleObject[1]]
    if simpleObject[0] == ENCODING_DICT:
        return dict((toComplex(k), toComplex(v)) for k,v in simpleObject[1])
    elif simpleObject[0] == ENCODING_OBJECT:
        clsModuleAndName = simpleObject[1][0]
        args = simpleObject[1][1]
        cls = classFromModuleAndName(clsModuleAndName)
        kwds = toComplex(args)
        try:
            return cls(**kwds)
        except:
            raise UserWarning("Failed to construct instance of %s with %s" % (cls, kwds))
    raise UserWarning("Badly encoded object")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _to_clips_value(cls, x):\n return x", "def _canon_cg(expr):\n return expr.replace(CG, _canon_cg_core)", "def reconstruct(self, x):\n return self.inverse_transform(self.transform(x))", "def construct(self, x):\n return self.double_conv(x)", "def call(self, x):\n self._check_shape(x.shape)\n\n if self._spatial_mult > 1:\n x = self._spatial_expand(x)\n\n return x", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def cst(x):\n\tfrom rhsinfo import wp\n\tif wp == 'float32': x = np.float32(x)\n\tif wp == 'float64': x = np.float64(x)\n\treturn x", "def convert_coordinates(x):\n \n long = x.replace('.',',')\n rep = re.sub('(,[^,]*),', r'\\1', long)\n rep = rep.replace(',','.')\n return rep", "def conv(x):\n return x#-2*(16.41*x + 65.04-95.12) ", "def to_model_space(self, x):\n\n # from (-inf, +inf) to (-1, +1)\n x = np.tanh(x)\n\n grad = 1 - np.square(x)\n\n # map from (-1, +1) to (min_, max_)\n a = (self.clip_min + self.clip_max) / 2\n b = (self.clip_max - self.clip_min) / 2\n x = x * b + a\n\n grad = grad * b\n return x, grad", "def transform(self, x):\n x = x * 0.5 + 0.5\n x_grey = x[:, [2], :, :] * 299 / 1000 + x[:, [1], :, :] * \\\n 587 / 1000 + x[:, [0], :, :] * 114 / 1000\n x_grey = (x_grey - 0.5) / 0.5\n return x_grey", "def transform(self, x):", "def transform(self, x):\n raise NotImplementedError()", "def g(self, x):\n return x * (1 - x)", "def construct(self, x):\n x = self.conv(x)\n x = self.DtS(x)\n return x", "def x_to_u(self, x):\n return stats.norm.ppf(self.CDF(x))", "def forward(self, x):\n x = self.conv(x)\n return x", "def call(self, x):\n self._check_shape(x.shape)\n\n if self._temporal_mult > 1:\n x = self._temporal_expand(x)\n\n if self._spatial_mult > 1:\n x = self._spatial_expand(x)\n\n return x", "def cxcy_to_gcxgcy(cxcy, priors_cxcy):\n\n # The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical\n # They are for some sort of numerical conditioning, for 'scaling the localization gradient'\n # See https://github.com/weiliu89/caffe/issues/155\n return torch.cat([(cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10), # g_c_x, g_c_y\n torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5], 1) # g_w, g_h", "def construct(self, x):\n x = self.conv(x)\n l0 = self.down_conv1(x)\n h0 = self.down_conv2(l0)\n l1 = self.down_conv3(h0 - x)\n return l1 + l0", "def transform(self, X):\n X *= self.scale_\n X += self.min_\n return X", "def u_to_x(self, u):\n # Use approximate normal cdf for speed.. 
\n return self.CDF_inv(normal_cdf_approx(u))", "def transform(self, X: numpy.ndarray) -> numpy.ndarray:\n\n intermediate_repr = self.intermediate_transform(X)\n return self.gate_.transform(intermediate_repr)", "def physicalToTransformed(self, x):\n return self.logit( (x-self.lower) / (self.upper - self.lower))", "def x(self) -> ir.FloatingValue:\n return ops.GeoX(self).to_expr()", "def encode(self, x):\n\n try:\n xp = (np.atleast_2d(x) - self.xmin[None, :]) / self.xspan[None, :]\n return xp.T\n except:\n xp = (np.atleast_2d(x) - self.xmin[:, None]) / self.xspan[:, None]\n return xp", "def normalize_m11(x):\n return x / 127.5 - 1", "def denormalize(self, x):\n raise NotImplementedError", "def flatten(self, x):\n return np.concatenate([c.flatten(xi) for c, xi in zip(self.spaces, x)])", "def euroadolaramerica(x):\r\n\tconversion = x * 1.35750\r\n\treturn conversion", "def gate2zx(box):\n if isinstance(box, (Bra, Ket)):\n dom, cod = (1, 0) if isinstance(box, Bra) else (0, 1)\n return Id(0).tensor(*[\n X(dom, cod, phase=.5 * bit) for bit in box.bitstring])\n if isinstance(box, (Rz, Rx)):\n return (Z if isinstance(box, Rz) else X)(1, 1, box.phase)\n if isinstance(box, CRz):\n return Z(1, 2) @ Z(1, 2, box.phase)\\\n >> Id(1) @ (X(2, 1) >> Z(1, 0, -box.phase)) @ Id(1)\n if isinstance(box, CRx):\n return X(1, 2) @ X(1, 2, box.phase)\\\n >> Id(1) @ (Z(2, 1) >> X(1, 0, -box.phase)) @ Id(1)\n if isinstance(box, quantum.CU1):\n return Z(1, 2, box.phase) @ Z(1, 2, box.phase)\\\n >> Id(1) @ (X(2, 1) >> Z(1, 0, -box.phase)) @ Id(1)\n standard_gates = {\n quantum.H: H,\n quantum.Z: Z(1, 1, .5),\n quantum.X: X(1, 1, .5),\n quantum.Y: Y(1, 1, .5),\n CZ: Z(1, 2) @ Id(1) >> Id(1) @ Had() @ Id(1) >> Id(1) @ Z(2, 1),\n CX: Z(1, 2) @ Id(1) >> Id(1) @ X(2, 1)}\n return standard_gates[box]", "def t2c(x):\n dx = to_dlpack(x)\n return cp.fromDlpack(dx)", "def convs(self, x):\n\n for layer, drop in zip(self.convolutionals, self.cnn_drop):\n x = F.max_pool2d(F.relu(drop(layer(x))), (1, 2))\n\n if self._to_linear is None:\n print(x.shape)\n self._to_linear = x[0].shape[0]*x[0].shape[1]*x[0].shape[2]\n\n return x", "def _fix_input(self, x_in):\n h, w = K.int_shape(x_in)[1:3]\n dh = self.resolution - h\n dw = self.resolution - w\n dhh = dh // 2\n dhw = dw // 2\n\n if dhh >= 0 and dhw > 0:\n x = ZeroPadding2D(padding=((dhh, dh - dhh), (dhw, dw - dhw)))(x_in)\n if dhh < 0 and dhw < 0:\n x = Cropping2D(cropping=((-dhh, dhh - dh), (-dhw, dhw - dw)))(x_in)\n else:\n x = x_in\n return x", "def xyz_to_acescg(xyz: Vector) -> Vector:\n\n return alg.dot(XYZ_TO_AP1, xyz, dims=alg.D2_D1)", "def f_shp(i):\n return i.replace('(', '').replace(')', '').replace(', ', 'x').replace(',', '')", "def reconstruct(self, X: numpy.ndarray) -> numpy.ndarray:\n\n reduced_repr = self.transform(X)\n return self.inverse_transform(reduced_repr)", "def transform(self, x: Array2D) -> Array2D:", "def g1_unscaled(self, nx, ny, x_des):\n\n [c_d, a1, output] = [self.component_dependency['g_1'], self.dependency_matrix, []]\n for i in range(nx):\n [sum_i, row] = [[], a1[4 * ny + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # :x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.str_int.g1_int([x_des[k]], assign - 1)) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output", "def rescale(x):\n if x.min() != 0:\n raise ValueError('input should have min zero.')\n\n x /= x.max() # max 1\n x *= 2 # max 2\n x -= 1 # range -1, 1\n\n if x.min() != 
-1 and x.max() != 1:\n raise Exception\n\n return x", "def transform(self, x):\n return self._transform_eig(x)", "def scale(self, x):\n r = max(0, min(1, self.r * x))\n g = max(0, min(1, self.g * x))\n b = max(0, min(1, self.b * x))\n return Rgb(r,g,b)", "def construct(self, x):\n x = self.conv(x)\n h0 = self.up_conv1(x)\n l0 = self.up_conv2(h0)\n h1 = self.up_conv3(l0 - x)\n return h1 + h0", "def transform(self, x):\n return self._test_transform(x)", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n X *= self.scale_\n X += self.min_\n return X", "def normalize_wrt_x(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n\n x_range = x_max - x_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(x_range)\n y = y / float(x_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def kelvin_to_degc(x):\r\n return x - 273.15", "def g(x):\n return 5. - x[:, 1] - .5 * x[:, 0] ** 2.", "def _from_clips_value(cls, x):\n return cls.DEFAULT if x is None else cls.PYTHON_TYPE(x)", "def g(x):\n if x[0]**2 + x[1]**2 + x[2]**2 <= 1:\n gx = 1\n else:\n gx = 0\n\n return gx", "def _make_array(x):\n try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x", "def _rewrite_cg(expr):\n j1, m1, j2, m2, j3, m3 = symbols(\n 'j1 m1 j2 m2 j3 m3', cls=Wild\n )\n return expr.replace(\n CG(j1, m1, j2, m2, j3, m3),\n _NEG_UNITY ** (-j1 + j2 - m3) * sqrt(2 * j3 + 1) * Wigner3j(\n j1, m1, j2, m2, j3, -m3\n )\n )", "def g(self, X):\n\n return (X[0])**2 - 2*X[0] + X[1]**3 - 2", "def gcn(x, scale=55., bias=0.01):\n return scale * x / np.sqrt(bias + np.mean(x ** 2, axis=1))[:,None]", "def g(self, X):\n if isinstance(X, int) or isinstance(X, float):\n if X < 1:\n x = max(0.001, X)\n a = np.log(x/2.) + 1/np.sqrt(1-x**2)*np.arccosh(1./x)\n elif X == 1:\n a = 1 + np.log(1./2.)\n elif X > 1:\n a = np.log(X/2) + 1/np.sqrt(X**2-1)*np.arccos(1./X)\n\n else:\n a=np.empty_like(X)\n X[X==0] = 0.001\n x = X[X<1]\n\n a[X<1] = np.log(x/2.) 
+ 1/np.sqrt(1-x**2)*np.arccosh(1./x)\n\n a[X==1] = 1 + np.log(1./2.)\n\n x = X[X>1]\n a[X>1] = np.log(x/2) + 1/np.sqrt(x**2-1)*np.arccos(1./x)\n\n return a", "def transformCoordinates(self,x,incoordsys=None,outcoordsys=None):\n if incoordsys is None:\n incoordsys = self.incoordsys\n if outcoordsys is None:\n outcoordsys = self._fcoordsys\n if incoordsys == outcoordsys:\n return x\n else:\n return self._inputtransforms[incoordsys][outcoordsys](*x)", "def gcxgcy_to_cxcy(gcxgcy, priors_cxcy):\n\n return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy[:, :2], # c_x, c_y\n torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) # w, h", "def unflatten(self, x):\n dims = [c.flat_dim for c in self.spaces]\n flat_x = np.split(x, np.cumsum(dims)[:-1])\n return tuple(c.unflatten(xi) for c, xi in zip(self.spaces, flat_x))", "def _shortcut(self, x):\n if self.learnable_sc:\n x = self._upsample_conv(\n x, self.c_sc) if self.upsample else self.c_sc(x)\n return x\n else:\n return x", "def transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n \n if self.with_centering:\n X -= self.center_\n if self.with_scaling:\n X /= self.scale_\n \n return X", "def x_nondim(self, x):\n x[0:4] /= self.r_scale\n return x", "def xtransformed(geo, transformation):\n T = xform_from_transformation(transformation)\n geo_copy = geo.Duplicate()\n geo_copy.Transform(T)\n return geo_copy", "def retraction(self, v, x, c):\n c = self.truncate_c(c)\n new_v = self.expmap(v, x, c)\n return self.proj(new_v, c)", "def transform(self, x:generic_array, dense=True) -> generic_array:\n if type(x) == np.ndarray and not dense:\n warnings.warn(\"For Numpy transform it is best to use dense=True\")\n \n K_nq = self._pairwise_kernels(x, self.components_, dense=dense)\n x_new = K_nq @ self.normalization\n return x_new", "def transform(x_data):\n return flatten(x_data)", "def true_g(x):\n obj1 = x[0]**2\n return (obj1,)", "def transform(self, x, **kwargs):\n return super().transform(x, **kwargs)", "def state_as_x(cls,\n state: float) -> np.array:\n xs = np.array([state])\n return xs.reshape([1, xs.shape[0]])", "def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n padx = int(self.conv2d_guass.shape[2]/2)\r\n pady = int(self.conv2d_guass.shape[3]/2)\r\n \r\n ixx = x[:,0,:,:]\r\n ixx = ixx.unsqueeze(dim=1)\r\n sxx = nn.functional.conv2d(ixx, self.conv2d_guass, padding=(padx, pady))\r\n\r\n iyy = x[:,1,:,:]\r\n iyy = iyy.unsqueeze(dim=1)\r\n syy = nn.functional.conv2d(iyy, self.conv2d_guass, padding=(padx, pady))\r\n\r\n ixy = x[:,2,:,:]\r\n ixy = ixy.unsqueeze(dim=1)\r\n sxy = nn.functional.conv2d(ixy, self.conv2d_guass, padding=(padx, pady))\r\n \r\n output = torch.cat((sxx, syy, sxy), dim=1)\r\n return output", "def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))", "def normalize(x):\n # TODO: Implement Function\n \n return x/255", "def acescg_to_xyz(acescg: Vector) -> Vector:\n\n return alg.dot(AP1_TO_XYZ, acescg, dims=alg.D2_D1)", "def shape2str(x):\n return \"[\" + \" x \".join([str(x) for x in x.shape]) + \"]\"", "def transform(self, X):\n return self.transformer.transform(X)", "def x(self, x: ComType):\n if isinstance(x, complex):\n self._ohms = x\n else:\n self._ohms = complex(0, x)", "def standardize(self, x):\n if not self.image_resample:\n x = to_shape(x, self.image_shape, constant_values=-1024)\n elif self.image_resample:\n x = resample(x, self.image_shape)\n\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.voxelwise_normalization:\n if self.voxel_bounds is not 
None:\n x = voxelwise_normalize(x, self.voxel_bounds)\n if self.voxelwise_center:\n if self.voxel_mean is not None:\n x -= self.voxel_mean\n if self.voxelwise_std_normalization:\n x /= (self.voxelwise_std + 1e-7)\n if self.samplewise_center:\n x -= np.mean(x, axis=self.channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=self.channel_axis, keepdims=True) + 1e-7)\n return x", "def fusion(self, x):\n new_x = x.copy()\n for i in range(self.z.size):\n new_x[self.mask[i]] = self.z[i]\n return new_x", "def ctransform(x):\n xr = np.argsort(np.argsort(x)).astype(float)\n xr += 1.\n xr /= float(xr.shape[-1] + 1)\n return xr", "def ctransform(x):\n xr = np.argsort(np.argsort(x)).astype(float)\n xr += 1.\n xr /= float(xr.shape[-1] + 1)\n return xr", "def coordConv(self, svgx, svgy, relative=False):\n if relative:\n svgx = float(svgx) + self.curPoint[0]\n svgy = float(svgy) + self.curPoint[1]\n else:\n svgx = float(svgx)\n svgy = float(svgy)\n matrix = self.matrices[-1]\n epsx = matrix[0] * svgx + matrix[2] * svgy + matrix[4]\n epsy = matrix[1] * svgx + matrix[3] * svgy + matrix[5]\n\n return (epsx, epsy)", "def Expand3D(x):\r\n\r\n if x < 0:\r\n raise Exception(\r\n \"\"\"ERROR: Morton code is valid only for positive numbers\"\"\")\r\n x &= 0x7fffffffL\r\n x = (x ^ x << 32) & 0x7fff00000000ffffL\r\n x = (x ^ x << 16) & 0x7f0000ff0000ff0000ffL\r\n x = (x ^ x << 8) & 0x700f00f00f00f00f00f00fL\r\n x = (x ^ x << 4) & 0x430c30c30c30c30c30c30c3L\r\n x = (x ^ x << 2) & 0x49249249249249249249249L\r\n return x", "def rotorconversion(x):\n return cf.MultiVector(layout, val_rotorconversion(x))", "def normalize(x, min_x, max_x):\n\treturn (x - min_x) / (max_x - min_x)", "def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x", "def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x", "def reconstruct_intermediate(self, X: numpy.ndarray) -> numpy.ndarray:\n\n reduced_repr_intr = self.intermediate_transform(X)\n return self.intermediate_inverse_transform(reduced_repr_intr)", "def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))", "def reconstructX(self, inputs):\n if self.act_dec is None:\n act_dec = lambda x: x\n else:\n act_dec = self.act_dec\n return act_dec(self.decodeX(inputs))", "def conjugacy_class(self, x):\n # Ref: \"Computing the conjugacy classes of finite groups\"; Butler, G.\n # Groups '93 Galway/St Andrews; edited by Campbell, C. 
M.\n new_class = {x}\n last_iteration = new_class\n\n while len(last_iteration) > 0:\n this_iteration = set()\n\n for y in last_iteration:\n for s in self.generators:\n conjugated = s * y * (~s)\n if conjugated not in new_class:\n this_iteration.add(conjugated)\n\n new_class.update(last_iteration)\n last_iteration = this_iteration\n\n return new_class", "def conjugate(x):\n if len(list(x.size())) == 2:\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = x[0]\n z[1] = -x[1]\n\n if len(list(x.size())) == 3:\n z = torch.zeros(\n 2, x.size()[2], x.size()[1], dtype=torch.double, device=x.device\n )\n z[0] = torch.transpose(x[0], 0, 1)\n z[1] = -torch.transpose(x[1], 0, 1)\n\n return z", "def g(x):\n y = numpy.zeros(2)\n y[0] = 2 * (x[0] - 1)\n y[1] = 2 * (x[1] + 3)\n return y", "def gx(Xn):\n gofx = np.sqrt(2 * np.pi) / (1 + Xn**4)\n return gofx", "def cx(x):\n return cw(x - global_min_x)", "def __call__(self, x):\n\n np.subtract(x, self.d, out=x)\n np.divide(self.a, x, out=x)\n np.subtract(x, self.b, out=x)\n np.log(x, out=x)\n np.divide(x, -self.e, out=x)\n np.add(x, self.c, out=x)\n\n return x", "def r(self, x, xType='CC', outType='CC', format='V'):\n\n allowed_xType = [\n 'CC', 'N', 'F', 'Fx', 'Fy', 'Fz', 'E', 'Ex', 'Ey', 'Ez'\n ]\n assert (\n type(x) == list or isinstance(x, np.ndarray)\n ), \"x must be either a list or a ndarray\"\n assert xType in allowed_xType, (\n \"xType must be either \"\n \"'CC', 'N', 'F', 'Fx', 'Fy', 'Fz', 'E', 'Ex', 'Ey', or 'Ez'\"\n )\n assert outType in allowed_xType, (\n \"outType must be either \"\n \"'CC', 'N', 'F', Fx', 'Fy', 'Fz', 'E', 'Ex', 'Ey', or 'Ez'\"\n )\n assert format in ['M', 'V'], \"format must be either 'M' or 'V'\"\n assert outType[:len(xType)] == xType, (\n \"You cannot change types when reshaping.\"\n )\n assert xType in outType, \"You cannot change type of components.\"\n\n if type(x) == list:\n for i, xi in enumerate(x):\n assert isinstance(x, np.ndarray), (\n \"x[{0:d}] must be a numpy array\".format(i)\n )\n assert xi.size == x[0].size, (\n \"Number of elements in list must not change.\"\n )\n\n x_array = np.ones((x.size, len(x)))\n # Unwrap it and put it in a np array\n for i, xi in enumerate(x):\n x_array[:, i] = mkvc(xi)\n x = x_array\n\n assert isinstance(x, np.ndarray), \"x must be a numpy array\"\n\n x = x[:] # make a copy.\n xTypeIsFExyz = (\n len(xType) > 1 and\n xType[0] in ['F', 'E'] and\n xType[1] in ['x', 'y', 'z']\n )\n\n def outKernal(xx, nn):\n \"\"\"Returns xx as either a matrix (shape == nn) or a vector.\"\"\"\n if format == 'M':\n return xx.reshape(nn, order='F')\n elif format == 'V':\n return mkvc(xx)\n\n def switchKernal(xx):\n \"\"\"Switches over the different options.\"\"\"\n if xType in ['CC', 'N']:\n nn = (self._n) if xType == 'CC' else (self._n+1)\n assert xx.size == np.prod(nn), (\n \"Number of elements must not change.\"\n )\n return outKernal(xx, nn)\n elif xType in ['F', 'E']:\n # This will only deal with components of fields,\n # not full 'F' or 'E'\n xx = mkvc(xx) # unwrap it in case it is a matrix\n nn = self.vnF if xType == 'F' else self.vnE\n nn = np.r_[0, nn]\n\n nx = [0, 0, 0]\n nx[0] = self.vnFx if xType == 'F' else self.vnEx\n nx[1] = self.vnFy if xType == 'F' else self.vnEy\n nx[2] = self.vnFz if xType == 'F' else self.vnEz\n\n for dim, dimName in enumerate(['x', 'y', 'z']):\n if dimName in outType:\n assert self.dim > dim, (\n \"Dimensions of mesh not great enough for \"\n \"{}{}\".format(xType, dimName)\n )\n assert xx.size == np.sum(nn), (\n \"Vector is not the right 
size.\"\n )\n start = np.sum(nn[:dim+1])\n end = np.sum(nn[:dim+2])\n return outKernal(xx[start:end], nx[dim])\n\n elif xTypeIsFExyz:\n # This will deal with partial components (x, y or z)\n # lying on edges or faces\n if 'x' in xType:\n nn = self.vnFx if 'F' in xType else self.vnEx\n elif 'y' in xType:\n nn = self.vnFy if 'F' in xType else self.vnEy\n elif 'z' in xType:\n nn = self.vnFz if 'F' in xType else self.vnEz\n assert xx.size == np.prod(nn), 'Vector is not the right size.'\n return outKernal(xx, nn)\n\n # Check if we are dealing with a vector quantity\n isVectorQuantity = len(x.shape) == 2 and x.shape[1] == self.dim\n\n if outType in ['F', 'E']:\n assert ~isVectorQuantity, (\n 'Not sure what to do with a vector vector quantity..'\n )\n outTypeCopy = outType\n out = ()\n for ii, dirName in enumerate(['x', 'y', 'z'][:self.dim]):\n outType = outTypeCopy + dirName\n out += (switchKernal(x),)\n return out\n elif isVectorQuantity:\n out = ()\n for ii in range(x.shape[1]):\n out += (switchKernal(x[:, ii]),)\n return out\n else:\n return switchKernal(x)", "def simplify(\n self,\n tolerance: ir.FloatingValue,\n preserve_collapsed: ir.BooleanValue,\n ) -> GeoSpatialValue:\n return ops.GeoSimplify(self, tolerance, preserve_collapsed).to_expr()", "def define_cx_unscaled(self, _cx_scaled: list[MX | SX, ...], scaling: np.ndarray) -> list[MX | SX, ...]:\n _cx = [self.nlp.cx() for _ in range(len(_cx_scaled))]\n for node_index in range(len(_cx_scaled)):\n _cx[node_index] = [self.nlp.cx() for _ in range(len(_cx_scaled[0]))]\n\n for node_index in range(len(_cx_scaled)):\n for j in range(len(_cx_scaled[0])):\n _cx[node_index][j] = _cx_scaled[node_index][j] * scaling\n return _cx", "def _call_(self, x):\n try:\n if parent(x) is not self.domain():\n x = self.domain()(x)\n except TypeError:\n raise TypeError(\"%s must be coercible into %s\"%(x,self.domain()))\n if self.domain().is_ambient():\n x = x.element()\n else:\n x = self.domain().coordinate_vector(x)\n C = self.codomain()\n v = x.change_ring(C.base_ring()) * self.matrix()\n if not C.is_ambient():\n v = C.linear_combination_of_basis(v)\n # The call method of parents uses (coercion) morphisms.\n # Hence, in order to avoid recursion, we call the element\n # constructor directly; after all, we already know the\n # coordinates.\n return C._element_constructor_(v)", "def _g(self, x):\n e_x = math.exp(-self._alpha * x)\n return (1.0 / (1 + e_x))", "def transform(self, X):\n return super().transform(X)" ]
[ "0.62322056", "0.5949095", "0.5794452", "0.57718503", "0.56907374", "0.5687841", "0.56126106", "0.5525381", "0.55237037", "0.55102", "0.5448745", "0.5415053", "0.5387446", "0.53741324", "0.5301165", "0.517558", "0.5163078", "0.515195", "0.5121617", "0.51215816", "0.5120705", "0.51117307", "0.5102274", "0.51018655", "0.50947803", "0.5094247", "0.50869775", "0.5084358", "0.5067616", "0.5065327", "0.5031798", "0.5023161", "0.5013446", "0.5011118", "0.5005523", "0.49905956", "0.49777123", "0.49771038", "0.496461", "0.49625522", "0.4956291", "0.49492618", "0.4942015", "0.49407193", "0.49328572", "0.49273834", "0.4921362", "0.4919188", "0.4914698", "0.4910754", "0.49099085", "0.49077833", "0.49037617", "0.49022797", "0.49022734", "0.48913622", "0.48890227", "0.48785496", "0.4875529", "0.48716035", "0.4871448", "0.48691592", "0.48659426", "0.486553", "0.48640332", "0.48639432", "0.48575932", "0.48508132", "0.48377493", "0.48352602", "0.483469", "0.48325887", "0.48250738", "0.48242798", "0.4822386", "0.48157313", "0.48150125", "0.48124045", "0.48069364", "0.48069364", "0.48063153", "0.48025757", "0.47994784", "0.4796025", "0.4795091", "0.4795091", "0.47944787", "0.47928724", "0.47903478", "0.4789798", "0.47889993", "0.4788416", "0.47850248", "0.47689906", "0.47657627", "0.47648534", "0.47624716", "0.47603706", "0.47545558", "0.47480077", "0.4744282" ]
0.0
-1
`sam list stackoutputs` command entry point
def cli(self, stack_name, output, config_file, config_env):
    do_cli(stack_name=stack_name, output=output, region=self.region, profile=self.profile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_cli(stack_name, output, region, profile):\n from samcli.commands.list.stack_outputs.stack_outputs_context import StackOutputsContext\n\n with StackOutputsContext(\n stack_name=stack_name, output=output, region=region, profile=profile\n ) as stack_output_context:\n stack_output_context.run()", "def _get_stack_outputs(stack_name: str, region: str, profile: str = None) -> list:\n logger.debug(f\"Getting stack {stack_name} outputs in region {region}\")\n cfn_client = _get_cfn_client(region=region, profile=profile)\n try:\n result = cfn_client.describe_stacks(StackName=stack_name)\n except ClientError as e:\n if 'does not exist' in e.__str__():\n logger.warning(f\"Stack f{stack_name} has no status. Is it deployed?\")\n return []\n else:\n raise e\n if 'Outputs' not in result['Stacks'][0]:\n logger.debug(f\"stack {stack_name} has no outputs\")\n return list()\n else:\n logger.debug(f\"stack {stack_name} has outputs: {result['Stacks'][0]['Outputs']}\")\n return result['Stacks'][0]['Outputs']", "def get_stack_output(name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetStackResult]:\n ...", "def describe_stacks(Names=None, NextToken=None):\n pass", "def describe_stacks(StackIds=None):\n pass", "def outputs() -> List[str]:\n return Invocation.current.expanded_outputs", "def list_associated_stacks(FleetName=None, NextToken=None):\n pass", "def stack_output(self, cfn_output: aws_cdk.core.CfnOutput) -> \"StackOutput\":\n return jsii.invoke(self, \"stackOutput\", [cfn_output])", "def list_stacks(stack_name: str = None, region: str = None, profile: str = None, **kwargs) -> bool:\n cfn_client = _get_cfn_client(region=region, profile=profile)\n if stack_name:\n logger.debug(f\"Logging stack {stack_name} in region {region}\")\n if not _stack_is_complete(stack_name=stack_name, region=region, profile=profile):\n logger.error(f\"STACK: {stack_name} \"\n f\"in status: {_get_stack_status(stack_name=stack_name, region=region, profile=profile)}. Exiting\")\n return False\n try:\n stacks = cfn_client.describe_stacks(StackName=stack_name)\n except Exception as e:\n logger.error(f\"unable to retrieve stack list\")\n logger.error(e)\n return False\n else:\n logger.debug(f\"Logging stacks in region {region}\")\n try:\n stacks = cfn_client.describe_stacks()\n except Exception as e:\n logger.error(f\"unable to retrieve stack list\")\n logger.error(e)\n return False\n logger.info(f\"{'stack_name':{20}} {'stack_status':{20}} {'drift_status':{20}} {'stack_description'}\")\n for stack in stacks['Stacks']:\n stack_name = stack['StackName']\n stack_status = stack['StackStatus']\n drift_status = stack['DriftInformation']['StackDriftStatus']\n if 'Description' in stack:\n stack_description = stack['Description']\n else:\n stack_description = ''\n logger.info(f\"{stack_name:{20}} {stack_status:{20}} {drift_status:{20}} {stack_description}\")\n # Support more than 100 stacks.. 
TODO: make this less lame\n next_token = stacks['NextToken'] if 'NextToken' in stacks else None\n while next_token:\n stacks = cfn_client.describe_stacks(NextToken=next_token)\n for stack in stacks['Stacks']:\n stack_name = stack['StackName']\n stack_status = stack['StackStatus']\n drift_status = stack['DriftInformation']['StackDriftStatus']\n if 'Description' in stack:\n stack_description = stack['Description']\n else:\n stack_description = ''\n logger.info(f\"{stack_name:{20}} {stack_status:{20}} {drift_status:{20}} {stack_description}\")\n next_token = stacks['NextToken'] if 'NextToken' in stacks else None\n\n return True", "def _list_outputs(self):\n \n outputs = self._outputs().get()\n return outputs", "def get_stacks_log(self):\n return self._get_log('stacks')", "def _get_stack_resources(stack_name: str, region: str, profile: str = None) -> list:\n logger.debug(f\"Getting stack {stack_name} resources in region {region}\")\n cfn_client = _get_cfn_client(region=region, profile=profile)\n try:\n result = cfn_client.describe_stack_resources(StackName=stack_name)\n except Exception as e:\n if 'does not exist' in e.__str__():\n logger.warning(f\"Stack f{stack_name} does not exits. Is it deployed?\")\n return []\n else:\n raise e\n logger.debug(f\"stack {stack_name} resource:{result['StackResources']}\")\n return result['StackResources']", "def _credstash_getall(self, team, exec_env):\n s = check_output([\"credstash\", \"-t\", \"credstash-%s\" % team,\n \"getall\"], env=exec_env)\n return str(s)", "def get_stack_info():\n\n response = cloudformation.describe_stacks(\n StackName=config.CLOUDFORMATION_STACK_NAME\n )\n return response['Stacks'][0]", "def outputs(self):\n\n outputs = []\n for arg in self.arguments:\n if arg.OUT:\n outputs.append(arg)\n\n return outputs", "def list_scenario_outputs(self, scenario_name: str):\n return sorted(\n [x[\"name\"] for x in self._store.read_scenario(scenario_name)[\"provides\"]]\n )", "def get_stacks_logs_bundle(self):\n return self._get_log('stacksBundle')", "def get_stack_values(self):\r\n response = None\r\n if is_connected():\r\n try:\r\n # Get stack information\r\n S.SESSION.send(dbgp.STACK_GET)\r\n response = S.SESSION.read()\r\n except ProtocolConnectionException:\r\n e = sys.exc_info()[1]\r\n self.timeout(lambda: connection_error(\"%s\" % e))\r\n return generate_stack_output(response)", "def list_command(rsf_file, output_format):\n\n try:\n if output_format:\n stream = StringIO()\n list_blobs(rsf_file, output_format, stream)\n\n click.echo(stream.read())\n\n else:\n result = list_blobs(rsf_file)\n\n for blob in result:\n click.echo(repr(blob))\n\n except RegistersException as err:\n utils.error(str(err))", "def get_stack_numbers():\n\n print(\"-> start stack count\")\n tstart = time.time()\n proc = sbp.run([\"curl\",\n \"--silent\",\n \"--request\", \"POST\",\n \"--location\",\n \"--data\", \"REQUEST=doQuery\",\n \"--data\", \"PHASE=RUN\",\n \"--data\", \"FORMAT=text\",\n \"--data\", \"LANG=ADQL\",\n \"--data\", \"QUERY=SELECT distinct a.name, a.detect_stack_id from csc21_snapshot.master_stack_assoc a\",\n \"https://cda.cfa.harvard.edu/csc21_snapshot_tap/sync\"],\n check=True, stdout=sbp.PIPE)\n\n tend = time.time()\n print(f\"<- took {tend - tstart:.1f} seconds\")\n\n stacks = defaultdict(int)\n header = True\n for l in proc.stdout.decode().split(\"\\n\"):\n if header:\n if l.startswith(\"#\"):\n continue\n\n if l != \"name\\tdetect_stack_id\":\n raise ValueError(l)\n\n header = False\n continue\n\n if l == \"\":\n continue\n\n toks = 
l.split(\"\\t\")\n assert len(toks) == 2, l\n stacks[toks[1]] += 1\n\n # remove default nature (so we know what stacks are not known)\n #\n out = {}\n for stack, count in stacks.items():\n out[stack] = count\n\n return out", "def execute(self):\n findings = []\n for stack in cloudformation_client.stacks:\n if not stack.is_nested_stack:\n report = Check_Report_AWS(self.metadata())\n report.region = stack.region\n report.resource_id = stack.name\n report.resource_arn = stack.arn\n report.resource_tags = stack.tags\n\n if stack.enable_termination_protection:\n report.status = \"PASS\"\n report.status_extended = f\"CloudFormation {stack.name} has termination protection enabled\"\n else:\n report.status = \"FAIL\"\n report.status_extended = f\"CloudFormation {stack.name} has termination protection disabled\"\n findings.append(report)\n\n return findings", "def output_names(self):\n return []", "def get_stacks() -> dict:\n return filter_stacks(\n stacks=client.describe_stacks()\n )", "def read_all_stack(self):\n return self.STACK", "def list(args, config):\n\n api = config['API']\n headers = {}\n if args.stack_name:\n headers = {'stack-name': args.stack_name} # put stack name in headers\n r = requests.get(api['list'], headers=headers) # send the GET request\n print('\\nThe following clusters exist:\\n{}\\n'.format(r.json()))\n return", "def describe_stack(cfn, stack_name):\n try:\n stacks = cfn.describe_stacks(StackName=stack_name)[\"Stacks\"]\n return stacks[0]\n except ClientError as e:\n if \"does not exist\" not in e.response[\"Error\"][\"Message\"]:\n raise e\n return None", "def access_stacks_report_list(context, endpoint, parameter='', history=''):\n url = urljoin(context.gemini_api_url, '{ep}/{param}'.format(ep=endpoint, param=parameter))\n context.response = requests.get(url)\n context.history = True if history == 'history' else False", "def get_unnamed_outputs(self):\n return []", "def list_outputs(self, sector_model_name: str):\n return sorted(\n [x[\"name\"] for x in self._store.read_model(sector_model_name)[\"outputs\"]]\n )", "def _list_outputs(self):\n outputs = self._outputs().get()\n\n out_dir = os.path.abspath(os.path.join(os.getcwd(), \"slicesdir\"))\n outputs[\"out_dir\"] = out_dir\n outputs[\"out_files\"] = [\n self._gen_fname(\n basename=f.replace(os.sep, \"_\"),\n cwd=out_dir,\n ext=self.inputs.out_extension,\n )\n for f in self.inputs.in_files\n ]\n return outputs", "def output(self):\n res = check_output([\"terraform\", \"output\", \"-json\"], env=self.exec_env)\n\n return res", "def use_outputs(self) -> typing.Optional[typing.Mapping[str, \"StackOutput\"]]:\n return self._values.get(\"use_outputs\")", "def _go_list(self, *args):\n return subprocess.check_output((\"go\", \"list\") + self.tag_args + args).strip().split(\"\\n\")", "def resources(stack, region, profile):\n logging.debug(f'finding resources - stack: {stack}')\n logging.debug(f'region: {region}')\n logging.debug(f'profile: {profile}')\n tool = ResourceTool(\n Stack=stack,\n Region=region,\n Profile=profile,\n Verbose=True\n )\n\n if tool.list_resources():\n sys.exit(0)\n else:\n sys.exit(1)", "def test_listCommand(self):\n from armi import cli\n\n cli = cli.ArmiCLI()\n\n origout = sys.stdout\n try:\n out = io.StringIO()\n sys.stdout = out\n cli.listCommands()\n finally:\n sys.stdout = origout\n self.assertIn(\"run-suite\", out.getvalue())", "def stdout(self, stdout: str) -> Tuple[List[Message], List[AnnotateCode], str]:\n return [], [], stdout", "def get_outputs(self):\n return self.outputs", "def 
do_overcloud_list(tuskar, args, outfile=sys.stdout):\n overclouds = tuskar.overclouds.list()\n fields = ['id', 'name', 'description', 'stack_id', 'attributes', 'counts']\n\n formatters = {\n 'attributes': fmt.attributes_formatter,\n 'counts': fmt.counts_formatter,\n }\n\n fmt.print_list(overclouds, fields, formatters, outfile=outfile)", "def start_list(command_line):\n stack_driver = CloudStackUtility(command_line)\n return stack_driver.list()", "def setupStackers(args, verbose=False):\n stackerList = []\n # Add here .. for example\n # stackerList.append(stackers.RandomDitherStacker(maxDither=0.5, randomSeed=42)\n if len(stackerList) == 0:\n stackerList = None\n return stackerList", "def test_listCommand(self):\n acli = ArmiCLI()\n\n origout = sys.stdout\n try:\n out = io.StringIO()\n sys.stdout = out\n acli.listCommands()\n finally:\n sys.stdout = origout\n\n self.assertIn(\"run-suite\", out.getvalue())", "def get_stack_tags(stacks) -> list:\n return list(map(get_stack_tags_helper, stacks))", "def get_report_summary():\n process = subprocess.Popen([\"stack\", \"hpc\", \"report\", \"--all\"], stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n return stderr", "def describe_stack_summary(StackId=None):\n pass", "def get_outputs(self):\n return self.attributes[\"outputs\"]", "def get_output_metadata(self):\n return []", "def outputs(self):\r\n return self._outputs", "def stage_list(args):\n\n for stage in args.stages:\n print stage", "def get_outputs():\n outputs = {}\n for obj in vars(acsploit.output).values():\n if hasattr(obj, 'OUTPUT_NAME'):\n outputs[obj.OUTPUT_NAME] = obj\n\n return outputs", "def collectTargets(self, output):\n pass", "def output(stack_name: str, region: str, profile: str = None, **kwargs) -> bool:\n logger.debug(f\"Logging stack {stack_name} outputs in region {region}\")\n if not _stack_is_complete(stack_name=stack_name, region=region, profile=profile):\n logger.error(f\"STACK: {stack_name} \"\n f\"in status: {_get_stack_status(stack_name=stack_name, region=region, profile=profile)}. 
Exiting\")\n return False\n logger.info(\"STACK OUTPUTS:\")\n for stack_output in _get_stack_outputs(stack_name=stack_name, region=region, profile=profile):\n logger.info(f\"{stack_output['OutputKey']:{20}} = {stack_output['OutputValue']}\")\n return True", "def list_command(arguments: List[str]) -> None:\n if len(arguments) > 1:\n print('Too many arguments for list command') # noqa: WPS421\n return\n token = token_load.load()\n gist_list = logic.list_gists(token)\n for gist_info in gist_list:\n print('{0} {1}:{2}'.format(*gist_info))", "def list_states(verbose=1):\n statefile = qcodes.config.get('statefile', None)\n if statefile is None:\n statefile = os.path.join(os.path.expanduser('~'), 'qtt_statefile.hdf5')\n if not os.path.exists(statefile):\n return []\n with h5py.File(statefile, 'r') as h5group:\n tags = list(h5group.keys())\n if verbose:\n print('states on system from file %s: ' % (statefile, ), end='')\n print(', '.join([str(x) for x in tags]))\n return tags", "def getListOfOutputs(self, *args):\n return _libsbml.Transition_getListOfOutputs(self, *args)", "def get_outputs(self):\n raise NotImplementedError", "def cmd_list(args):", "def get_outputs(self):\n return [x[1] for x in self.io_mapping]", "def get_output_names(hf):\n return sorted(map(str, hf['/output/data'].keys()))", "def get(self, *args):\n return _libsbml.ListOfOutputs_get(self, *args)", "def get_outputs(self):\n outputs = []\n missing = []\n for i, name in enumerate(self.output_names[:]):\n try:\n value = self.proto.output_env.look_up(name).unwrapped\n except Exception:\n if self.optional_flags[i]:\n value = None\n missing.append((i, name))\n else:\n raise\n outputs.append(value)\n for i, name in reversed(missing):\n del outputs[i]\n del self.output_names[i]\n del self.optional_flags[i]\n if missing:\n return outputs, reversed(missing)\n return outputs", "def get_output_names():\n names = [device.name for device in get_devices() if device.is_output]\n return list(sorted(names))", "def outList(self,list=False):\n txt = ''\n txt += 'echo \">>> list of expected files on output sandbox\"\\n'\n listOutFiles = []\n stdout = 'CMSSW_$NJob.stdout'\n stderr = 'CMSSW_$NJob.stderr'\n if len(self.output_file) <= 0:\n msg =\"WARNING: no output files name have been defined!!\\n\"\n msg+=\"\\tno output files will be reported back/staged\\n\"\n common.logger.info(msg)\n\n if (self.return_data == 1):\n for file in (self.output_file):\n listOutFiles.append(numberFile(file, '$OutUniqueID'))\n for file in (self.output_file_sandbox):\n listOutFiles.append(numberFile(file, '$NJob'))\n listOutFiles.append(stdout)\n listOutFiles.append(stderr)\n listOutFiles.append('Watchdog_$NJob.log.gz')\n\n txt += 'echo \"output files: '+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'filesToCheck=\"'+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'export filesToCheck\\n'\n taskinfo={}\n taskinfo['outfileBasename'] = self.output_file\n common._db.updateTask_(taskinfo)\n\n if list : return self.output_file\n return txt", "def pants_list(args):\n os.chdir(git_toplevel())\n _pants_list = capture_stdout(\"./pants %s\" % args)\n\n for target in _pants_list.stdout.text.split(\"\\n\"):\n if \":\" in target:\n bare_target = target.split(\":\", 1)[-1]\n print(\":%s\" % bare_target)", "def outputStateNames(self):\n names = []\n for item in self.mechanisms:\n for output_state in item.outputStates:\n names.append(output_state)\n return names", "def get_outputs(self, flatten=False):\n ret = [x[1] for x in self.io_mapping]\n if flatten: return sum(ret,[])\n else: return 
ret", "def process(indir):\n\n stacks = stackdata.find_stack_obis(indir)\n targets = stackdata.read_cxc_targetnames()\n\n # Write out (really should be JSON but doint it this way for now).\n #\n print(\"var stack_name_map = {\")\n spacer = \"\"\n for stack in sorted(stacks):\n obsids = set(obi[0] for obi in stacks[stack])\n names = set(f'\"{targets[obsid]}\"' for obsid in obsids)\n print(f'{spacer}\"{stack}\": [{\",\".join(names)}]')\n spacer = \",\"\n\n print(\"};\")", "def log_dir_stacks_contents(dir_stacks):\r\n for directory in dir_stacks:\r\n logging.info('-'*80)\r\n logging.info('Predicted directory contents of:\\n{0}'\r\n .format(directory.path))\r\n files = directory.file_names\r\n files = sorted(files)\r\n logging.info('Number of files: {0}'.format(len(files)))\r\n logging.info('Files:')\r\n logging.info('\\t'.join(files))", "def output_run(run_data, name):\n\n print(json.dumps(run_data, indent=4))\n ret = run_data.get('return', {})\n display_output(\n {name: ret}, \n\tout=run_data.get('out', 'nested'),\n\topts = salt.config.minion_config('/dev/null'))", "def compiler_output(\n self, stdout: str, stderr: str\n ) -> Tuple[List[Message], List[AnnotateCode], str, str]:\n return [], [], stdout, stderr", "def outputs(self):\n return self.outputs", "def describe_instances(StackId=None, LayerId=None, InstanceIds=None):\n pass", "def run(self):\n environment = self.environment_config.environment_path + \"/\" + self.stack_config.name\n stack = Stack(name=environment, environment_config=self.environment_config,\n connection_manager=self.connection_manager)\n\n outputs = stack.describe_outputs()\n if outputs:\n core_artifacts_s3_bucket = self.stack_config['parameters']['CoreBootStrapRepositoryS3BucketName']\n print(core_artifacts_s3_bucket)\n\n client_artifacts_s3_bucket = [output['OutputValue'] for output in outputs if\n output['OutputKey'] == 'EnvironmentArtifactsS3Bucket']\n print(client_artifacts_s3_bucket[0])\n\n bootstrap_artifacts_key = \"bootstrap/\"\n\n s3 = boto3.resource('s3')\n\n source_bucket = s3.Bucket(core_artifacts_s3_bucket)\n destination_bucket = s3.Bucket(client_artifacts_s3_bucket[0])\n print(source_bucket)\n print(destination_bucket)\n\n for s3_object in source_bucket.objects.filter(Prefix=bootstrap_artifacts_key):\n destination_key = s3_object.key\n print(destination_key)\n s3.Object(destination_bucket.name, destination_key).copy_from(CopySource={\n 'Bucket': s3_object.bucket_name,\n 'Key': s3_object.key})", "def ls():\n # TODO: listing all availabe containers form sequence\n return", "def get_stack(stack_name, region, cfn_client=None):\n if not cfn_client:\n cfn_client = boto3.client(\"cloudformation\", region_name=region)\n return cfn_client.describe_stacks(StackName=stack_name).get(\"Stacks\")[0]", "def output(self, name: str) -> List[str]:\n if name not in self.settings:\n self.raise_named_exception(\n \"The system \"\n + name\n + \" is not present in the settings of the job \"\n + self.name\n )\n if \"output\" not in self.settings[name]:\n self.raise_named_exception(\n \"The settings for \"\n + name\n + \" in the Job \"\n + self.name\n + \"do not include an output specification\"\n )\n return self.settings[name][\"output\"]", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def cli(yamlfile, **args):\n print(ShExGenerator(yamlfile, **args).serialize(**args))", "def info(ctx, schain_name):\n skale 
= ctx.obj['skale']\n info = get_schain_info(skale, schain_name)\n print(json.dumps(info, indent=2))", "def get_stack_output_value(stack_outputs, output_key):\n return next((o.get(\"OutputValue\") for o in stack_outputs if o.get(\"OutputKey\") == output_key), None)", "def deploy_stack():\n build = \"sam build --use-container --manifest src/images/requirements.txt\"\n local(build)\n\n #package = f\"sam package --template-file template.yaml --output-template-file \\\n # packaged.yaml --s3-bucket {env.bucket_name} --region {env.aws_region}\"\n #local(package)\n\n deploy = f\"sam deploy --stack-name storge-machine-service \\\n --s3-bucket {env.bucket_name}\\\n --parameter-overrides env=dev --capabilities CAPABILITY_IAM CAPABILITY_AUTO_EXPAND --region {env.aws_region}\"\n #deploy = \"sam deploy\"\n local(deploy)", "def _get_output_objects_info(self):\n if len(self.output_objects) == 0:\n return []\n\n return self.output_objects[0].keys()", "def readShouts(self, start=-1):\n return []", "def main_list(args):\n return list_commands(args.directory)", "def do_list(cs, args):\n data = []\n _, repositories = cs.repositories.list(args.project_id)\n for repo in repositories:\n _, tags = cs.repositories.list_tags(repo)\n for tag in tags:\n _, manifests = cs.repositories.get_manifests(repo, tag)\n manifests['Name'] = repo\n manifests['Tag'] = tag\n manifests['Project'] = args.project_id\n manifests['Id'] = manifests['Id'][0:12]\n data.append(manifests)\n fields = [\n \"Id\", \"Name\", \"Tag\", \"Author\", 'Project', \"Created\", \"Docker Version\",\n \"Architecture\", \"OS\"\n ]\n utils.print_list(data, fields, sortby=args.sortby)", "def get_stack_name_stack_group(stacks) -> list:\n stack_names = []\n for stack in stacks:\n _stack = {\"stack\": stack[\"StackName\"]}\n for tag in stack[\"Tags\"]:\n if tag[\"Key\"] == \"stack-finder\":\n _stack[\"group\"] = tag[\"Value\"]\n stack_names.append(_stack)\n return stack_names", "def bundle_outputs(self):\n pass", "def main():\n dump(inventory(), fp=stdout, indent=4)", "def ShowPipeStats(cmd_args=None):\n print \"Number of pipes: {: d}\".format(kern.globals.amountpipes)\n print \"Memory used by pipes: {:s}\".format(sizeof_fmt(int(kern.globals.amountpipekva)))\n print \"Max memory allowed for pipes: {:s}\".format(sizeof_fmt(int(kern.globals.maxpipekva)))", "def test_subworkflows_info_remote(self):\n mods_info = nf_core.subworkflows.SubworkflowInfo(self.pipeline_dir, \"bam_sort_stats_samtools\")\n mods_info_output = mods_info.get_component_info()\n console = Console(record=True)\n console.print(mods_info_output)\n output = console.export_text()\n\n assert \"Subworkflow: bam_sort_stats_samtools\" in output\n assert \"Inputs\" in output\n assert \"Outputs\" in output", "def create_output_handlers():\n if not args.outputs:\n return []\n outputs = []\n for name in utils.split_comma(args.outputs):\n path = args.output_path % name if '%s' in args.output_path else args.output_path\n try:\n outputs.append(output.OUTPUT_REGISTRY[name](path, args))\n except KeyError:\n logging.fatal(\"Output format %s not available. 
Please double-check\"\n \" the --outputs parameter.\" % name)\n return outputs", "def ec2_list(ctx):\n\n from opstools.aws import ec2_list as this_ec2_list\n this_ec2_list.main()", "def _command(cmd, args=[], interactive_response=False):\n\t\n\tsys.argv = ['svnstash', cmd]\n\t\n\tsvnstash.interactive_response = interactive_response\n\t\n\tif isinstance(args, list):\n\t\tsys.argv += args\n\telse:\n\t\tsys.argv.append(args)\n\t\n\t#capture the output from the commands\n\tstdout = StringIO()\n\tstderr = StringIO()\n\tsys.stdout = stdout\n\tsys.stderr = stderr\n\t\n\tsvnstash.main()\n\t\n\tsys.stdout = STDOUT\n\tsys.stderr = STDERR\n\tret = (stdout.getvalue(), stderr.getvalue())\n\tstdout.close()\n\tstderr.close()\n\t\n\treturn ret", "def outputs(self):\n return self._outputs if self._outputs else [self.utop]", "def test_get_stack_bounds(images):\n print('STARTING STACK LOCATION TEST')\n\n # Show the stack bounds for all images\n for i, image in enumerate(images):\n stacks = get_stack_bounds(image)\n\n # Draw the bounds of each stack and the number of stacks\n stack_image = cv2.putText(np.copy(image),\n 'num_stacks: {}'.format(len(stacks)),\n (10,30),\n cv2.FONT_HERSHEY_DUPLEX, 0.75, (0,0,0), 1, cv2.LINE_AA)\n for stack in stacks:\n stack_image = draw_rect(stack_image, stack, [0, 0, 0])\n\n original_and_stacks = hstack_images(image, stack_image, cvt_to_bgr=False)\n cv2.imshow('original & stack-identified image ({})'.format(i), original_and_stacks)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n print('ENDING STACK LOCATION TEST\\n')", "def root_outputs(s):\n out = []\n for j in s.jobs:\n out.append(j.outROOT)\n return out", "def GetAllCmdOutput(args, cwd=None, quiet=False):\n # GetAllCmdOutput returns bytes on Python 3. As the downstream codes are\n # expecting strings, we decode the inpout here.\n stdout, stderr = cmd_util.GetAllCmdOutput(args, cwd, quiet)\n return (stdout.decode('utf-8'), stderr.decode('utf-8'))", "def list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))", "def serialize(self):\n return self.output_names", "def command_ssize(self):\n self.print_out(\"Stack size: %s\" % (str(len(self.vm.stack))))", "def outputs(self):\n return self._outputs" ]
[ "0.7447414", "0.7092905", "0.6619107", "0.6294256", "0.62495506", "0.6235097", "0.6223217", "0.5894003", "0.58861333", "0.58443314", "0.57637775", "0.5655446", "0.56269765", "0.56182945", "0.5560416", "0.5491101", "0.5430899", "0.5425569", "0.5410986", "0.54088354", "0.5386558", "0.53549194", "0.53499025", "0.5346875", "0.53363466", "0.52955323", "0.5287997", "0.52694035", "0.52592194", "0.52582204", "0.5247005", "0.5238708", "0.5212555", "0.52105194", "0.5193582", "0.51788414", "0.5165033", "0.51642525", "0.51578903", "0.5148231", "0.51368403", "0.51282156", "0.51260394", "0.51225376", "0.51157826", "0.5115725", "0.5102883", "0.5101181", "0.5101032", "0.5098403", "0.5094138", "0.5092498", "0.50860757", "0.50784105", "0.50752753", "0.5066732", "0.5049936", "0.5038643", "0.5034178", "0.502599", "0.5024518", "0.5017921", "0.50100815", "0.49978793", "0.49975935", "0.49877515", "0.49778968", "0.49457276", "0.49381453", "0.49319273", "0.49234653", "0.49201116", "0.49058354", "0.490293", "0.4901138", "0.48976213", "0.48922262", "0.4891595", "0.48859444", "0.4885304", "0.4875541", "0.48696172", "0.4856137", "0.48514995", "0.4841212", "0.4838512", "0.4820137", "0.48117143", "0.47979364", "0.47972208", "0.47891685", "0.47882578", "0.47870257", "0.4779133", "0.47737616", "0.4766136", "0.47650844", "0.47649497", "0.47616434", "0.4760926" ]
0.5937543
7
Implementation of the ``cli`` method
def do_cli(stack_name, output, region, profile):
    from samcli.commands.list.stack_outputs.stack_outputs_context import StackOutputsContext

    with StackOutputsContext(
        stack_name=stack_name, output=output, region=region, profile=profile
    ) as stack_output_context:
        stack_output_context.run()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():\n pass", "def cli():\r\n pass", "def cli() -> None:", "def cli() -> None:", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n return", "def cli():\n\n pass", "def cli():\n ...", "def cli():\n logger.debug('cli() called')", "def _cli():\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def main_cli():\n pass", "def cli(_):\n pass", "def cli(_):\n pass", "def cli(**_) -> None:\n pass", "def cli(args): # noqa; pylint: disable=unused-argument", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def cli() -> None:\n pass # pragma: no cover", "def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)", "def main():\n\tcli = Cli()\n\tcli.run()", "def cli(self, env):\n raise NotImplementedError", "def cli(ctx):", "def cli(ctx):", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def main():\n cli = Cli()\n res = cli.run()\n if res:\n print(res.strip())", "def cli() -> None:\r\n config_argparse = _configfile_parser()\r\n config_args, _ = config_argparse.parse_known_args()\r\n\r\n defaults = {}\r\n\r\n if config_args.config: \r\n defaults = _load_config(config_args)\r\n\r\n parser = _cli(config_argparse, defaults)\r\n _add_standard_args(parser) \r\n \r\n subparser = parser.add_subparsers()\r\n _add_create_command(subparser)\r\n _add_update_command(subparser) \r\n\r\n args = parser.parse_args()\r\n command = args.cmd\r\n command.execute(args)", "def runCLI(self):\n\t\tself.available_cmds['help'].__call__()\n\n\t\twhile True:\n\t\t\tcmd = input('--> Enter Cmd: ')\n\t\t\tprint(\"\\n\")\n\t\t\tcmd = cmd.split()\n\n\t\t\tif len(cmd) > 0 and cmd[0] in self.available_cmds:\n\t\t\t\tif len(cmd) >= 1:\n\t\t\t\t\targs = cmd[1:]\n\t\t\t\telse:\n\t\t\t\t\targs = []\n\n\t\t\t\tself.available_cmds[cmd[0]].__call__(args)", "def extend_cli(self, subparser):", "def cli(self, command):\n return self.mesh.cli(command)", "def run_cli(self, cli, args=None, node_paths=None):\n cli_args = [cli]\n if args:\n cli_args.append('--')\n cli_args.extend(args)\n return self.run_command(args=cli_args, node_paths=node_paths)", "def launch_cli() -> None:\n app.run(main, flags_parser=_parse_flags)", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)" ]
[ "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.85499233", "0.84482986", "0.8387393", "0.83868676", "0.83868676", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.83177346", "0.8250038", "0.82336634", "0.822685", "0.8219791", "0.8204065", "0.8012373", "0.8012373", "0.8012373", "0.8012373", "0.8012373", "0.7951811", "0.7931194", "0.7931194", "0.78174126", "0.7631638", "0.7588085", "0.7588085", "0.75526434", "0.749364", "0.74920344", "0.7477796", "0.74424267", "0.74424267", "0.744158", "0.74206775", "0.7215479", "0.70757693", "0.7062125", "0.7035952", "0.70264167", "0.6980945", "0.69532543" ]
0.0
-1
Plot an image along with its histogram and cumulative histogram.
def plot_img_and_hist(image, axes, bins=256): image = img_as_float(image) ax_img, ax_hist = axes ax_cdf = ax_hist.twinx() # Display image ax_img.imshow(image, cmap=plt.cm.gray) ax_img.set_axis_off() # Display histogram ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black') ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0)) ax_hist.set_xlabel('Pixel intensity') ax_hist.set_xlim(0, 1) ax_hist.set_yticks([]) # Display cumulative distribution img_cdf, bins = exposure.cumulative_distribution(image, bins) ax_cdf.plot(bins, img_cdf, 'r') ax_cdf.set_yticks([]) return ax_img, ax_hist, ax_cdf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_img_and_hist(image, axes, bins=256):\n# image = img_as_float(image)\n ax_img, ax_hist = axes\n ax_cdf = ax_hist.twinx()\n\n # Display image\n ax_img.imshow(image, cmap=plt.cm.gray);\n ax_img.set_axis_off()\n\n # Display histogram\n ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')\n ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n ax_hist.set_xlabel('Pixel intensity')\n ax_hist.set_xlim(0, 1)\n ax_hist.set_yticks([])\n\n # Display cumulative distribution\n img_cdf, bins = exposure.cumulative_distribution(image, bins)\n ax_cdf.plot(bins, img_cdf, 'r')\n ax_cdf.set_yticks([])\n\n return ax_img, ax_hist, ax_cdf", "def plot_histogram(img):\n rgb_hist = rgb_histogram(img)\n plt.figure()\n for color, hist in rgb_hist.items():\n plt.plot(hist, color=color)\n plt.xlim([0, 256])", "def plot_img_and_hist(image, axes, bins=256):\n\t\timage = img_as_float(image)\n\t\tax_img, ax_hist = axes\n\t\tax_cdf = ax_hist.twinx()\n\n\t\t# Display image\n\t\tax_img.imshow(image, cmap=plt.cm.gray)\n\t\tax_img.set_axis_off()\n\n\t\t# Display histogram\n\t\tax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')\n\t\tax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n\t\tax_hist.set_xlabel('Pixel intensity')\n\t\tax_hist.set_xlim(0, 1)\n\t\tax_hist.set_yticks([])\n\n\t\t# Display cumulative distribution\n\t\timg_cdf, bins = exposure.cumulative_distribution(image, bins)\n\t\tax_cdf.plot(bins, img_cdf, 'r')\n\t\tax_cdf.set_yticks([])\n\n\t\treturn ax_img, ax_hist, ax_cdf", "def plot_img_hist(img, num_plot, title):\n # Imagen\n plt.subplot(num_plot)\n plt.imshow(img, cmap='gray')\n plt.title(title)\n # Histograma\n plt.subplot(num_plot + 4)\n plt.hist(img_as_float(img).ravel(), bins=256)\n plt.xlim(0, 1)", "def plot_image_and_hist(image, mask, mask_cmap, img_cmap, axes, bins=256):\n flippedMask = -1*mask + 1\n \n # add transparency\n alphas = np.ones(image.shape)\n alphas = alphas * flippedMask\n alphas = np.clip(alphas, 0.7,1)\n\n image = image - image.min()\n image = image / image.max()\n\n colors = Normalize(0, 1, clip=True)(image)\n colors = img_cmap(colors)\n\n # Now set the alpha channel to the one we created above\n colors[..., -1] = alphas\n \n\n ax_image, ax_hist = axes\n ax_cdf = ax_hist.twinx()\n\n # Display image\n ax_image.imshow(mask, cmap=mask_cmap)\n ax_image.imshow(colors, cmap=img_cmap)\n ax_image.set_axis_off()\n\n # Display histogram\n ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')\n ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n ax_hist.set_xlabel('Pixel intensity')\n ax_hist.set_xlim(0, 1)\n ax_hist.set_yticks([])\n\n # Display cumulative distribution\n image_cdf, bins = exposure.cumulative_distribution(image, bins)\n ax_cdf.plot(bins, image_cdf, 'r')\n ax_cdf.set_yticks([])\n\n return ax_image, ax_hist, ax_cdf", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def show_histogram(im):\n\n if im.ndim == 2:\n # Input image is single channel\n plt.hist(im.flatten(), 256, range=(0, 250), fc='k')\n plt.show()\n\n elif im.ndim == 3:\n # Input image is three channels\n fig = plt.figure()\n fig.add_subplot(311)\n plt.hist(im[..., 0].flatten(), 256, range=(0, 250), fc='b')\n fig.add_subplot(312)\n plt.hist(im[..., 1].flatten(), 256, range=(0, 250), fc='g')\n 
fig.add_subplot(313)\n plt.hist(im[..., 2].flatten(), 256, range=(0, 250), fc='r')\n plt.show()", "def img_histogram(img):\n\n plt.figure()\n\n if len(img.shape) > 2:\n\n plt.subplot(3,1,1)\n plt.hist(img[:,:,0].ravel(),bins=range(257),color='b')\n plt.title('Image Histogram')\n plt.legend('Blue')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,2)\n plt.hist(img[:,:,1].ravel(),bins=range(257),color='g')\n plt.legend('Green')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,3)\n plt.hist(img[:,:,2].ravel(),bins=range(257),color='r')\n plt.legend('Red')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()\n\n else:\n\n plt.hist(img[:,:].ravel(),bins=range(257))\n plt.title('Image Histogram - Grayscale')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()", "def plot_pixel_intensity(image, path='./pixel_intensity_before_normalization.png'):\n\n plt.figure(figsize=(10, 5))\n plt.subplot(1, 2, 1)\n plt.imshow(image)\n plt.axis('off')\n histo = plt.subplot(1, 2, 2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(image[:, :, 0].flatten(), bins=n_bins, lw=0, color='r', alpha=0.5)\n plt.hist(image[:, :, 1].flatten(), bins=n_bins, lw=0, color='g', alpha=0.5)\n plt.hist(image[:, :, 2].flatten(), bins=n_bins, lw=0, color='b', alpha=0.5)\n plt.savefig(path)\n plt.show()", "def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plot_histogram(path: str, image: sitk.Image, no_bins: int=255, slice_no: int=-1,\n title: str='', xlabel: str='', ylabel: str='') -> None:\n if slice_no > -1:\n data = sitk.GetArrayFromImage(image[:, :, slice_no])\n else:\n data = sitk.GetArrayFromImage(image)\n\n data = data.flatten()\n\n plt.hist(data, bins=no_bins)\n if title: plt.title(title)\n if xlabel: plt.xlabel(xlabel)\n if ylabel: plt.ylabel(ylabel)\n plt.savefig(path)\n plt.close()", "def plot_image_and_brightness(axis, image, imageintensity, framecount):\n\n # Plot RGB 
Image\n axis[0].imshow(image)\n axis[0].axis('off')\n axis[0].set_title(f'Frame Number {framecount}')\n\n # Plot intensity\n axis[1].plot(imageintensity, '.-')\n axis[1].set_ylabel('Average Intensity')\n\n # Stuff to show and stream plot\n plt.show(block=False)\n plt.pause(0.001)", "def plot_histogram_and_image(hist_pred, hist_true, img, tile_name, out_dir=None, volume_weighted=False):\n img = img.astype(np.uint8)\n\n index = np.arange(len(hist_pred)) + 0.5\n\n KL_div = KL(hist_true, hist_pred)\n\n iou = calculate_iou(hist_true, hist_pred)\n\n dm_true = get_dm(hist_true, volume_weighted=volume_weighted)\n dm_pred = get_dm(hist_pred, volume_weighted=volume_weighted)\n\n # Create Figure and Axes instances\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(14, 6.5))\n\n # add title of the whole figure\n fig.suptitle('Comparison of Distribution\\n%s' % (tile_name), fontsize=18)\n\n ax1.bar(index, hist_true, width=1.0, label='true histogram')\n ax1.bar(index, hist_pred, width=1.0, alpha=0.5, label='predicted histogram')\n ax1.legend(fontsize=14)\n\n # axis labels\n ax1.set_xlabel('Grain diameter [cm]', fontsize=16)\n\n if volume_weighted:\n ax1.set_ylabel('Relative volume', fontsize=16)\n else:\n ax1.set_ylabel('Relative frequency', fontsize=16)\n\n # x ticks labels\n group_labels = np.array([0.00, 0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.10, 0.12, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.50, 0.60, 0.80, 1.0, 1.2, 1.5, 2.0]) * 100\n group_labels = np.array(group_labels, dtype=np.int)\n ax1.set_xticks(np.arange(len(group_labels)))\n ax1.set_xticklabels(group_labels, rotation='vertical')\n\n ax1.text( # position text relative to Axes\n 0.98, 0.82, 'KL: %.2f' % (KL_div),\n ha='right', va='top',\n transform=ax1.transAxes,\n fontsize=16\n )\n\n ax1.text( # position text relative to Axes\n 0.98, 0.76, 'IoU: %.2f' % (iou),\n ha='right', va='top',\n transform=ax1.transAxes,\n fontsize=16\n )\n\n ax1.text( # position text relative to Axes\n 0.98, 0.70, 'dm true: %.2f cm' % (dm_true),\n ha='right', va='top',\n transform=ax1.transAxes,\n fontsize=16\n )\n ax1.text( # position text relative to Axes\n 0.98, 0.64, 'dm pred: %.2f cm' % (dm_pred),\n ha='right', va='top',\n transform=ax1.transAxes,\n fontsize=16\n )\n\n ax2.set_xticks(())\n ax2.set_yticks(())\n\n ax2.imshow(img)\n\n fig.tight_layout()\n fig.subplots_adjust(top=0.88)\n\n if out_dir is not None:\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n plt.savefig(os.path.join(out_dir, '{}.png'.format(tile_name)), bbox_inches='tight')\n plt.close(fig) # close the figure", "def plot_image(img, label=\"\"): \n if img.shape[0] == 3:\n img = img.transpose(1,2,0)\n fig,ax = plt.subplots(1)\n sns.set_style('white')\n ax.imshow(np.asarray(img))\n if label!=\"\":\n plt.title(number_label[label])\n return fig,ax", "def plot_channel_histogram(img, tri):\n rgb = ['red', 'green', 'blue']\n\n f = img[tri == 255]\n b = img[tri == 0]\n\n for source in [f, b]:\n for channel in range(source.shape[-1]):\n sns.distplot(source[:, channel], color=rgb[channel])", "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def plot_img(X: np.ndarray, **kwargs):\n kwargs.setdefault('origin', 'lower') # Sane default\n plt.imshow(X, **kwargs)", "def histograma_colorido(imagem, intervalo=(0, 256)):\n \n color = ('b','g','r')\n \n fig, ax = plt.subplots(3,1, figsize=(12,8))\n \n for i,col in enumerate(color):\n histr = cv2.calcHist([imagem],[i],None,[intervalo[1]],[intervalo[0],intervalo[1]])\n ax[i].plot(histr, color = col)\n 
ax[i].set_xlim([intervalo[0],intervalo[1]])\n# plt.plot(histr,color = col)\n# plt.xlim([intervalo[0],intervalo[1]])\n plt.show()", "def plot_histogram_overlay(path: str, image1: sitk.Image, image2: sitk.Image, no_bins: int=255, slice_no: int=-1,\n title: str='', xlabel: str='', ylabel: str='') -> None:\n if slice_no > -1:\n data1 = sitk.GetArrayFromImage(image1[:, :, slice_no])\n data2 = sitk.GetArrayFromImage(image2[:, :, slice_no])\n else:\n data1 = sitk.GetArrayFromImage(image1)\n data2 = sitk.GetArrayFromImage(image2)\n\n data1 = data1.flatten()\n data2 = data2.flatten()\n\n plt.hist(data1, bins=no_bins, alpha=0.5)\n plt.hist(data2, bins=no_bins, alpha=0.5)\n if title: plt.title(title)\n if xlabel: plt.xlabel(xlabel)\n if ylabel: plt.ylabel(ylabel)\n plt.savefig(path)\n plt.close()", "def plot_distribution(img_path):\n img = Image.open(img_path)\n img_width, img_height = img.size\n img = prepare_image(img = img)\n model = vgg19(pretrained=True).cuda().eval() \n predict = model.forward(img)\n predict = predict.detach().cpu().numpy().reshape(-1)\n \n label = pd.read_csv('./label.csv', sep = ';', index_col=0)\n label['predict'] = predict\n label.sort_values(by = 'predict', inplace = True)\n trace = go.Bar(x = [str(i) + '_' + j for i, j in enumerate(label.label)], y = label.predict)\n l = go.Layout(\n title = 'Class distribution',\n xaxis = dict(\n title = 'Class'\n ),\n yaxis = dict(\n title = 'Score'\n )\n )\n fig = go.Figure(data = [trace], layout = l)\n iplot(fig)", "def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def plot_image(image):\n plt.imshow(image, cmap=\"gray\", interpolation=\"nearest\")\n plt.axis(\"off\")", "def put_histogram(self, img, coords):\n self.char_color.update(img, coords)", "def matplotlibDisplay(img, title=\"Image\", colorFlag = 'gray'):\n plt.imshow(img, colorFlag)\n plt.title(title)\n plt.xticks([])\n plt.yticks([])\n plt.show()", "def plot_color_image(image):\n plt.imshow(image, interpolation=\"nearest\")\n plt.axis(\"off\")", "def plot_channel_image(image, title=None, vmin=None, vmax=None):\r\n plt.imshow(image, origin='lower', cmap='jet', vmin=vmin, vmax=vmax)\r\n fig = plt.gcf()\r\n fig.set_size_inches(11,11)\r\n plt.axis('off')\r\n if title is not None:\r\n plt.title(title, fontsize=16)\r\n plt.show()", "def plotPred(img, pred):\n\n #plota a imagem.\n plt.imshow(img)\n plt.axis('off')\n\n #grafico de barras.\n plt.figure() \n order = list(reversed(range(len(pred)))) \n bar_preds = [pr[2] for pr in pred]\n labels = (pr[1] for pr in pred)\n plt.barh(order, bar_preds, alpha=0.5)\n plt.yticks(order, labels)\n plt.xlabel('Probability')\n plt.xlim(0, 1.01)\n plt.tight_layout()\n plt.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import 
matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def plot_single_image(image):\n image = image.cpu()\n \n assert type(image) is torch.Tensor, 'Image to plot is not torch.Tensor'\n image_size = int(np.sqrt(image.shape[0]))\n image = image.view(image_size, image_size)\n \n fig = plt.imshow(image, cmap = 'gray')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n plt.show()\n plt.close('all')", "def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()", "def img_show(img, counter, mode, RGB):\n plt.figure(counter)\n plt.axis('off')\n if not RGB:\n img_aux = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img_aux\n if mode is None:\n plt.imshow(img)\n else:\n plt.imshow(img, cmap=mode)\n plt.show()\n return counter + 1", "def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)", "def plot(self, show_contours=False):\n plt.imshow(self.img, cmap='gray')\n if show_contours:\n for X in self.contours:\n plt.plot(X[:, 0], X[:, 1])\n plt.gca().invert_yaxis()", "def plot_img():\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())\n\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n sample = model.sample_z(data) \n plt.imshow(sample)", "def plot_image(self, i_group=0, i_integ=0, log=False, reverse_y=True, save=False, filename=None):\n\n img = self.data[i_integ, i_group, :, :]\n\n fig, ax = plt.subplots(1, 1, figsize=(8, 3))\n\n if log:\n im = ax.imshow(np.log10(img))\n ax.set_title('log10 Group {}; Integ {}'.format(i_group, i_integ))\n else:\n im = ax.imshow(img)\n ax.set_title('Group {}; Integ {}'.format(i_group, i_integ))\n\n if reverse_y:\n ax.invert_yaxis()\n\n fig.colorbar(im, ax=ax, orientation='horizontal')\n plt.tight_layout()\n\n # option to save the image\n if save:\n if filename is None:\n filename = 'image_G{}_I{}.png'.format(i_group, i_integ)\n fig.savefig(filename)", "def ecualizar_img(p):\n img = read_img(p)\n\n img1_histogram, bin_edges1 = np.histogram(img.ravel(), bins=256)\n ecualized_img = ecualizar_histograma(img, img1_histogram)\n img2_histogram, bin_edges2 = np.histogram(ecualized_img.ravel(), bins=256)\n double_ecualized_img = ecualizar_histograma(ecualized_img, img2_histogram)\n img3_histogram, bin_edges3 = np.histogram(double_ecualized_img, bins=256)\n\n _, axarr = plt.subplots(1, 3)\n axarr[0].plot(bin_edges1[0:-1], img1_histogram)\n axarr[1].plot(bin_edges2[0:-1], img2_histogram)\n axarr[2].plot(bin_edges3[0:-1], img3_histogram)\n plt.show()\n\n show_imgs([img, ecualized_img, double_ecualized_img])", "def hist_eq(img):\n hist, bins = np.histogram(img.flatten(), 256, [0, 256])\n cdf = hist.cumsum()\n cdf_m = np.ma.masked_equal(cdf, 0)\n cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())\n cdf = np.ma.filled(cdf_m, 0).astype('uint8')\n img2 = cdf[img]\n return img2", "def plot_image_and_proj(image, title=\"\", **kwargs):\n fig = plt.figure()\n gs = gridspec.GridSpec(3, 2, width_ratios=[3, 1], height_ratios=[0.2, 3, 1]) \n ax0 = 
plt.subplot(gs[1,0])\n plt.title(title)\n ims = plt.imshow(image, aspect=\"auto\", **kwargs)\n \n ax2 = plt.subplot(gs[2,0], sharex=ax0, )\n plt.plot(image.sum(axis=0))\n plt.subplot(gs[1,1], sharey=ax0)\n plt.plot(image.sum(axis=1), range(len(image.sum(axis=1))))\n\n ax = plt.subplot(gs[0,0])\n plt.colorbar(ims, orientation=\"horizontal\", cax=ax)\n fig.show()", "def plot_preds(image, preds): \r\n #image\r\n plt.imshow(image)\r\n plt.axis('off')\r\n \r\n #bar graph\r\n plt.figure() \r\n order = list(reversed(range(len(preds)))) \r\n bar_preds = [pr[2] for pr in preds]\r\n labels = (pr[1] for pr in preds)\r\n plt.barh(order, bar_preds, alpha=0.5)\r\n plt.yticks(order, labels)\r\n plt.xlabel('Probability')\r\n plt.xlim(0, 1.01)\r\n plt.tight_layout()\r\n plt.show()", "def plot_data(self):\n # plot every log image\n for log_img in self.log_img_map.itervalues():\n log_img.plot()", "def plot_image_sequence(self):\r\n\r\n imv = pg.ImageView()\r\n\r\n imv.show()\r\n\r\n imv.setImage(self.imageData)\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 1, 0)", "def plotColorbar(self):\n self.removeColorbar()\n\n # X Y W H\n self._colorbarax = self.figure.add_axes([0.02,-0.06,0.97,0.05])\n\n mx = self.images[0].get_clim()[1]\n mn = self.images[0].get_clim()[0]\n dm = (mx-mn)\n self._colorbar = self.figure.colorbar(self.images[0], cax=self._colorbarax, ticks=[mn,mn+dm*0.2,mn+dm*0.4,mn+dm*0.6,mn+dm*0.8,mx], orientation='horizontal')\n self._colorbar.ax.set_xticklabels(['0%','20%','40%','60%','80%','100%'])\n self._colorbar.ax.tick_params(labelcolor='white', color='white', labelsize=self.labelFontSize)", "def plot_bias_smooth(bias, bias_smooth, comp_figfile, hist_figfile):\n h, w = bias.shape\n # calculate the residual between bias and smoothed bias data\n bias_res = bias - bias_smooth\n\n fig1 = plt.figure(figsize=(12,4), dpi=150)\n ax1 = fig1.add_axes([0.055, 0.12, 0.25, 0.75])\n ax2 = fig1.add_axes([0.355, 0.12, 0.25, 0.75])\n ax3 = fig1.add_axes([0.655, 0.12, 0.25, 0.75])\n mean = bias.mean(dtype=np.float64)\n std = bias.std(dtype=np.float64, ddof=1)\n vmin = mean - 2.*std\n vmax = mean + 2.*std\n cax1 = ax1.imshow(bias, vmin=vmin, vmax=vmax, cmap='gray')\n cax2 = ax2.imshow(bias_smooth, vmin=vmin, vmax=vmax, cmap='gray')\n cax3 = ax3.imshow(bias_res, vmin=vmin, vmax=vmax, cmap='gray')\n cbar_ax = fig1.add_axes([0.925, 0.12, 0.02, 0.75])\n cbar = fig1.colorbar(cax1, cax=cbar_ax)\n ax1.set_title('bias')\n ax2.set_title('bias_smooth')\n ax3.set_title('bias - bias_smooth')\n for ax in [ax1,ax2,ax3]:\n ax.set_xlim(0, bias.shape[1]-1)\n ax.set_ylim(bias.shape[1]-1, 0)\n ax.set_xlabel('X', fontsize=11)\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(10)\n # only show y label in the left panel\n ax1.set_ylabel('Y',fontsize=11)\n \n # plot the histogram of smoothed bias\n # prepare the bin list\n bins = np.linspace(-4, 4, 40+1)\n \n # prepare the gaussian fitting and error function\n fitfunc = lambda p,x:p[0]*np.exp(-0.5*(x-p[1])**2/p[2]**2)\n errfunc = lambda p,x,y: y-fitfunc(p,x)\n \n # create figure\n fig2 = plt.figure(figsize=(8,6), dpi=150)\n for i, j in [(i, j) for i in range(3) for j in range(3)]:\n ax = fig2.add_axes([0.1+j*0.3, 0.7-i*0.3, 0.27, 0.27])\n \n labels = 'abcdefghi'\n alpha = 0.7\n # plot both bias and smoothed bias\n for idata,data in 
enumerate([bias,bias_res]):\n message = ['Parameters for gaussian fitting of the histograms',\n 'y, x, A, center, sigma']\n for iy, ix in [(iy, ix) for iy in range(3) for ix in range(3)]:\n yc = iy*(h//4) + h//4\n xc = ix*(w//4) + w//4\n x1, x2 = xc-200, xc+200\n y1, y2 = yc-200, yc+200\n ax1.plot([x1,x2], [y1,y1], 'm-', alpha=alpha)\n ax1.plot([x1,x2], [y2,y2], 'm-', alpha=alpha)\n ax1.plot([x1,x1], [y1,y2], 'm-', alpha=alpha)\n ax1.plot([x2,x2], [y1,y2], 'm-', alpha=alpha)\n ax3.plot([x1,x2], [y1,y1], 'c-', alpha=alpha)\n ax3.plot([x1,x2], [y2,y2], 'c-', alpha=alpha)\n ax3.plot([x1,x1], [y1,y2], 'c-', alpha=alpha)\n ax3.plot([x2,x2], [y1,y2], 'c-', alpha=alpha)\n ax1.text(xc-50,yc-20,'(%s)'%labels[iy*3+ix],color='m')\n ax3.text(xc-50,yc-20,'(%s)'%labels[iy*3+ix],color='c')\n data_cut = data[y1:y2,x1:x2]\n y,_ = np.histogram(data_cut, bins=bins)\n x = (np.roll(bins,1) + bins)/2\n x = x[1:]\n # use least square minimization function in scipy\n p1,succ = opt.leastsq(errfunc,[y.max(),0.,1.],args=(x,y))\n ax = fig2.get_axes()[iy*3+ix]\n color1 = ('r', 'b')[idata]\n color2 = ('m', 'c')[idata]\n # plot the histogram\n ax.bar(x, y, align='center', color=color1, width=0.2, alpha=0.5)\n # plot the gaussian fitting of histogram\n xnew = np.linspace(x[0], x[-1], 201)\n ax.plot(xnew, fitfunc(p1, xnew), color2+'-', lw=2)\n ax.set_xlim(-4, 4)\n x1,x2 = ax.get_xlim()\n y1,y2 = ax.get_ylim()\n message.append('%4d %4d %+10.8e %+10.8e %+6.3f'%(\n yc, xc, p1[0], p1[1], p1[2]))\n \n # write the fitting parameters into running log\n logger.info((os.linesep+' ').join(message))\n \n # find maximum y in different axes\n max_y = 0\n for iax, ax in enumerate(fig2.get_axes()):\n y1, y2 = ax.get_ylim()\n if y2 > max_y:\n max_y = y2\n \n # set y range for all axes\n for iax, ax in enumerate(fig2.get_axes()):\n x1, x2 = ax.get_xlim()\n ax.text(0.9*x1+0.1*x2, 0.2*y1+0.8*y2, '(%s)'%labels[iax],\n fontsize=12)\n ax.set_ylim(0, max_y)\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(12)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(12)\n \n if iax in [0, 3, 6]:\n ax.set_ylabel('$N$', fontsize=11)\n else:\n ax.set_yticklabels([])\n if iax in [6, 7, 8]:\n ax.set_xlabel('Counts', fontsize=11)\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(9)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(9)\n\n # save figures\n fig1.savefig(comp_figfile)\n fig2.savefig(hist_figfile)\n plt.close(fig1)\n plt.close(fig2)", "def plot_histograms(p_hist, p_hbins, title, figure_path=None):\n\n base_fig_size = 7\n h_fig = base_fig_size\n w_fig = base_fig_size * 4\n\n fig = plt.figure(figsize=(w_fig, h_fig))\n fig.suptitle(title)\n iplot = 0\n\n p_Nx, p_Ny = np.amax(p_hbins, axis=1) + 1\n\n p_hist = np.reshape(p_hist, (4, p_Ny, p_Nx))\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Amp (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[0])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Phase (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[1])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Real (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = 
p_plot.imshow(np.rot90(np.log10(p_hist[2])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Imag (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[3])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n if figure_path:\n plt.savefig(figure_path, format='png')\n\n return fig", "def showHistogram(image_list, name_list, path, toSave=False, hist_range=(0.0, 1.0)):\n\tfig = plt.figure()\n\tfig.subplots_adjust(hspace=.5)\n\timage_coordinate = 321\n\ti = 0\n\tfor image in image_list:\n\t\tfig.add_subplot(image_coordinate)\n\t\tplt.title(name_list[i])\n\t\tplt.set_cmap('gray')\n\t\tplt.axis('off')\n\t\tplt.imshow(image)\n\n\t\timage_coordinate += 1\n\n\t\tfig.add_subplot(image_coordinate)\n\t\tplt.title('histogram')\n\t\tplt.hist(image.ravel(), bins=256, range=hist_range)\n\n\t\timage_coordinate += 1\t\n\t\ti += 1\n\n\tif toSave:\n\t\tplt.savefig(path + \".jpg\")\n\tplt.show()", "def plot_numpy_img(np_img):\n plt.imshow(np_img, interpolation='nearest')\n plt.show()", "def histeq(im, nbr_bins = 256):\n\t# get image histogram\n\timhist, bins = pl.histogram(im.flatten(), nbr_bins, normed = True)\n\tcdf = imhist.cumsum() # cumulative distribution function\n\tcdf = 255 * cdf / cdf[-1] # normalize\n\t# use linear interpolation of cdf to find new pixel values\n\tim2 = pl.interp(im.flatten(), bins[:-1], cdf)\n\treturn im2.reshape(im.shape)", "def plot_i(im, Prior, nit, chi2_1, chi2_2, ipynb=False):\n\n plt.ion()\n plt.pause(0.00001)\n plt.clf()\n\n plt.imshow(im.reshape(Prior.ydim,Prior.xdim), cmap=plt.get_cmap('afmhot'), interpolation='gaussian')\n xticks = ticks(Prior.xdim, Prior.psize/RADPERAS/1e-6)\n yticks = ticks(Prior.ydim, Prior.psize/RADPERAS/1e-6)\n plt.xticks(xticks[0], xticks[1])\n plt.yticks(yticks[0], yticks[1])\n plt.xlabel('Relative RA ($\\mu$as)')\n plt.ylabel('Relative Dec ($\\mu$as)')\n plt.title(\"step: %i $\\chi^2_1$: %f $\\chi^2_2$: %f\" % (nit, chi2_1, chi2_2), fontsize=20)\n #plt.draw()\n\n if ipynb:\n display.clear_output()\n display.display(plt.gcf())", "def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))", "def visualizeImg(img):\n plt.figure(figsize=(10,4))\n plt.imshow(img)\n plt.show()", "def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()", "def plot_100_image(X):\n size = int(np.sqrt(X.shape[1]))\n\n # sample 100 image, reshape, reorg it\n sample_idx = np.random.choice(np.arange(X.shape[0]), 100) # 100*400\n sample_images = X[sample_idx, :]\n\n fig, ax_array = plt.subplots(nrows=10, ncols=10, sharey=True, sharex=True, figsize=(8, 8))\n\n for r in range(10):\n for c in range(10):\n ax_array[r, c].matshow(sample_images[10 * r + c].reshape((size, size)),\n cmap=matplotlib.cm.binary)\n plt.xticks(np.array([]))\n plt.yticks(np.array([])) \n #绘图函数,画100张图片", "def visualize(self):\n self.dataFrame.hist()\n plt.show()", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def histogram(self, image):\n\n response = self._send_request(\"histogram\", files=dict(image=image))\n return response[self._layer]['histogram']", "def histeq(im,nbr_bins=256):\r\n # Calculate 
histogram of images\r\n imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)\r\n cdf = imhist.cumsum() # cumulative distribution function\r\n cdf = 255 * cdf / cdf[-1] # 归一化\r\n # Using the linear interpolation of cumulative distribution function, the new pixel value is calculated.\r\n im2 = interp(im.flatten(),bins[:-1],cdf)\r\n return im2.reshape(im.shape), cdf", "def calculateHistogram(self):\n \n # Define color map\n colors = [ (255,0,0),(0,255,0),(0,0,255) ]\n # Define empty image to plot histogram in\n plot_to_fill = np.zeros((280,400,3))\n # Define bins of the histogram\n bins = np.arange(256).reshape(256,1)\n \n # Boucle sur les canaux\n for channel, color in enumerate(colors):\n # Calcul de l'histogramme\n hist_item = cv2.calcHist(self.frame,[channel],None,[256],[0,256])\n # Normalisation\n cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)\n # Conversion\n hist = np.int32(np.around(hist_item))\n pts = np.int32(np.column_stack((bins, hist)))\n cv2.polylines(plot_to_fill, [pts], False, color)\n # Mettre dans le bon sens\n histplot = np.flipud(plot_to_fill)\n histplot = np.uint8(histplot)\n \n # Conversion en objet QPixelMap\n self.histplot_qpix = self.convertToQPixelmap(histplot)", "def plot_cv_img(input_image): \n # change color channels order for matplotlib \n plt.imshow(cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)) \n\n # For easier view, turn off axis around image \n plt.axis('off')\n plt.show()", "def histeq( im, nbr_bins = 256):\n\t# get image histogram \n\timhist, bins = histogram( im.flatten(), nbr_bins, normed = True) \n\tcdf = imhist.cumsum() \n\t# cumulative distribution function cdf = 255 * cdf / cdf[-1] \n\t# normalize \n\t# use linear interpolation of cdf to find new pixel values \n\tim2 = interp( im.flatten(), bins[:-1], cdf) \n\treturn im2.reshape( im.shape), cdf", "def show_plot(img, title):\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.title(\"Hand Number: \" + title)\n plt.show()", "def plot(self):\n cs = plt.contour(self.X, self.Y, self.fitness_function)\n plt.clabel(cs, inline=1, fontsize=6)\n plt.imshow(self.fitness_function, extent=self.limits, origin=\"lower\", alpha=0.3)", "def compute_histogram(self, image):\n\n hist = [0] * 256\n x, y = image.shape[:2]\n #print(image.shape)\n for i in range(x):\n for j in range(y):\n hist[image[i, j]] += 1\n\n return hist", "def histogram(self, ctr1, ctr2):\n # check for the current status of the viewer\n # (tiling, aligned by wcs)\n if self.run('tile', via='get') == 'yes':\n allframes = True\n frames = self.run('frame active', via='get').split()\n else:\n allframes = False\n frames = [self.run('frame', via='get')]\n if self.run('wcs align', via='get') == 'yes':\n cs = 'wcs'\n else:\n cs = 'image'\n\n # get any currently available regions\n all_regions = self.run(f'regions -system {cs}',\n allframes=allframes, via='get')\n if not allframes:\n all_regions = [all_regions]\n\n param = self.plot_parameters\n for frame in frames:\n log.info('')\n if allframes:\n self.run('frame ' + frame)\n # check for loaded data\n if not self._loaded_data():\n continue\n\n try:\n results = self.retrieve_data(ctr1, ctr2, photometry=False)\n except (ValueError, TypeError) as err:\n log.debug(f'Error in retrieving Frame {frame} data: {err}')\n continue\n fulldata = results['fulldata']\n data = results['data']\n wdw = results['window']\n hwcs = results['wcs']\n xctr = results['xctr']\n yctr = results['yctr']\n filename = results['filename']\n\n # get file and ext name if possible\n log.info(f'Frame {frame}: 
{filename}')\n\n log.info(f'Histogram at x={ctr1}, y={ctr2} '\n f'(in {cs} coordinates)')\n\n # get data from region mask or window\n mask = self._region_mask(cs, all_regions, xctr, yctr, hwcs)\n if mask is None:\n if param['window'] is None:\n log.info('Using the full image')\n reg_name = 'full image'\n short_reg_name = 'full'\n hist_data = fulldata\n else:\n log.info(f'Using the analysis window '\n f'(width: {wdw} pixels)')\n reg_name = f'{wdw} pixel window'\n short_reg_name = f'x={xctr:.0f} y={yctr:.0f} {wdw}pix'\n hist_data = data\n else:\n reg_name = 'DS9 region'\n short_reg_name = f'x={xctr:.0f} y={yctr:.0f} region'\n hist_data = mask.multiply(fulldata)\n if hist_data is None: # pragma: no cover\n # condition occasionally but unreliably encountered\n # in testing\n log.warning('Region is too small; skipping histogram')\n continue\n hist_data[hist_data == 0] = np.nan\n\n hist_data = hist_data.ravel()\n hist_minmax = (np.nanmin(hist_data), np.nanmax(hist_data),\n np.nansum(hist_data))\n hist_stats = (np.nanmean(hist_data),\n np.nanmedian(hist_data),\n np.nanstd(hist_data))\n nnan = np.isfinite(hist_data)\n clip_stats = stats.sigma_clipped_stats(hist_data[nnan])\n text_stats = [f'Total pixels: {np.sum(nnan)}',\n f'Min, max, sum: '\n f'{hist_minmax[0]:.5g}, {hist_minmax[1]:.5g}, '\n f'{hist_minmax[2]:.5g}',\n f'Mean, median, stddev: '\n f'{hist_stats[0]:.5g}, {hist_stats[1]:.5g}, '\n f'{hist_stats[2]:.5g}',\n f'Clipped mean, median, stddev: '\n f'{clip_stats[0]:.5g}, {clip_stats[1]:.5g}, '\n f'{clip_stats[2]:.5g}']\n for t in text_stats:\n log.info(t)\n\n title = f'Frame {frame}, x={xctr:.0f} y={yctr:.0f} in {reg_name}'\n l1 = f'F{frame} {short_reg_name}'\n hist_kwargs = {'bins': param['bin'], 'label': l1, 'alpha': 0.8}\n if param['hist_limits'] is not None:\n hist_kwargs['range'] = (param['hist_limits'][0],\n param['hist_limits'][1])\n new_hist = {'plot_type': 'histogram', 'args': [hist_data],\n 'kwargs': hist_kwargs}\n\n if param['separate_plots'] or len(self.histogram_data) < 1:\n # summary stat (mean, median, clipped mean, or clipped median)\n summary_stat = str(param.get('summary_stat', 'mean')).lower()\n if 'clip' in summary_stat:\n se = clip_stats[2]\n if 'median' in summary_stat:\n ss = clip_stats[1]\n ss_label = 'Clipped median'\n else:\n ss = clip_stats[0]\n ss_label = 'Clipped mean'\n else:\n se = hist_stats[2]\n if 'median' in summary_stat:\n ss = hist_stats[1]\n ss_label = 'Median'\n else:\n ss = hist_stats[0]\n ss_label = 'Mean'\n l2 = f'{ss_label} {ss:.3g} +/- {se:.3g}'\n\n overplots = [new_hist]\n vlines = [ss, ss - se, ss + se]\n vlabels = [l2, None, None]\n vstyles = ['-', ':', ':']\n for vdata, vlabel, vstyle in zip(vlines, vlabels, vstyles):\n overplots.append({'plot_type': 'vline',\n 'args': [vdata],\n 'kwargs': {'label': vlabel,\n 'color': 'gray',\n 'linewidth': 1,\n 'linestyle': vstyle}})\n overplots.append({'plot_type': 'legend', 'args': []})\n\n plot_data = {'args': [],\n 'kwargs': {'title': title,\n 'xlabel': 'Flux',\n 'ylabel': 'Count',\n 'colormap': param['color']},\n 'plot_kwargs': {},\n 'overplot': overplots}\n self.histogram_data.append(plot_data)\n else:\n # append new histogram to existing ones\n plot_data = self.histogram_data[-1]\n overplots = []\n for plot in plot_data['overplot']:\n if plot['plot_type'] == 'histogram':\n overplots.append(plot)\n overplots.append(new_hist)\n overplots.append({'plot_type': 'legend', 'args': []})\n plot_data['overplot'] = overplots\n plot_data['kwargs']['title'] = 'All histogram regions'\n\n if self.signals is not 
None:\n self.signals.make_histogram_plot.emit()", "def plot_image(image, factor=1):\n fig = plt.subplots(nrows=1, ncols=1, figsize=(15, 7))\n\n if np.issubdtype(image.dtype, np.floating):\n plt.imshow(np.minimum(image * factor, 1))\n plt.show()\n else:\n plt.imshow(image)\n plt.show()", "def imshow(self):\n axes([0, 0, 1, 1], xticks=[], yticks=[])\n imshow(self.rgb_image())", "def histo_image(image, verbose=False):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n histo_global = cv2.equalizeHist(gray)\n\n _, histo = cv2.threshold(histo_global, thresh=250,\n maxval=255, type=cv2.THRESH_BINARY)\n\n if verbose:\n plt.imshow(histo, cmap='gray')\n plt.show()\n\n return histo", "def hist(self,geo,pfile):\n\n # Create histogram of box data, rounding to nearest integers if temperature\n boxdata = self.img.flatten()\n imin = int(round(min(boxdata))) - 1\n imax = int(round(max(boxdata))) + 1\n ni = imax-imin+1 # number of bins to plot\n h = np.zeros(ni,dtype=int) # initialise with zeros\n for val in boxdata: # assign each image value to a bin\n i = int(round(val)) - imin \n h[i] += 1\n n = sum(h) # total number of values binned\n h = h * 100.0/n # convert no.in bins to %frequency\n plt.figure(WINDOW_HIST,figsize=(4,4))\n plt.clf()\n # Create title for histogram plot\n ttl = self.desc + '\\n' + \\\n 'Box: X=' + str(self.ix-self.mbox) + ':' \\\n + str(self.ix) + ':' \\\n + str(self.ix+self.mbox) + \\\n ', Y=' + str(self.iy-self.mbox) + ':' \\\n + str(self.iy) + ':' \\\n + str(self.iy+self.mbox)\n plt.title(ttl)\n plt.ylabel(\"% Frequency\")\n tdisp = self.label in ( 'T9', 'T10', 'TS' )\n if tdisp: plt.xlabel(\"Pixel Temperature [K]\")\n else: plt.xlabel(\"Pixel Value [0:255]\")\n xval = np.arange(imin,imax+1,dtype=int)\n # Set colour of histogram according to channel\n plt.bar(xval,h,color=plot_colours.get(self.label,'gray'))\n x0,x1 = plt.xlim()\n y0,y1 = plt.ylim()\n boxmean = np.mean(boxdata)\n boxsd = np.std(boxdata)\n midpix = self.img[self.mbox,self.mbox]\n plt.plot( boxmean+[0,0], [y0,y1], ':', color='black' )\n plt.errorbar ( boxmean, 0.9*y1, xerr=boxsd, color='black', \n capsize=4 )\n plt.plot ( midpix, 0.9*y1, 's', color='black', \n markerfacecolor='none' ) \n plt.tight_layout()\n if boxmean > 0.5 * ( x1 + x0 ): xt = x0 + 0.4 * ( x1 - x0 )\n else: xt = x0 + 0.95*(x1-x0)\n yt = y0 + 0.95*(y1-y0)\n yd = 0.05*(y1-y0)\n text = 'Mean = {:6.2f}'.format(boxmean)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n text = 'S.D. 
= {:6.2f}'.format(boxsd)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n text = 'NPix = {:6n}'.format(n)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n if tdisp: text = 'MidPix = {:6.2f}'.format(midpix)\n else: text = 'MidPix = {:6n}'.format(midpix)\n plt.text(xt,yt,text,ha=\"right\")\n if geo.cal:\n lat,lon,zen = geo.locate(self.ix,self.iy) \n text = 'Lat = {:6.2f}'.format(lat)\n yt -= yd\n plt.text(xt,yt,text,ha=\"right\") \n text = 'Lon = {:6.2f}'.format(lon)\n yt -= yd\n plt.text(xt,yt,text,ha=\"right\") \n if pfile: \n file = input ( \"Save to file (<CR>=hist.pdf): \" ) or \"hist.pdf\"\n plt.savefig(file)", "def plot_image(self, ax=None, offset=None, energy=None, **kwargs):\n import matplotlib.pyplot as plt\n\n kwargs.setdefault('cmap', 'afmhot')\n kwargs.setdefault('origin', 'bottom')\n kwargs.setdefault('interpolation', 'nearest')\n\n ax = plt.gca() if ax is None else ax\n\n if offset is None:\n vals = self.offset.value\n offset = np.linspace(vals.min(), vals.max(), 100)\n offset = Angle(offset, self.offset.unit)\n\n if energy is None:\n vals = self.energy.value\n energy = np.logspace(np.log10(vals.min()), np.log10(vals.max()), 100)\n energy = Quantity(energy, self.energy.unit)\n\n aeff = self.evaluate(offset, energy).T\n extent = [\n offset.value.min(), offset.value.max(),\n energy.value.min(), energy.value.max(),\n ]\n ax.imshow(aeff.value, extent=extent, **kwargs)\n # ax.set_xlim(offset.value.min(), offset.value.max())\n # ax.set_ylim(energy.value.min(), energy.value.max())\n\n ax.semilogy()\n ax.set_xlabel('Offset ({0})'.format(offset.unit))\n ax.set_ylabel('Energy ({0})'.format(energy.unit))\n ax.set_title('Effective Area ({0})'.format(aeff.unit))\n ax.legend()\n\n return ax", "def plot_color_distribution(img, trimap):\n\n f = img[trimap == 255]\n b = img[trimap == 0]\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('Red')\n ax.set_ylabel('Blue')\n ax.set_zlabel('Green')\n ax.set_title(\"3D RGB Scatter\")\n\n for source in [f, b]:\n X, Y, Z = source[:, 0], source[:, 1], source[:, 2]\n ax.scatter3D(X, Y, Z)", "def plot_images(images, labels, nrows, ncols, cls_true=None, cls_pred=None, grey=False):\n fig, axes = plt.subplots(nrows, ncols, figsize=(16, 2*nrows))\n\n for i, ax in enumerate(axes.flat): \n if grey:\n ax.imshow(images[i,:,:,0], cmap='binary')\n else:\n ax.imshow(images[i])\n\n ax.set_xticks([]); ax.set_yticks([])\n if labels:\n ax.set_title(labels[i])", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def plot_img(img, savefig=\"test.png\", **kwargs):\n plt.figure()\n if img.ndim > 2:\n plt.imshow(cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB), **kwargs)\n else:\n plt.imshow(img.astype(np.uint8), **kwargs)\n plt.axis(\"off\")\n if savefig:\n cv2.imwrite(savefig, img.astype(np.uint8))", "def histogram(self):\r\n channel = self.ui.channel_selection.itemData(self.ui.channel_selection.currentIndex())\r\n\r\n #create a window, the reference must be stored, because the window\r\n #gets destroyed when its reference is garbage collected\r\n #make plotWindow a list and append to that if multiple windows should be possible\r\n title = \"histogram of {:s} channel\".format(self.ui.channel_selection.currentText())\r\n self.plotWindow = pyguitools.SimplePlotWindow(name = title)\r\n self.plotWindow.ax1.hist(self.npImg[self.ui.y0.value():self.ui.y1.value(),\r\n self.ui.x0.value():self.ui.x1.value(), \r\n channel].flatten(),\r\n bins=self.settings[\"histogramm bins\"],\r\n 
range=(self.settings[\"histogramm min\"],self.settings[\"histogramm max\"]))\r\n self.plotWindow.ax1.set_xlim(self.settings[\"histogramm min\"],self.settings[\"histogramm max\"]) \r\n self.plotWindow.show()", "def plot_preds(image, preds):\n plt.imshow(image)\n plt.axis('off')\n\n plt.figure()\n order = list(reversed(range(len(preds))))\n bar_preds = [pr[2] for pr in preds]\n labels = (pr[1] for pr in preds)\n plt.barh(order, bar_preds, alpha=0.5)\n plt.yticks(order, labels)\n plt.xlabel('Probability')\n plt.xlim(0,1.01)\n plt.tight_layout()\n plt.show()\n return labels", "def plot_vector_as_image(image, h, w, title):\n plt.imshow(image.reshape((h, w)), cmap=plt.cm.gray)\n plt.title(title, size=12)\n plt.show()", "def plot_hist(self):\n \n plt.figure();\n self.dist_frame.plot(kind='hist',legend=False,orientation='horizontal')", "def plotFace(original,blurred):\n plt.subplot(121),plt.imshow(original,cmap=cm.Greys_r),plt.title('Original')\n plt.xticks([]), plt.yticks([])\n plt.subplot(122),plt.imshow(blurred,cmap=cm.Greys_r),plt.title('Gaussian Blurred')\n plt.xticks([]), plt.yticks([])\n return None", "def get_color_hist(img, nbins=32, visualize=False):#, bins_range=(0, 1)):\n channel1_hist = np.histogram(img[:, :, 0], bins=nbins)#, range=bins_range)\n channel2_hist = np.histogram(img[:, :, 1], bins=nbins)#, range=bins_range)\n channel3_hist = np.histogram(img[:, :, 2], bins=nbins)#, range=bins_range)\n\n hist_feat = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n\n if visualize:\n plot_histfeatures(channel1_hist, channel2_hist, channel3_hist)\n return hist_feat", "def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")", "def plot_preds(image, preds):\n plt.imshow(image)\n plt.axis('off')\n\n plt.figure()\n \"\"\"labels = (\"BEANS\", \"CAKE\",\"CANDY\",\"CEREAL\",\"CHIPS\",\n \"CHOCOLATE\", \"COFFEE\", \"CORN\", \"FISH\", \"FLOUR\",\n \"HONEY\", \"JAM\", \"JUICE\", \"MILK\", \"NUTS\",\"OIL\",\"PASTA\",\n \"RICE\", \"SODA\", \"SPICES\", \"SUGAR\", \"TEA\", \"TOMATO_SAUCE\",\n \"VINEGAR\",\"WATER\")\"\"\"\n labels = (\"BEANS\", \"CAKE\",\"CHIPS\")\n\n x_pos = np.arange(len(labels))\n print(x_pos)\n plt.barh(x_pos, preds, align='center', alpha=0.5)\n plt.yticks(x_pos, labels)\n plt.xlabel('Probability')\n plt.xlim(0,1.01)\n plt.tight_layout()\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def draw_hogs(img, hog_img, vect, rescale=True, fname=\"hog_plot.png\"):\n \"\"\"\n fig, (ax1, ax2) = plt.subplots(1,2, 
figsize=(16, 10), sharex=True, sharey=True)\n \n ax1.axis('off')\n ax1.imshow(img, cmap=plt.cm.gray)\n ax1.set_title('Vstupni obrazek')\n ax1.set_adjustable('box-forced')\n \n hog_img_rescaled = exposure.rescale_intensity(hog_img, in_range=(0, 0.02))\n \n ax2.axis('off')\n ax2.imshow(hog_img_rescaled, cmap=plt.cm.gray)\n ax2.set_title('Histogram Orientovanych Gradientu')\n ax2.set_adjustable('box-forced')\n \n if show_plots: plt.show()\"\"\"\n \n fig = plt.figure(figsize=(18, 12))\n\n gs = GridSpec(2, 2)\n ax1 = plt.subplot(gs[0, :1])\n ax2 = plt.subplot(gs[0, -1])\n ax3 = plt.subplot(gs[1, :])\n \n ax1.axis('off')\n ax1.imshow(img, cmap=plt.cm.gray)\n ax1.set_title('Vstupni obrazek - predzpracovany')\n #ax1.set_adjustable('box-forced')\n \n hog_img_rescaled = exposure.rescale_intensity(hog_img, in_range=(0, 0.02)) if rescale else hog_img\n \n ax2.axis('off')\n ax2.imshow(hog_img_rescaled, cmap=plt.cm.gray)\n ax2.set_title('Histogram Orientovanych Gradientu')\n #ax2.set_adjustable('box-forced')\n \n ax3.plot(vect)\n ax3.grid()\n \n if show_plots: \n plt.show()\n plt.savefig(foldername+\"/hog_plots/\"+fname)\n plt.savefig(parentname+\"/hog_plots/\"+fname+\"/\"+childname+\".png\")\n dr.save_image(hog_img, parentname+\"/hog_images/\"+fname+\"/\"+childname+\".png\")\n \n if close_plots:\n plt.close('all')", "def plot_histograms_from_imgs(self, images_itk: list, file_name='', titles=[],\n plane=PlaneTypes.AXIAL):\n\n n_images = len(images_itk)\n fig, ax = plt.subplots(2, n_images, squeeze=True, figsize=(8 * n_images, 8))\n # Iterate over all the images\n for ii, c_img in enumerate(images_itk):\n img_np = sitk.GetArrayFromImage(c_img)\n middle_slice = get_slices(SliceMode.MIDDLE, img_np)[0]\n\n if n_images > 1:\n t1 = ax[0][ii]\n t2 = ax[1][ii]\n else:\n t1 = ax[0]\n t2 = ax[1]\n\n if plane == PlaneTypes.ALL:\n data_for_histogram = img_np\n else:\n data_for_histogram = get_proper_plane(img_np, plane, middle_slice)\n\n t1.hist(data_for_histogram.flatten(), 'auto')\n t2.imshow(get_proper_plane(img_np, plane, middle_slice))\n if len(titles) > 0:\n t1.title.set_text(titles[ii])\n t2.title.set_text(titles[ii])\n\n if file_name != '':\n pylab.savefig(join(self._output_folder, file_name), bbox_inches='tight')\n\n self._close_figure()", "def get_image_stats(image, out_dir, cur_file):\n # Output directory\n output_base = osp.join(out_dir, cur_file.split('.')[0])\n os.mkdir(output_base)\n # Print dimensions of the image\n width, height, color = image.shape\n print('The resolution of the image if of {}x{}x{}'.format(width,\n height,\n color))\n print('Total of {} pixels'.format(width * height * color))\n\n # Get histogram\n print('Calculating histogram')\n flat_img = image.mean(axis=2).flatten()\n counts, bins = np.histogram(flat_img, range(257))\n plt.bar(bins[:-1], counts, width=1, edgecolor='none')\n output_file = osp.join(out_dir, output_base, 'histogram.png')\n plt.xlabel('Intensidad')\n plt.ylabel('Número de pixeles')\n print('Saving histogram')\n plt.savefig(output_file, bbox_inches='tight')\n plt.close()\n\n # LAB space\n lab_image = cv2.cvtColor(image[8000:8500, 8000:8500, :], cv2.COLOR_BGR2LAB)\n output_file = osp.join(out_dir, output_base, 'lab.png')\n cv2.imwrite(output_file, lab_image)\n output_file = osp.join(out_dir, output_base, 'original.png')\n cv2.imwrite(output_file, image[8000:8500, 8000:8500, :])", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n 
plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def plot_obs(self):\n if self.obs_im is None and self.obs_ax is None:\n fig, self.obs_ax = plt.subplots()\n self.obs_ax.set_title('Observation')\n self.obs_ax.set_xticks(())\n self.obs_ax.set_yticks(())\n self.obs_im = self.obs_ax.imshow(self.obs, cmap='gray')\n else:\n self.obs_im.set_data(self.obs)", "def plot(data, interactive=False):\n if interactive:\n plt.ion()\n fig = plt.figure()\n fig.canvas.draw()\n image = call_imshow(data)\n else:\n fig = plt.figure()\n image = call_imshow(data)\n plt.show()\n return fig, image", "def plot_image(image2d,colorbar=True,cm=None,percentile=(None,None)):\n from matplotlib.colors import LinearSegmentedColormap\n cmDefault='gray'\n if type(cm) is str:\n if cm.lower() in 'red green blue magenta':\n cm = LinearSegmentedColormap.from_list(cm,['black', cm])\n elif not cm in COLORMAPS:\n print(\"WARNING: colormap '%s' is invalid. Consider:\"%cm)\n [print(\" \"+x) for x in COLORMAPS]\n cm=None\n if not cm:\n cm=cmDefault\n if percentile[0] is None:low=np.min(image2d)\n else:low=np.percentile(image2d,percentile[0])\n if percentile[1] is None:high=np.max(image2d)\n else:high=np.percentile(image2d,percentile[1])\n plt.imshow(image2d,cmap=cm,clim=(low,high))\n if colorbar:\n plt.colorbar()", "def plt_show_image(image):\r\n plt.imshow(image)\r\n plt.axis('off')\r\n plt.axis('image')\r\n plt.tight_layout(pad=0)", "def plot_image(self, image, axes=None, colorbar=True, imshow_args={}):\n # Set up new axes if necessary.\n if axes is None:\n fig = plt.figure()\n axes = fig.add_subplot(111)\n\n # Plot the image.\n # Set options, without overwriting caller settings.\n imshow_args = {\n 'X': image,\n 'cmap': plt.get_cmap('jet'),\n **imshow_args\n }\n imshow_plot = axes.imshow(**imshow_args)\n # Make the ascpect ratio 1 to avoid stretching the image.\n axes.set_aspect(1.)\n\n # Add a colorbar if requested.\n if colorbar:\n plt.colorbar(imshow_plot)\n\n # Return the axes of the plot.\n return axes", "def create_histogram(self, i):\n # styling\n sns.set(style=\"whitegrid\")\n font = {'weight': 'normal'}\n plt.rc('font', **font)\n plt.rc('axes', labelsize=25) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=25) # fontsize of the tick labels\n plt.rc('ytick', labelsize=25)\n fig, ax = plt.subplots(1, 1, figsize=(5, 5), dpi=100)\n try:\n if self.dtype_is_object() or self.num_of_values() <= 15:\n if self.num_of_values() > 15:\n data = pd.to_numeric(self.data, errors='coerce')\n plot = sns.distplot(data.dropna())\n else:\n plot = sns.countplot(self.remove_nan_values())\n else:\n plot = sns.distplot(self.remove_nan_values())\n plot.set(xlabel='', ylabel='')\n except Exception:\n plt.text(0.5, 0.5, f'Unable to plot', ha='center', va='center', transform=ax.transAxes, fontsize=16)\n if not os.path.isdir('hist_images'):\n os.mkdir('hist_images')\n plt.savefig(f'hist_images/histogram{i}.png', bbox_inches='tight')\n plt.close()\n plt.clf()", "def show_image(image, percl=99, percu=None, figsize=(6, 10), cmap='viridis', log=False): \n if percu is None:\n percu = percl\n percl = 100-percl\n \n if figsize is not None:\n # Rescale the fig size to match the image dimensions, roughly\n image_aspect_ratio = image.shape[0]/image.shape[1]\n figsize = (max(figsize) * image_aspect_ratio, max(figsize))\n \n fig, ax = plt.subplots(1,1, figsize=figsize)\n \n # To preserve details we should *really* downsample correctly and not rely on \n # matplotlib to do it correctly for us (it won't).\n \n # So, calculate the 
size of the figure in pixels, block_reduce to roughly that,\n # and display the block reduced image.\n \n # Thanks, https://stackoverflow.com/questions/29702424/how-to-get-matplotlib-figure-size\n fig_size_pix = fig.get_size_inches() * fig.dpi\n \n ratio = (image.shape // fig_size_pix).max()\n \n if ratio < 1:\n ratio = 1\n \n # Divide by the square of the ratio to keep the flux the same in the reduced image\n reduced_data = block_reduce(image, ratio) / ratio**2\n\n # Of course, now that we have downsampled, the axis limits are changed to match\n # the smaller image size. Setting the extent will do the trick to change the axis display\n # back to showing the actual extent of the image.\n extent = [0, image.shape[1], 0, image.shape[0]]\n if log:\n stretch = aviz.LogStretch()\n else:\n stretch = aviz.LinearStretch()\n norm = aviz.ImageNormalize(reduced_data, interval=aviz.AsymmetricPercentileInterval(percl, percu), \n stretch=stretch)\n\n plt.colorbar(ax.imshow(reduced_data, norm=norm, origin='lower', cmap=cmap, extent=extent))" ]
[ "0.7969631", "0.78266644", "0.7776819", "0.74996084", "0.7412575", "0.72977114", "0.68852156", "0.6873014", "0.6869999", "0.668876", "0.66544735", "0.66544735", "0.66544735", "0.66200167", "0.652162", "0.6493563", "0.64923745", "0.6483739", "0.64781225", "0.6413293", "0.63585216", "0.6323674", "0.63230073", "0.6289314", "0.6266374", "0.62630296", "0.6245436", "0.6242609", "0.622281", "0.61817324", "0.61742723", "0.61742723", "0.61742723", "0.6159538", "0.6143227", "0.6136734", "0.61151254", "0.6091764", "0.6078497", "0.6069277", "0.6054033", "0.60505056", "0.6046198", "0.6028784", "0.6027459", "0.60212713", "0.6007193", "0.5984232", "0.5931098", "0.5927537", "0.5914306", "0.5907558", "0.5896937", "0.58946294", "0.5869886", "0.583961", "0.5824806", "0.5824566", "0.5823405", "0.58183837", "0.5814506", "0.5781543", "0.577492", "0.57735455", "0.5753067", "0.5744209", "0.5737022", "0.5730986", "0.57259005", "0.57223725", "0.5721261", "0.5709105", "0.5696373", "0.5693088", "0.5682694", "0.5676279", "0.56730455", "0.5672627", "0.5666668", "0.56580746", "0.56563824", "0.56543225", "0.56469405", "0.56456685", "0.56433487", "0.564179", "0.564179", "0.564179", "0.56399226", "0.56363213", "0.56292254", "0.5610723", "0.56098676", "0.56054944", "0.56027204", "0.5597843", "0.559716", "0.5592441", "0.5590246" ]
0.7809336
3
Download metadata catalogues necessary for downloading via FORCE if user confirms.
def download_catalogues(directory): while True: answer = input(f"\nTo download datasets via FORCE, it is necessary to have " f"metadata catalogues stored in a local directory (size ~9 GB).\n " f"More information: https://force-eo.readthedocs.io/en/latest/howto/level1-csd.html#downloading-the-metadata-catalogues \n" f"Do you want to download the latest catalogues into {directory}? (y/n)") if answer in ['y', 'yes']: print("\n#### Starting download...") utils.isdir_mkdir(directory) out = Client.execute(FORCE_PATH, ["force-level1-csd", "-u", directory], options=["--cleanenv"], stream=True) for line in out: print(line, end='') elif answer in ['n', 'no']: print("\n#### Download cancelled...") sys.exit() else: print(f"\n{answer} is not a valid answer!") continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)", "def download_meta(self):\n for f in self._manager.remote.list_contents(\".yml\"):\n self._manager.remote.download(f)", "def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file", "def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def maybe_download_and_extract(self, force=False):\r\n if force:\r\n if os.path.exists(self.get_working_dir()):\r\n logger.info(\"Removing downloaded data...\")\r\n shutil.rmtree(self.get_working_dir(), ignore_errors=True)\r\n while os.path.exists(self.get_working_dir()):\r\n pass", "def download_and_prepare(self):\n self._download_and_prepare()", "def single_download(self, url, meta_mode=False):\n self.println(DL_HEAD)\n try:\n if self.djs_core is None or self.analyzer is None:\n print(\"Download failed, enter `help` for help.\")\n else:\n if meta_mode:\n self._meta_download([url, ])\n else:\n self._download([url, ])\n os.chdir(self.home)\n except Exception as e:\n self.println(\"Download failed and stopped.\")\n print(str(e))\n self.println(DL_TAIL)", "def get_ckan_metadata(self, force_download=False):\n # Simplify the metadata structure to insulate from CKAN API changes? Only need resource name or dataset title?\n # No - more explicit if done in accessor methods instead, e.g. 
`self.get_resource_metadata`\n if not self._metadata and force_download is False:\n self.load_user_metadata()\n\n if not self._metadata or \\\n force_download or \\\n (self._metadata_last_updated + datetime.timedelta(seconds=self.check_for_updates_every) <\n datetime.datetime.utcnow()):\n try:\n # This returns a list of datasets, and within each there is a 'resources' key with a list of resources.\n metadata = self.api.action.package_search(include_private=True)['results']\n # `api.current_package_list_with_resources` gets public resources only, not private ones.\n except requests.exceptions.ConnectionError as e:\n error = \\\n 'Unable to reach CKAN and no local copy of CKAN metadata found at %s' % self.metadata_cache_filename\n logging.error(error)\n raise RuntimeError('%s\\n%s' % (error, str(e)))\n\n self._metadata_last_updated = datetime.datetime.utcnow()\n\n self._metadata = dict()\n for dataset in metadata:\n for resource in dataset['resources']:\n # After unpickling, `(meta['resource_a']['dataset'] is meta['resource_b']['dataset'])`\n resource['dataset'] = dataset\n self._metadata[resource['id']] = resource\n\n # self._metadata = {resource_id: {resource}} where resource['dataset'] = {dataset} for all CKAN resources\n\n if not self._in_context_block:\n self.save_user_metadata()\n return self._metadata", "def _download_metadata(track_id, dataset_version):\n metadata_path = os.path.join(METADATA_PATH, _METADATA_FMT % track_id)\n if os.path.exists(metadata_path):\n return True\n\n try:\n top_folderid = GDRIVE_FOLDERS[dataset_version]\n except KeyError:\n raise IOError(\"Unable to find data in Google Drive for this version.\")\n\n file_list = get_named_child(top_folderid, track_id)\n correct_file = [f for f in file_list if f['title'] == track_id]\n\n if len(correct_file) == 0:\n raise IOError(\"Could not find multitrack\")\n else:\n mtrack_file = correct_file[0]\n\n metadata_file_list = get_named_child(mtrack_file['id'], 'METADATA')\n if len(metadata_file_list) > 0:\n metadata_file = metadata_file_list[0]\n else:\n folder_file_list = get_files_in_folder(mtrack_file['id'])\n print(len(folder_file_list))\n for fobject in folder_file_list:\n print(fobject['title'])\n raise IOError(\"Could not find Metadata\")\n\n download_file(metadata_file['id'], metadata_path)\n\n DOWNLOADED_FILEPATHS.append(metadata_path)\n\n return True", "def download():\n raise NotImplementedError", "def download(self):\n if not os.path.exists(self.pkg_dir):\n os.makedirs(self.pkg_dir)\n\n url = self.metadata_pkg[\"url\"]\n\n # Download modelpkg only if not already downloaded.\n if os.path.exists(self.file_path):\n self.is_downloaded = True\n else:\n print(f\"Fetching {os.path.basename(self.file_path)} model package from {url} to {self.file_path}\", flush=True)\n r = requests.get(url, stream=True)\n with open(self.file_path, \"wb\") as file_out:\n for chunk in r.iter_content(chunk_size=2048):\n file_out.write(chunk)\n r.close()\n self.is_downloaded = True", "def fetch(self, is_dl_forced=False):\n\n self.get_files(is_dl_forced)\n\n return", "def download(self):\n pass", "def download(self):\n pass", "def _fetch_info(self, items, write, force):\n tags = self.config['tags'].as_str_seq()\n for item in items:\n # If we're not forcing re-downloading for all tracks, check\n # whether the data is already present. 
We use one\n # representative field name to check for previously fetched\n # data.\n if not force:\n mood_str = item.get('mood_acoustic', '')\n if mood_str:\n self._log.info('data already present for: {}', item)\n continue\n\n # We can only fetch data for tracks with MBIDs.\n if not item.mb_trackid:\n continue\n\n self._log.info('getting data for: {}', item)\n data = self._get_data(item.mb_trackid)\n if data:\n for attr, val in self._map_data_to_scheme(data, ABSCHEME):\n if not tags or attr in tags:\n self._log.debug('attribute {} of {} set to {}',\n attr,\n item,\n val)\n setattr(item, attr, val)\n else:\n self._log.debug('skipping attribute {} of {}'\n ' (value {}) due to config',\n attr,\n item,\n val)\n item.store()\n if write:\n item.try_write()", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def download(self, force: bool = False):\n # hide this import, since we'll only ever need it _once_ (per model version)\n from .. 
import io as tio\n\n model_fname = self.model_fpath.name\n url = urllib.parse.urljoin(\n \"https://github.com/bdewilde/textacy-data/releases/download/\",\n self.model_id + \"/\" + model_fname,\n )\n tio.utils.download_file(\n url, filename=model_fname, dirpath=self.data_dir, force=force,\n )", "def download(self, download) -> None:\n path_cifarh = path.join(self.root, self.filename_cifarh)\n path_cifar = path.join(self.root, self.filename_cifar)\n is_there = path.isfile(path_cifarh) and path.isfile(path_cifar)\n if is_there:\n print(\"Files already exist.\")\n if download == \"force\" or not is_there:\n download_and_extract_archive(\n self.url_cifar, self.root, filename=self.filename_cifar\n )\n download_and_extract_archive(\n self.url_cifarh, self.root, filename=self.filename_cifarh\n )", "def download(self):\n #the link has some meta data in it that we need to get a hold of so we cant use metaData.getLink()\n data = None\n\n for link in self.metaData.jsonObj['links']:\n if link.get('rel') == \"content\":\n data = link\n\n assert data is not None\n\n response = self._adapter.getRequest(data['href'], self._baseHeader)\n return {\"filename\": data['title'], \"mime\": data['type'], \"binary\": response['Body'] }", "def download_dataset(self):\n dataset_name = ADE20K_URL.split(\"/\")[-1].split(\".\")[0]\n req = urllib.request.Request(ADE20K_URL, method=\"HEAD\")\n size_file = urllib.request.urlopen(req).headers[\"Content-Length\"]\n download = \"n\"\n while download != \"y\":\n if not self.yes_all:\n download = input(f\"You are about to download {dataset_name} ({size_file} bytes) to the temporary folder {self.tmp_path}. Do you want to continue? [y/n] \\n\")\n if self.yes_all or download == \"y\":\n logger.info(f\"Downloading dataset {dataset_name} at {ADE20K_URL} to temporary folder {self.tmp_path}...\")\n zip_path, hdrs = urllib.request.urlretrieve(ADE20K_URL, f\"{self.tmp_path}/{dataset_name}.zip\")\n logger.info(f\"Extracting {zip_path} to temporary folder {self.tmp_path}...\")\n with zipfile.ZipFile(f\"{zip_path}\", 'r') as z:\n z.extractall(f\"{self.tmp_path}\")\n self.input_data_path = zip_path[:-4]\n break\n elif download == \"n\":\n logger.error(f\"Cannot pursue without downloading the dataset.\")\n sys.exit()\n else:\n logger.error(\"Please enter a valid answer (y or n).\")", "def download_files(self):", "def download_mission(self):\n cmds = self.vehicle.commands\n cmds.download()\n # Wait until download is complete.\n cmds.wait_valid()", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def metadata_storage_config():\n\n\tprint_section_header(\"METADATA STORAGE\", Fore.BLUE)\n\n\t###\n\t# Language Modeling Data\n\t###\n\n\tif prompt_yes_no(top_line=\"-> 
Clear language modeling metadata?\",\n\t bottom_line=\"This includes user spelling, typing and suggestion data.\"):\n\t\tprint_confirmation(\"Removing language modeling data...\")\n\t\tsp.run('rm -rfv \"~/Library/LanguageModeling/*\" \"~/Library/Spelling/*\" \"~/Library/Suggestions/*\"', shell=True, stdout=sp.PIPE)\n\n\tif prompt_yes_no(bottom_line=\"-> Disable language modeling data collection?\"):\n\t\tprint_confirmation(\"Disabling language modeling data collection...\")\n\t\tsp.run('sudo chmod -R 000 ~/Library/LanguageModeling ~/Library/Spelling ~/Library/Suggestions', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo chflags -R uchg ~/Library/LanguageModeling ~/Library/Spelling ~/Library/Suggestions', shell=True, stdout=sp.PIPE)\n\n\t###\n\t# QuickLook and Quarantine Data\n\t###\n\n\tif prompt_yes_no(top_line=\"-> Clear QuickLook metadata?\",\n\t bottom_line=\"This will erase spotlight user data.\"):\n\t\tprint_confirmation(\"Removing QuickLook metadata...\")\n\t\tsp.run('rm -rfv \"~/Library/Application Support/Quick Look/*\"', shell=True, stdout=sp.PIPE)\n\n\t###\n\t# Downloads Metadata\n\t###\n\n\t# TODO: BUG --> /bin/sh: /Users/alichtman/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2: Operation not permitted\n\tif prompt_yes_no(bottom_line=\"-> Clear Downloads metadata?\"):\n\t\tprint_confirmation(\"Removing Downloads metadata...\")\n\t\tsp.run(':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2', shell=True, stdout=sp.PIPE)\n\n\tif prompt_yes_no(bottom_line=\"-> Disable metadata collection from Downloads?\"):\n\t\tprint_confirmation(\"Disabling Quarantine data collection from downloaded files...\")\n\t\tsp.run('sudo chflags schg ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2', shell=True, stdout=sp.PIPE)\n\n\t# TODO: ERRORS\n\t\t# chmod: ~/Library/Application Support/Quick Look: No such file or directory\n\t\t# chflags: ~/Library/Application Support/Quick Look: No such file or directory\n\n\t# if prompt_yes_no(bottom_line=\"Disable QuickLook data logging?\"):\n\t# \tprint_confirmation(\"Disabling QuickLook data logging...\")\n\t# \tsp.run('sudo chmod -R 000 \"~/Library/Application Support/Quick Look\"', shell=True, stdout=sp.PIPE)\n\t# \tsp.run('sudo chflags -R uchg \"~/Library/Application Support/Quick Look\"', shell=True, stdout=sp.PIPE)\n\n\t###\n\t# Siri Metadata\n\t###\n\n\tif prompt_yes_no(bottom_line=\"-> Clear SiriAnalytics database?\"):\n\t\tprint_confirmation(\"Clearing SiriAnalytics database...\")\n\t\tsp.run('rm -rfv ~/Library/Assistant/SiriAnalytics.db', shell=True, stdout=sp.PIPE)", "def _download_openfda_faers(self, resource, output) -> List[ManifestResource]:\n self._logger.info(\"OpenFDA available files download, URI '{}' --- START ---\".format(resource.uri))\n self._logger.info(\"Download OpenFDA FAERS repository metadata\")\n download = Downloads.download_staging_http(output.staging_dir, resource)\n repo_metadata = {}\n with open(download.path_destination, 'r') as f:\n repo_metadata = json.load(f)\n return self._download_selected_event_files(repo_metadata, output)", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def _download_to_flc(self):\n self.communicator.download_to_flc()", "def download_data(dataset_name=None):\r\n\r\n dr = data_resources[dataset_name]\r\n if not 
authorize_download(dataset_name):\r\n raise Exception(\"Permission to download data set denied.\")\r\n\r\n if dr.has_key('suffices'):\r\n for url, files, suffices in zip(dr['urls'], dr['files'], dr['suffices']):\r\n for file, suffix in zip(files, suffices):\r\n download_url(os.path.join(url,file), dataset_name, dataset_name, suffix=suffix)\r\n else:\r\n for url, files in zip(dr['urls'], dr['files']):\r\n for file in files:\r\n download_url(os.path.join(url,file), dataset_name, dataset_name)\r\n return True", "def download(self, target: PluginJar, *, force: bool = False) -> bool:\n pass", "def download(self,**attrs):\n\t\treturn super().download(**attrs)", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def get_all_info(self) -> None:\n self.fetch_info(False)\n if not self.found and not Config.Config.get_strict_meta():\n Logger.Logger.log('No iTunes data found using full song name, retrying using a shorter version...')\n self.fetch_info(True)\n if not self.found:\n Logger.Logger.log('No available data for this song, skipping it...')\n return\n self.fetch_cover()\n self.fetch_lyrics()", "def main():\n get_obofoundry(force_download=True)", "def authorize_download(dataset_name=None):\r\n print('Acquiring resource: ' + dataset_name)\r\n # TODO, check resource is in dictionary!\r\n print('')\r\n dr = data_resources[dataset_name]\r\n print('Details of data: ')\r\n print(dr['details'])\r\n print('')\r\n if dr['citation']:\r\n print('Please cite:')\r\n print(dr['citation'])\r\n print('')\r\n if dr['size']:\r\n print('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.')\r\n print('')\r\n print('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.')\r\n print('')\r\n if overide_manual_authorize:\r\n if dr['license']:\r\n print('You have agreed to the following license:')\r\n print(dr['license'])\r\n print('')\r\n return True\r\n else:\r\n if dr['license']:\r\n print('You must also agree to the following license:')\r\n print(dr['license'])\r\n print('')\r\n return prompt_user('Do you wish to proceed with the download? 
[yes/no]')", "def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df", "def downloadAll(self, force=False):\n if self.minutesSinceLastUpdate() == 0 and force == False:\n self.log(\"TOO SOON SINCE LAST DOWNLOAD!\")\n return\n for grabber in self.grabbers:\n self.downloadGrab(grabber[\"url\"], grabber[\"ID\"])+\"\\n\"", "def download_dataset(self):\n raise NotImplementedError", "def find_and_download_files(context):\n\n\n input_path = 'input/'\n if os.path.isdir(input_path):\n log.debug('Path already exists: ' + input_path)\n else:\n log.debug('Creating: ' + input_path)\n os.mkdir(input_path)\n\n fw = context.client\n\n if 'classification_measurement' in context.config:\n class_meas = context.config['classification_measurement'].split()\n else:\n class_meas = ['T1']\n\n # session and acquisition include/exclude lists can come from:\n # project info metadata,\n # subject info metadata, and\n # config options\n # The last one wins (how about getting it from an input file also, eh?)\n ses_exclude_list = None\n ses_include_list = None\n acq_exclude_list = None\n acq_include_list = None\n\n fs = 'freesurfer_longitudinal_'\n where = 'Found in project info'\n # check for exclude/include lists of regexs for sessions in project info\n sel = context.gear_dict['project'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['project'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in project info\n ael = context.gear_dict['project'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = 
context.gear_dict['project'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in subject info'\n # check for exclude/include lists of regexs for sessions in subject info\n sel = context.gear_dict['subject'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['subject'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in subject info\n ael = context.gear_dict['subject'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['subject'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in config'\n # set up exclude/include lists of reegexs for sessions in config\n if 'session_excludelist' in context.config:\n ses_exclude_list = context.config['session_excludelist'].split()\n log.info(where+' session_excludelist: \"'+str(ses_exclude_list)+'\"')\n if 'session_includelist' in context.config:\n ses_include_list = context.config['session_includelist'].split()\n log.info(where+' session_includelist: \"'+str(ses_include_list)+'\"')\n\n # set up exclude/include lists of reegexs for acquisitions in config\n if 'acquisition_excludelist' in context.config:\n acq_exclude_list = context.config['acquisition_excludelist'].split()\n log.info(where+' acquisition_excludelist: \"'+str(acq_exclude_list)+'\"')\n if 'acquisition_includelist' in context.config:\n acq_include_list = context.config['acquisition_includelist'].split()\n log.info(where+' acquisition_includelist: \"'+str(acq_include_list)+'\"')\n\n # go through all sessions, acquisitions to find files\n for session in context.gear_dict['subject'].sessions():\n\n lemme_out = False\n if ses_exclude_list:\n for regex in ses_exclude_list:\n if re.search(regex, session.label): # if excluded, skip\n log.info('Session \"' + session.label + '\" matches ' + \\\n 'exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if ses_include_list:\n match = False\n for regex in ses_include_list:\n if not re.search(regex, session.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Session \"' + session.label + '\" matches ' \\\n 'an inclusion regex, keeping it')\n\n for acquisition in fw.get_session_acquisitions(session.id):\n\n lemme_out = False\n if acq_exclude_list:\n for regex in acq_exclude_list:\n if re.search(regex, acquisition.label): # if excluded, skip\n log.info('Acquisition \"' + acquisition.label + \\\n '\" matches exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if acq_include_list:\n match = False\n for regex in acq_include_list:\n if not re.search(regex, acquisition.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Acquisition \"' + acquisition.label + '\" ' + \\\n 'matches an inclusion regex, keeping it')\n\n for afile in acquisition.files:\n\n # Scan must be nifti\n if afile.type == 'nifti':\n\n found_one = False\n for cm in class_meas:\n if 
'Measurement' in afile.classification:\n if cm in afile.classification['Measurement']:\n found_one = True\n log.info('Found ' + cm + ' file')\n\n if found_one:\n download_it(fw, acquisition, afile.name, input_path)\n context.gear_dict['visits'].append(\n make_file_name_safe(session.label, '_'))\n else:\n log.info('Ignoring ' + afile.name)", "def download_link(self, handle):\n return None", "def download(self, session):\n target_path = self.get_target_full_dir()\n os.chdir(target_path)\n schema_get = session.get(self.get_full_url(), verify=False)\n target_name = self.get_target_name()\n logger.debug('Starting download of file {} to {}.'.format(target_name.upper(), target_path))\n with open(os.path.join(target_path, target_name), \"wb\") as code:\n code.write(schema_get.content)\n logger.info('{} file has been downloaded successfully.'.format(target_name.upper()))", "def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)", "def _download(self) -> bytes:\n\n self.log.info(\"Downloading FCC facilities..\")\n # Disabling weak dh check. 
FCC should update their servers.\n ciphers = requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'\n r = requests.get(FACILITIES_URL)\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS = ciphers\n r.raise_for_status()\n return r.content", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def download_and_preprocess(self):\n print('Preparing steering angle database.')\n print('Downloading...')\n self.download()\n print('Preprocessing...')\n self.preprocess()", "def batch_download(self, yml_path, meta_mode=False):\n self.println(DL_HEAD)\n if not os.path.exists(yml_path):\n print(\"Urls file not found\\n\")\n else:\n with open(yml_path, 'r', encoding='UTF-8') as file:\n yaml_cfg = yaml.load(file, Loader=yaml.FullLoader)\n print(\"Read yaml file done.\\n\")\n try:\n for section in yaml_cfg:\n print(\n \"Downloading author {}'s doujinshi.\\n\".format(\n section['author']))\n\n # convert int to string if needed\n if isinstance(section['author'], int):\n section['author'] = str(section['author'])\n\n self.djs_core.enter_sub_dir(section['author'])\n\n if meta_mode:\n if self._meta_download(section['urls']):\n print(\n \"Author {}'s doujinshi' metadata download done.\\n\".format(\n section['author']))\n else:\n if self._download(section['urls']):\n print(\n \"Author {}'s doujinshi download done.\\n\".format(\n section['author']))\n\n self.djs_core.exit_dir()\n self.println(DL_MIDDLE)\n print(\"All sections' download done.\\n\")\n except Exception as e:\n print(\"Download failed and stopped.\")\n print(str(e))\n os.chdir(self.home)\n self.println(DL_TAIL)", "def _download_pkg(self, context):\n pkg_url = context.package.arg\n dst_file_path = context.package.full_path\n log.debug('downloading {0} to {1}'.format(pkg_url, dst_file_path))\n download_file(pkg_url, dst_file_path, context.package.get('timeout', 1), verify_https=context.get('verify_https', False))", "def fetch_velib_auto():\n # This try statement guards against the lack of internet connection\n try:\n dat = get_velib_data()\n except URL.URLError as err:\n print \"URLError: No internet connection?\"\n return 0\n\n save_velib_data(dat, glob.datafile)", "def fetch_anime_metadata(jikan, user_animes, force, rate_limit):\n for anime in user_animes:\n metadata_fpath = os.path.join(ANIMEDIR, f\"{anime['mal_id']}.json\")\n cache_exists = os.path.exists(metadata_fpath) and os.path.isfile(metadata_fpath)\n if force or not cache_exists:\n print(f\"Saving metadata for: {anime['title']}\")\n anime_metadata = jikan.anime(anime[\"mal_id\"])\n with open(metadata_fpath, \"w\") as fptr:\n json.dump(anime_metadata, fptr)\n time.sleep(rate_limit)\n else:\n print(f\"Skipping already 
cached metadata for: {anime['title']}\")", "def download_corpus(self, name, dst_directory):\n logging.info('Not downloading corpus because no Filestore.')", "def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])", "def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])", "def _stream(self):\n logger.info('getting meta-data')\n while not self.handle.has_metadata():\n time.sleep(0.1)\n\n #self.handle.rename_file(0, 'test.mp4')\n\n while not self.handle.is_seed():\n stat = self.handle.status()\n\n print 'downloading %.2f%%'%(stat.progress * 100)\n sys.stdout.flush()\n\n time.sleep(1)", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def fetch_zenodo(self):\n\n # full path to the stitches root directory where the example dir will be stored\n if self.data_dir is None:\n data_directory = pkg_resources.resource_filename('stitches', 'data')\n else:\n data_directory = self.data_dir\n\n # build needed subdirectories if they do not already exist\n tas_data_path = os.path.join(data_directory, \"tas-data\")\n temp_data_path = os.path.join(data_directory, \"temp-data\")\n if not os.path.exists(tas_data_path):\n os.mkdir(tas_data_path)\n if not os.path.exists(temp_data_path):\n os.mkdir(temp_data_path)\n\n # get the current version of stitches that is installed\n current_version = pkg_resources.get_distribution('stitches').version\n\n try:\n data_link = 
InstallPackageData.DATA_VERSION_URLS[current_version]\n\n except KeyError:\n msg = f\"Link to data missing for current version: {current_version}. Using default version: {InstallPackageData.DEFAULT_VERSION}\"\n\n data_link = InstallPackageData.DEFAULT_VERSION\n\n print(msg)\n\n # retrieve content from URL\n print(\"Downloading example data for stitches version {}. This may take a few minutes...\".format(current_version))\n response = requests.get(data_link)\n\n with zipfile.ZipFile(BytesIO(response.content)) as zipped:\n\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n\n extension = os.path.splitext(f)[-1]\n\n # Extract only the csv and nc files\n if all([len(extension) > 0, extension in (\".csv\", \".nc\")]):\n\n basename = os.path.basename(f)\n\n # Check to see if tas-data is in the file path\n if \"tas-data\" in f:\n basename = os.path.join(\"tas-data\", basename)\n\n out_file = os.path.join(data_directory, basename)\n\n # extract to a temporary directory to be able to only keep the file out of the dir structure\n with tempfile.TemporaryDirectory() as tdir:\n\n # extract file to temporary directory\n zipped.extract(f, tdir)\n\n # construct temporary file full path with name\n tfile = os.path.join(tdir, f)\n\n print(f\"Unzipped: {out_file}\")\n # transfer only the file sans the parent directory to the data package\n shutil.copy(tfile, out_file)", "def download(bot, update):\n update.message.reply_text(''' ''')", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def download_coco(): \n file_type = '.zip'\n img_to_download = ['val','test','train']\n ann_to_download = ['annotations_trainval','image_info_test']\n base_url_images = 'http://images.cocodataset.org/zips/'\n base_url_ann = 'http://images.cocodataset.org/annotations/'\n\n\n click.echo(click.style(f\"\\n DOWNLOAD ANNOTATIONS \\n\", bg='green', bold=True, fg='white'))\n for ann in ann_to_download:\n\n ## build Urls\n ann_url = base_url_ann + ann + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\nDownloading of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be downloaded')\n\n zip_filename_location = save_zip_from_url(ann_url,cfg.PATH_ANNOTATIONS)\n #zip_filename_location = \"/home/kamgo-gpu/Schreibtisch/stuff_annotations_trainval2017.zip\"\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n\n click.echo(click.style(f'\\n Extraction of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be extracted and the zip-file will be deleted')\n\n # Extract zip to annotation directory\n Extract_zip_file(zip_filename_location,cfg.PATH_ANNOTATIONS)\n\n click.echo(click.style(f\"\\n DOWNLOAD IMAGES \\n\", bg='green', bold=True, fg='white'))\n for dataset in img_to_download:\n ## build Urls\n dataset_img_url = base_url_images + dataset + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\n Downloading of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be downloaded')\n\n zip_filename_location = save_zip_from_url(dataset_img_url,cfg.PATH_IMAGES)\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n click.echo(click.style(f'\\n Extraction of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be extracted and the zip-File will be 
deleted')\n\n # set complet Path to save images\n Extract_zip_file(zip_filename_location,cfg.PATH_IMAGES)\n\n click.echo(click.style(f'\\n Download and extraction termined successfull {dataset} ...\\n', bg='green', bold=True, fg='white'))", "def download(self, session: Session = None):\n\n def dont_skip_download():\n return not self.skip_download\n\n def archive_path_does_not_exist():\n return not os.path.exists(self.archive_path)\n\n # Check to see if the dataset already exists.\n if self._does_not_exist_or_forced():\n if dont_skip_download() or archive_path_does_not_exist():\n self._download()\n else:\n logger.info(f\"Download skipped.\")\n\n with unknown_progress(f\"Extracting\"):\n self._extract()\n\n # Clean-up archive file if -c/--clean is used.\n if self.clean:\n logger.info(f\"Removing archive file.\")\n os.remove(self.archive_path)\n\n with unknown_progress(f\"Parsing metadata\"):\n self._save_metadata()\n\n with unknown_progress(f\"Moving images\"):\n self._move_images()\n\n with unknown_progress(f\"Cleaning up\"):\n self._clean_up()", "def onContentDownloadComplete(self, fetcher, connectionResp): #$NON-NLS-1$\r", "async def _download(self) -> None:\n\n # do request\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url, auth=self._auth, timeout=self._timeout) as response:\n # check response\n if response.status == 200:\n # get data and return it\n self._buffer = await response.read()\n elif response.status == 401:\n log.error(\"Wrong credentials for downloading file.\")\n raise FileNotFoundError\n else:\n log.error(\"Could not download file from filecache.\")\n raise FileNotFoundError", "def download_agent_if_missing(filename):\n if file_missing(filename):\n print filename+'is missing, downloading it first'\n download(filename)", "def metadata_path(self) -> Path:\n return self.download_folder() / f\"{self.manufacturer_ref}-meta.json\"", "def download_finish(self, cloud_file):", "def download(self, account, code):\n\n url = Spider.BASE_URL + \"/p/%s/?taken-by=%s\" % (code, account)\n r = self.session.get(url)\n content_match = re.search(r\"<script.*?>\\s*?window._sharedData\\s*?=\\s*?({.*}).*?</script>\", r.text,\n re.MULTILINE)\n data = json.loads(content_match.group(1))\n media = data['entry_data']['PostPage'][0]['graphql']['shortcode_media']\n download_urls = []\n if media['__typename'] == 'GraphVideo': # video\n download_urls.append(media[\"video_url\"])\n if media['__typename'] == 'GraphImage': # image\n download_urls.append(media[\"display_url\"])\n if media['__typename'] == 'GraphSidecar': # slide\n nodes = media['edge_sidecar_to_children']['edges']\n for node in nodes:\n node = node['node']\n if node['is_video']:\n download_urls.append(node['video_url'])\n else:\n download_urls.append(node['display_url'])\n\n actual_download_dir = os.path.join(download_dir, account)\n if not os.path.isdir(actual_download_dir):\n os.mkdir(actual_download_dir)\n for url in download_urls:\n filename = os.path.join(actual_download_dir, url.split('/')[-1].split('?')[0])\n temp_name = filename + '.tmp'\n if os.path.isfile(filename):\n if self.spider.auto_stop:\n print('file', filename, \"already exists, exiting......\")\n sys.exit()\n print('file', filename, \"already exists, skipping\")\n else:\n print('downloading %s:' % filename)\n r = self.session.get(url, stream=True)\n content_length = int(r.headers['content-length'])\n curr = 0\n with open(temp_name, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n f.write(chunk)\n curr += 1024\n progress(curr, 
content_length)\n os.rename(temp_name, filename)\n self.spider.item_count += 1", "def _download_and_extract_popt_devel(self):\n raise NotImplementedError('Implement this method.')", "def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def fetch_dataset(\n dataset,\n ignore_hashinfo: bool = False,\n verify: bool = False,\n read_only: bool = False,\n verbose: bool = False,\n pre_scan: bool = True,\n) -> Union[bool, dict[str, Any]]:\n if dataset not in dials_data.datasets.definition:\n return False\n definition = dials_data.datasets.definition[dataset]\n\n target_dir: Path = dials_data.datasets.repository_location() / dataset\n if read_only and not target_dir.is_dir():\n return False\n\n integrity_info = definition.get(\"hashinfo\")\n if not integrity_info or ignore_hashinfo:\n integrity_info = dials_data.datasets.create_integrity_record(dataset)\n\n if \"verify\" not in integrity_info:\n integrity_info[\"verify\"] = [{} for _ in definition[\"data\"]]\n filelist: list[dict[str, Any]] = [\n {\n \"url\": source[\"url\"],\n \"file\": target_dir / os.path.basename(urlparse(source[\"url\"]).path),\n \"files\": source.get(\"files\"),\n \"verify\": hashinfo,\n }\n for source, hashinfo in zip(definition[\"data\"], integrity_info[\"verify\"])\n ]\n\n if pre_scan or read_only:\n if all(\n item[\"file\"].is_file()\n and item[\"verify\"].get(\"size\")\n and item[\"verify\"][\"size\"] == item[\"file\"].stat().st_size\n for item in filelist\n ):\n return True\n if read_only:\n return False\n\n # Obtain a (cooperative) lock on a dataset-specific lockfile, so only one\n # (cooperative) process can enter this context at any one time. 
The lock\n # file sits in the directory above the dataset directory, as to not\n # interfere with dataset files.\n target_dir.mkdir(parents=True, exist_ok=True)\n with target_dir.with_name(f\".lock.{dataset}\").open(mode=\"w\") as fh:\n with _file_lock(fh):\n verification_records = _fetch_filelist(filelist)\n\n # If any errors occured during download then don't trust the dataset.\n if verify and not all(verification_records):\n return False\n\n integrity_info[\"verify\"] = verification_records\n return integrity_info", "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def download(all):\n print(\"Downloading\")", "def download():\n\treturn response.download(request, db)", "def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n logging.info(\"Fetch housing data.....\")\n os.makedirs(housing_path, exist_ok=True)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()", "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,\n reporthook=_progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def download(\n self,\n overwrite: bool = False,\n refresh_token: bool = False,\n show_progress: bool = True,\n ):\n _LOGGER.info(f\"Downloading model {self.stub}\")\n\n if self.card_file:\n _LOGGER.info(f\"Downloading model card {self.stub}\")\n self.card_file.download(\n overwrite=overwrite,\n refresh_token=refresh_token,\n show_progress=show_progress,\n )\n\n if self.onnx_file:\n _LOGGER.info(f\"Downloading model onnx {self.stub}\")\n self.onnx_file.download(\n overwrite=overwrite,\n refresh_token=refresh_token,\n show_progress=show_progress,\n )\n\n if self.onnx_file_gz:\n _LOGGER.info(f\"Downloading model onnx gz {self.stub}\")\n self.onnx_file_gz.download(\n overwrite=overwrite,\n refresh_token=refresh_token,\n show_progress=show_progress,\n )\n\n self.download_framework_files(\n overwrite=overwrite,\n refresh_token=refresh_token,\n show_progress=show_progress,\n )\n\n for data in self._data.values():\n _LOGGER.info(f\"Downloading model data {data.name} {self.stub}\")\n data.download(\n overwrite=overwrite,\n refresh_token=refresh_token,\n show_progress=show_progress,\n )\n\n for recipe in 
self._recipes:\n _LOGGER.info(f\"Downloading model recipe {recipe.display_name} {self.stub}\")\n recipe.download(\n overwrite=overwrite,\n refresh_token=refresh_token,\n show_progress=show_progress,\n )", "def download_all_files(self):\n self.server_handler.get_sensor_data_from_server()", "def allow_download(self, url, config):\n\n url = url.lstrip('/')\n asset_url = url.split('asset/')[-1]\n id, filename = asset_url.split('/')\n\n oai_server = OAIServer(self._db, config)\n try:\n header, metadata, description = oai_server.getRecord(\n 'oai_dc', config.oai_id_prefix + id)\n except oaipmh.error.IdDoesNotExistError:\n # record is not in the oai feed, don't download\n return False\n if header.isDeleted():\n # record has deleted status, don't download\n return False\n\n return True", "def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")", "def do_fetch(self):\n pass", "def should_download_magnet(self, magnet):\n return True", "def downloadFile(self, path, out):\n\t\ttry:\n\t\t\tlogger.info(\"downloadFile('%s', ...)\" % (path))\n\n\t\t\t# Downloads from dropbox\n\t\t\t# Manually :( update the metadata cache\n\t\t\tf, metadata = self.client.get_file_and_metadata(path)\n\t\t\tf = f.read()\n\t\t\tlogger.info('* file downloaded')\n\t\t\tself.cache_metadata.setNewValue(path, metadata)\n\t\t\tlogger.info('* metadata updated')\n\t\t\t# Write to tmp file and close\n\t\t\tos.write(out, f)\n\t\t\tlogger.info(\"* file written\")\n\t\t\tos.close(out)\n\t\t\tlogger.info('* file closed')\n\t\t\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tlogger.error(\"Exception %s at downloadFile(%s)\" % (sys.exc_info()[0], path))\n\t\t\tlogger.debug(pformat(sys.exc_info()))\n\t\t\treturn False", "def find_and_download_songs(kwargs):\n sponsorblock_postprocessor = []\n reference_file = kwargs[\"reference_file\"]\n files = {}\n with open(reference_file, \"r\", encoding=\"utf-8\") as file:\n for line in file:\n temp = line.split(\";\")\n name, artist, album, i = (\n temp[0],\n temp[1],\n temp[4],\n int(temp[-1].replace(\"\\n\", \"\")),\n )\n\n query = f\"{artist} - {name} Lyrics\".replace(\":\", \"\").replace('\"', \"\")\n print(f\"Initiating download for {query}.\")\n\n file_name = kwargs[\"file_name_f\"](\n name=name, artist=artist, track_num=kwargs[\"track_db\"][i].get(\"playlist_num\")\n )\n\n if kwargs[\"use_sponsorblock\"][0].lower() == \"y\":\n sponsorblock_postprocessor = [\n {\n \"key\": \"SponsorBlock\",\n \"categories\": [\"skip_non_music_sections\"],\n },\n {\n \"key\": \"ModifyChapters\",\n \"remove_sponsor_segments\": [\"music_offtopic\"],\n \"force_keyframes\": True,\n },\n ]\n save_path = kwargs[\"track_db\"][i][\"save_path\"]\n file_path = path.join(save_path, file_name)\n\n mp3file_path = f\"{file_path}.mp3\"\n\n if save_path not in files:\n path_files = set()\n files[save_path] = path_files\n else:\n path_files = files[save_path]\n\n path_files.add(f\"{file_name}.mp3\")\n\n if (\n kwargs[\"no_overwrites\"]\n and not kwargs[\"skip_mp3\"]\n and path.exists(mp3file_path)\n ):\n print(f\"File {mp3file_path} 
already exists, we do not overwrite it \")\n continue\n\n outtmpl = f\"{file_path}.%(ext)s\"\n ydl_opts = {\n \"proxy\": kwargs.get(\"proxy\"),\n \"default_search\": \"ytsearch\",\n \"format\": \"bestaudio/best\",\n \"outtmpl\": outtmpl,\n \"postprocessors\": sponsorblock_postprocessor,\n \"noplaylist\": True,\n \"no_color\": False,\n \"postprocessor_args\": [\n \"-metadata\",\n \"title=\" + name,\n \"-metadata\",\n \"artist=\" + artist,\n \"-metadata\",\n \"album=\" + album,\n ],\n }\n if not kwargs[\"skip_mp3\"]:\n mp3_postprocess_opts = {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"192\",\n }\n ydl_opts[\"postprocessors\"].append(mp3_postprocess_opts.copy())\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n try:\n ydl.download([query])\n except Exception as e: # skipcq: PYL-W0703\n log.debug(e)\n print(f\"Failed to download {name}, make sure yt_dlp is up to date\")\n if not kwargs[\"skip_mp3\"]:\n set_tags(temp, mp3file_path, kwargs)\n if kwargs[\"remove_trailing_tracks\"] == \"y\":\n for save_path in files:\n for f in os.listdir(save_path):\n if f not in files[save_path]:\n print(f\"File {f} is not in the playlist anymore, we delete it\")\n os.remove(path.join(save_path, f))", "def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()", "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def download_case(\n self, casename, syncmode=True, ignore_raw_data=True, ignore_figures=True,\n ignore_rasters=True, ignore_noexist=False):\n\n ignore_patterns = [\"__pycache__\"]\n\n if ignore_raw_data:\n ignore_patterns += [\".*?\\.data\", \"fort\\..*?\"]\n\n if ignore_figures:\n ignore_patterns += [\"_plots\"]\n\n if ignore_rasters:\n ignore_patterns += [\".*?\\.asc\", \".*?\\.prj\"]\n\n try:\n self.controller.download_cloud_dir(\n self.info, casename, self.info.tasks[casename][\"path\"], \n syncmode, ignore_patterns)\n except KeyError:\n if ignore_noexist:\n pass\n else:\n raise", "def is_downloadable(self):\n return True", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def check_fetch_frey():\n url = 'http://www.cs.nyu.edu/~roweis/data/frey_rawface.mat'\n partial_path = get_dataset_dir(\"frey\")\n full_path = os.path.join(partial_path, \"frey_rawface.mat\")\n if not os.path.exists(partial_path):\n os.makedirs(partial_path)\n if not os.path.exists(full_path):\n download(url, full_path, progress_update_percentage=1)\n return full_path", "def _enableLazyDownload(self):\n procScript = \"cmssw_enable_lazy_download.py\"\n 
cmd = \"%s --input_pkl %s --output_pkl %s\" % (\n procScript,\n os.path.join(self.stepSpace.location, self.configPickle),\n os.path.join(self.stepSpace.location, self.configPickle))\n self.scramRun(cmd)\n\n return", "def is_downloadable(self):\n return False", "def pre_download(self, crawler, parent_url=None):\n\n if self.get_headers_and_data():\n self.status = True\n # Obtained from cache\n return True\n\n eventr = crawlerbase.CrawlerEventRegistry.getInstance()\n \n try:\n # If a fake mime-type only do a HEAD request to get correct URL, dont\n # download the actual data using a GET.\n if self.given_content_type in self.config.client_fake_mimetypes or \\\n any(map(lambda x: self.given_content_type.startswith(x),\n self.config.client_fake_mimetypes_prefix)): \n log.info(\"Making a head request\",self.url,\"...\")\n fhead = urlhelper.head_url(self.url, headers=self.build_headers())\n log.info(\"Obtained with head request\",self.url,\"...\")\n\n self.headers = fhead.headers\n # If header returns 404 then skip this URL\n if fhead.status_code not in range(200, 300):\n log.error('Error head requesting URL =>', fhead.url,\"status code is\",fhead.status_code)\n return False\n \n if self.url != fhead.url:\n # Flexi scope - no problem\n # Allow external domains only for flexible site scope\n print \"SCOPE =>\", self.config.site_scope\n if self.config.site_scope == 'SITE_FLEXI_SCOPE':\n self.url = fhead.url\n log.info(\"URL updated to\",self.url) \n else:\n scoper = CrawlerScopingRules(self.config, self.url)\n if scoper.allowed(fhead.url, parent_url, redirection=True):\n self.url = fhead.url\n log.info(\"URL updated to\",self.url)\n else:\n log.extra('Site scoping rules does not allow URL=>', fhead.url)\n return False \n\n self.content_type = urlhelper.get_content_type(self.url, self.headers)\n\n # Simulate download event for this URL so it gets added to URL graph\n # Publish fake download complete event \n eventr.publish(self, 'download_complete_fake',\n message='URL has been downloaded fakily',\n code=200,\n params=self.__dict__)\n\n self.status = False\n return True\n except urlhelper.FetchUrlException, e:\n log.error('Error downloading',self.url,'=>',str(e))\n # FIXME: Parse HTTP error string and find out the\n # proper code to put here if HTTPError.\n eventr.publish(self, 'download_error',\n message=str(e),\n is_error = True,\n code=0,\n params=self.__dict__)\n\n return False", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])" ]
[ "0.6125075", "0.6057573", "0.5919949", "0.59092546", "0.58205396", "0.57917017", "0.5775783", "0.5760028", "0.5726218", "0.56873673", "0.5681924", "0.5636815", "0.5625458", "0.5623649", "0.5623649", "0.56185186", "0.55663484", "0.55158645", "0.5502782", "0.54930496", "0.54860747", "0.5480579", "0.5466807", "0.54662263", "0.54660505", "0.53735125", "0.5366554", "0.5360187", "0.5360187", "0.5339469", "0.5317428", "0.5317001", "0.5303386", "0.5289521", "0.52822775", "0.5279419", "0.5275885", "0.5251606", "0.5249825", "0.52320033", "0.5219504", "0.5205091", "0.5202262", "0.51923794", "0.51893306", "0.5186458", "0.5176655", "0.51749647", "0.51586264", "0.51522654", "0.51448756", "0.5138474", "0.5135603", "0.51349324", "0.5128473", "0.5116217", "0.5115461", "0.5097751", "0.5097167", "0.5091441", "0.5084005", "0.5078237", "0.50764704", "0.5072255", "0.50703615", "0.5061752", "0.5053043", "0.5052303", "0.5035869", "0.50283486", "0.50283486", "0.5022668", "0.5020291", "0.50108224", "0.5008223", "0.5007456", "0.50014246", "0.49963224", "0.4983189", "0.49779493", "0.49760777", "0.4972984", "0.49726838", "0.49698755", "0.4967598", "0.4967275", "0.49601942", "0.49547145", "0.49498677", "0.4945642", "0.4945642", "0.4945642", "0.4945642", "0.4945642", "0.4945642", "0.49400043", "0.4929841", "0.4929713", "0.49208337", "0.49201268" ]
0.5666497
11
Recursively searches for 'datacube-definition.prj' in a level2 directory and returns its parent directory.
def _get_datacubeprj_dir(directory): prj_path = [] for path in Path(directory).rglob('datacube-definition.prj'): prj_path.append(path) if len(prj_path) < 1: raise FileNotFoundError(f"'datacube-definition.prj' not found in {directory}") elif len(prj_path) > 1: raise RuntimeError(f"'datacube-definition.prj' multiple copies found in {directory}") else: return prj_path[0].parent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_parent_path(self):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"genes\")", "def find_in_parent_dir(fname):\n p = os.path.abspath(os.path.curdir)\n \n while not os.path.exists(os.path.join(p, project_conf_name)):\n oldp, p = p, os.path.dirname(p)\n if p == oldp:\n return None\n \n return open(os.path.join(p, project_conf_name), 'r')", "def test_find_in_grandparent_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n subdirectory = os.path.join(directory, 'fake', 'fake')\n result = steptest.find_project_directory(subdirectory)\n self.assertEqual(directory, result)", "def search_parents(name, cwd):\n for pdir in parents(cwd):\n if name in os.listdir(pdir):\n return os.path.join(pdir, name)\n\n return None", "def test_find_in_parent_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n subdirectory = os.path.join(directory, 'fake')\n result = steptest.find_project_directory(subdirectory)\n self.assertEqual(directory, result)", "def get_path(path, parent=None, prj=None):\n if prj is None:\n prj = QgsProject.instance()\n\n if parent is None:\n parent = prj.layerTreeRoot()\n\n if path is None:\n return parent\n if not isinstance(path, (list, tuple)):\n path = path.split(\"/\")\n\n for part in path:\n if len(path) > 0:\n parent = get_group(part, parent)\n\n return parent", "def project_path(cur_path=''):\n if not cur_path:\n cur_path = __file__\n real_path = os.path.realpath(cur_path)\n # path of upper-level directory\n upper_folder = os.path.split(real_path)[0]\n # path of topmost-level directory (trunk)\n return os.path.split(upper_folder)[0]", "def getProjectDir(self):\n logger.debug(\"Func: getProjectDir\")\n return -1", "def _loadSubprojects(self):\n logger.debug(\"Func: _loadSubprojects\")\n\n if not os.path.isfile(self._pathsDict[\"subprojectsFile\"]):\n data = [\"None\"]\n self._dumpJson(data, self._pathsDict[\"subprojectsFile\"])\n else:\n data = self._loadJson(self._pathsDict[\"subprojectsFile\"])\n if data == -2:\n return -2\n return data", "def have_ebuild_dir(path, maxdepth=3):\n\tstack = [(normalize_path(path), 1)]\n\twhile stack:\n\t\tpath, depth = stack.pop()\n\t\tbasename = os.path.basename(path)\n\t\ttry:\n\t\t\tlistdir = os.listdir(path)\n\t\texcept OSError:\n\t\t\tcontinue\n\t\tfor filename in listdir:\n\t\t\tabs_filename = os.path.join(path, filename)\n\t\t\ttry:\n\t\t\t\tst = os.stat(abs_filename)\n\t\t\texcept OSError:\n\t\t\t\tcontinue\n\t\t\tif stat.S_ISDIR(st.st_mode):\n\t\t\t\tif depth < maxdepth:\n\t\t\t\t\tstack.append((abs_filename, depth + 1))\n\t\t\telif stat.S_ISREG(st.st_mode):\n\t\t\t\tif filename.endswith(\".ebuild\") and \\\n\t\t\t\t\tfilename.startswith(basename + \"-\"):\n\t\t\t\t\treturn os.path.dirname(os.path.dirname(path))", "def test_level2_recursion(self):\n recursed = recurse_files('filename2', self.files['filename2'], self.files)\n self.assertEqual(recursed, [\"file7\", \"file2\", \"file3\", \"file6\"])", "def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]", "def _parent_path(pkg, pkg_path):\n parent = pkg_path[: -len(pkg)] if pkg_path.endswith(pkg) else pkg_path\n return parent.rstrip(\"/\" + os.sep)", "def get_parent_dir(path):\n return os.path.dirname(path)", "def getParentDirectory():\n path = os.path.dirname(os.path.realpath(__file__))\n path = '/'.join( path.split('/')[:-1] )\n return path", "def getProjectRoot(self):\n currentPath = os.getcwd()\n while(True):\n if \"DataStore\" in os.listdir(currentPath):\n break\n currentPath = 
\"/\".join(currentPath.split(\"/\")[:-1])\n return currentPath + \"/\"", "def get_parent_dir(path):\n\n return os.path.abspath(os.path.join(path, os.pardir))", "def get_level_path(target_level, cwd=None):\n if cwd is None:\n cwd = os.getwd()\n q = \"\"\n for ll in levels:\n q = os.path.join(q, get_level_name(ll, cwd))\n if ll == target_level:\n break\n return q", "def get_parent_until(path):\r\n dirname = osp.dirname(path)\r\n try:\r\n mod = osp.basename(path)\r\n mod = osp.splitext(mod)[0]\r\n imp.find_module(mod, [dirname])\r\n except ImportError:\r\n return\r\n items = [mod]\r\n while 1:\r\n items.append(osp.basename(dirname))\r\n try:\r\n dirname = osp.dirname(dirname)\r\n imp.find_module('__init__', [dirname + os.sep])\r\n except ImportError:\r\n break\r\n return '.'.join(reversed(items))", "def _find_project_by_import():\n try:\n import _databand_project\n\n return abs_join(_databand_project.__file__, \"..\")\n except ImportError:\n dbnd_log_init_msg(\"Can't import `_databand_project` marker.\")\n return None", "def _get_project_dir(self):\n return os.path.expanduser(\n self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"project_dir\")\n )\n or os.getcwd()\n )", "def _find_root() -> pathlib.Path:\n cwd = pathlib.Path.cwd()\n while not (\n pathlib.Path(cwd, \"pyproject.toml\").exists() or\n pathlib.Path(cwd, \"poetry.lock\").exists() or\n pathlib.Path(\"/\") == cwd\n ):\n cwd = cwd.parent\n return cwd", "def _find_bids_root(dataset_path) -> Path:\n description_paths = list(Path(dataset_path).glob(\"**/dataset_description.json\"))\n # 1 - if more than one, select first and output warning\n # 2 - if zero, output error\n # 3 - if 1, add to dataset path and set ats bids root dir\n if len(description_paths) == 0:\n msg = (\"The file 'dataset_description.json' should be part of the BIDS dataset \"\n \"in order for the 'bids_dataset' extractor to function correctly\")\n raise FileNotFoundError(msg)\n elif len(description_paths) > 1:\n msg = (f\"Multiple 'dataset_description.json' files ({len(description_paths)}) \"\n f\"were found in the recursive filetree of {dataset_path}, selecting \"\n \"first path.\")\n lgr.warning(msg)\n return Path(description_paths[0]).parent\n else:\n return Path(description_paths[0]).parent", "def resolve_level(target_level, cwd=None):\n if cwd is None:\n cwd = os.getcwd()\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n target_idx = levels.index(target_level)\n pl = [\".\"]\n for i in range(0, this_idx - target_idx):\n pl.append(\"..\")\n return os.path.join(*pl)", "def get_project_root():\n # Get current working directory\n cwd = os.getcwd()\n # Remove all children directories\n rd = os.path.join(cwd.split('stochastic-travel-demand-modelling/', 1)[0])\n # Make sure directory ends with project's name\n if not rd.endswith('stochastic-travel-demand-modelling'):\n rd = os.path.join(rd,'stochastic-travel-demand-modelling/')\n\n return rd", "def parentOrThisDir(path):\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n return path", "def find_diagnostics_dir(cube, image):\n return find_subdir(cube, image, 'diagnostics')", "def get_project_dir():\n path = Path(__file__).parent.parent\n project_dir = path.parent\n return project_dir", "def findDirAbove(dirName):\n workDir = \"\"\n for i in range(20):\n path = os.path.join(workDir, dirName)\n if os.path.exists(path):\n return os.path.abspath(path)\n workDir = os.path.join(workDir, \"..\")\n\n return None", "def find_build(branch, tag, build_path=None, 
old_build_path=None):\n for directory_format in [build_path, old_build_path]:\n if directory_format is None:\n continue\n loc = directory_format % (branch, tag)\n if isdir(loc):\n return loc\n raise BuildNotFound(branch, tag)", "def _find_production_root(path: pathlib.Path) -> Optional[pathlib.Path]:\n if is_valid_production_root(path):\n return path\n try:\n parent_path = path.parents[0]\n return _find_production_root(parent_path)\n except IndexError:\n return None", "def test_find_config_parent_dir(self, in_tmp_path):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n\n os.mkdir(\"subdir\")\n os.chdir(\"subdir\")\n\n # Verify our current working dir\n assert_paths_equal(os.getcwd(), in_tmp_path.joinpath(\"subdir\"))\n\n path, rel, _ = scuba.config.find_config()\n assert_paths_equal(path, in_tmp_path)\n assert_paths_equal(rel, \"subdir\")", "def get_project_root(self):\n # Get current working directory\n cwd = os.getcwd()\n # Remove all children directories\n rd = os.path.join(cwd.split('stochastic-travel-demand-modelling/', 1)[0])\n # Make sure directory ends with project's name\n if not rd.endswith('stochastic-travel-demand-modelling'):\n rd = os.path.join(rd,'stochastic-travel-demand-modelling/')\n\n return rd", "def parent_dir_path(path):\n return absolute_path(os.path.dirname(path))", "def ancestors(self):\r\n\r\n def find_parent(dir):\r\n parent = os.path.dirname(dir)\r\n buildfile = os.path.join(parent, BuildFile._CANONICAL_NAME)\r\n if os.path.exists(buildfile) and not os.path.isdir(buildfile):\r\n return parent, BuildFile(self.root_dir, os.path.relpath(buildfile, self.root_dir))\r\n else:\r\n return parent, None\r\n\r\n parent_buildfiles = OrderedSet()\r\n\r\n parentdir = os.path.dirname(self.full_path)\r\n visited = set()\r\n while parentdir not in visited and self.root_dir != parentdir:\r\n visited.add(parentdir)\r\n parentdir, buildfile = find_parent(parentdir)\r\n if buildfile:\r\n parent_buildfiles.update(buildfile.family())\r\n\r\n return parent_buildfiles", "def get_project_root():\n return str(Path(__file__).parent.parent.parent.parent)", "def get_cabal_project_dir_of_file(filename):\n return get_cabal_project_dir_and_name_of_file(filename)[0]", "def _search_parent_dir(file_name):\n\n current_dir = os.getcwd()\n parent_dir = os.path.dirname(current_dir)\n while current_dir != parent_dir:\n if not os.path.splitdrive(current_dir)[-1]:\n return False\n file_list = os.listdir(current_dir)\n parent_dir = os.path.dirname(current_dir)\n\n if file_name in file_list:\n return current_dir\n\n else:\n current_dir = parent_dir\n return False", "def _find_project_root_dir(target: Path) -> Path:\n p = Path(\".\").resolve()\n for _ in range(1000):\n try:\n candidate = _search_for_dir(target, p)\n return candidate.parent\n except DirectoryNotFoundException:\n p = p.parent\n raise DirectoryNotFoundException", "def test_level3_recursion(self):\n recursed = recurse_files('filename3', self.files['filename3'], self.files)\n self.assertEqual(recursed, [\"file7\", \"file2\", \"file3\", \"file6\", \"file5\"])", "def get_project_path():\n file_path = os.path.abspath(__file__)\n root_dir = os.path.join(os.path.dirname(file_path), \"..\")\n return os.path.abspath(root_dir)", "def test_find_in_current_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n result = steptest.find_project_directory(directory)\n self.assertEqual(directory, result)", "def get_path_to_rel_location(directory_to_find):\n path = Path.cwd()\n num_tries = 5\n for num_up_folder in 
range(num_tries):\n path = path.parent\n if path / directory_to_find in path.iterdir():\n break\n\n if num_tries == num_up_folder:\n raise FileNotFoundError(f\"The directory {directory_to_find} could not be found in the {num_tries}\"\n f\" directories above this file's location.\")\n return path / directory_to_find", "def find_build_dir(hw, r):\n os.chdir(hw)\n find_cache(hw, r);\n os.chdir(\"..\")", "def get_project_source_dir() -> Path:\n return Path(__file__).resolve().parents[1].resolve()", "def _look_in_package(tree: dict, module_path: str, name: str, level: Optional[int] = None) -> Union[str, None]:\n parent_path = os.path.dirname(module_path)\n if level is not None:\n for _ in range(level - 1):\n parent_path = os.path.dirname(parent_path)\n parent = find_tree(tree, lambda x, p: x[\"path\"] in [p, os.path.join(p, \"__init__.py\")], args=(parent_path,))\n if parent:\n if parent[\"fullname\"] in [name, \"{}.__init__\".format(name)]:\n return parent[\"path\"]\n for child in parent[\"children\"].values():\n if child[\"name\"] == name:\n return child[\"path\"]\n target = find_tree(tree, lambda x, f: x[\"fullname\"] == f, args=(\"{}.{}\".format(parent[\"fullname\"], name),))\n if target:\n return target[\"path\"]\n return None", "def parent_dir(self):\n parent = os.path.dirname(self.dirn)\n if self.is_subdir:\n parent = os.path.basename(parent)\n else:\n if self.platform is not None and parent.endswith(self.platform):\n parent = parent[:-len(self.platform)].rstrip(os.sep)\n if self.year is not None and parent.endswith(str(year)):\n parent = parent[:-len(str(year))].rstrip(os.sep)\n return parent", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def get_project_root():\n return Path(__file__).parent.parent", "def get_project_root() -> pl.Path:\n return pl.Path(__file__).parent.parent", "def find_project_directory(start=\".\", look_for=None):\n look_for = set(look_for or DEFAULT_PROJECT_INDICATORS)\n\n directory = path.path(start).abspath()\n\n while directory.parent != directory:\n items = os.listdir(directory)\n if any(i in look_for for i in items):\n return directory\n\n directory = directory.parent\n\n raise WatsonError('%s does not look like a project subdirectory' % start)", "def get_proj_dir(path: Union[pathlib.PurePath, str] = __file__) -> str:\n return str(pathlib.Path(path).parent.absolute())", "def find_project_dir():\r\n for path in sys.path:\r\n abs_path = os.path.join(os.path.abspath(path), \"app.yaml\")\r\n if os.path.exists(abs_path):\r\n return os.path.dirname(abs_path)\r\n\r\n raise RuntimeError(\"Unable to locate app.yaml on sys.path\")", "def projectDir(self):\n logger.debug(\"Func: projectDir/getter\")\n return self._pathsDict[\"projectDir\"]", "def findRepositoryByBackTracking():\n \n cLookBack = '.'\n while(True):\n cDir = os.path.abspath(cLookBack)\n print(\"Searching in %s\" % cDir)\n if os.path.isdir( os.path.join(cDir, DB_SUBFOLDER) ):\n return cDir\n else:\n if os.path.abspath(cLookBack) == os.path.abspath(cLookBack + '/..'):\n return os.path.abspath('.')\n cLookBack = cLookBack + '/..'\n \n return cDir", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_parent(self):\n parent_id = self.client._perform_json(\"GET\", \"/project-folders/%s\" % 
self.project_folder_id).get(\"parentId\", None)\n if parent_id is None:\n return None\n else:\n return DSSProjectFolder(self.client, parent_id)", "def cfgpath(p):\n p = Path(p)\n if p.is_absolute():\n return p\n else:\n for d in reversed(cfgdirs):\n try:\n fp = (d / p).resolve()\n except FileNotFoundError:\n continue\n if fp.is_file():\n return fp\n else:\n return p", "def get_parent(path):\n\n # security check\n parent = os.path.dirname(path)\n\n try:\n get_abspath(parent)\n except:\n parent = ''\n\n return parent", "def get_level_name(target_level, cwd=None):\n if cwd is None:\n cwd = os.getcwd()\n\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n target_idx = levels.index(target_level)\n i = this_idx\n cw = cwd\n pp = \"\"\n while i >= target_idx:\n cw, pp = os.path.split(cw)\n i -= 1\n return pp", "def _GetHigherLevelOwnersFilePath(path):\n # The highest directory that is searched for component information is one\n # directory lower than the directory above tools. Depending on the machine\n # running this code, the directory above tools may or may not be src.\n path_to_limiting_dir = os.path.abspath(os.path.join(*DIR_ABOVE_TOOLS))\n limiting_dir = path_to_limiting_dir.split(os.sep)[-1]\n owners_file_limit = (os.sep).join([limiting_dir, _OWNERS])\n if path.endswith(owners_file_limit):\n return ''\n\n parent_directory = os.path.dirname(os.path.dirname(path))\n parent_owners_file_path = os.path.join(parent_directory, _OWNERS)\n\n if (os.path.exists(parent_owners_file_path) and\n os.path.isfile(parent_owners_file_path)):\n return parent_owners_file_path\n return _GetHigherLevelOwnersFilePath(parent_owners_file_path)", "def find_directory_with_a_file(\n filename: str,\n cwd: Optional[Union[str, Path]] = None) -> Optional[Path]:\n if cwd is None:\n curr_dir = Path(os.getcwd()).absolute()\n else:\n curr_dir = Path(cwd).absolute()\n\n pathname = curr_dir / filename\n if pathname.exists():\n return curr_dir\n\n for work_dir in curr_dir.parents:\n pathname = work_dir / filename\n if pathname.exists():\n return work_dir\n\n return None", "def get_path(self):\n definition = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id)\n parent_id = definition.get(\"parentId\", None)\n if parent_id is not None:\n parent = DSSProjectFolder(self.client, parent_id)\n path = parent.get_path()\n return (\"\" if path == \"/\" else path) + \"/\" + definition.get(\"name\", \"\")\n else:\n return \"/\"", "def determine_subdirectories(file_path):\n\tsource_dir = file_path.replace('/data/','/data-cg1d/')\n\tlead_dir_1, subdir_1 = split_leading_directory(source_dir)\n\tlead_dir_2, subdir_2 = split_leading_directory(subdir_1)\n\tipts_dir, new_subdir = split_leading_directory(subdir_2)\n\tprint('\\n\\nsource_dir: {}\\nlead_dir_2: {}\\nsubdir_2: {}\\nipts_dir: {}\\n new_subdir: {}\\n\\n'.format(\n\t\tsource_dir, lead_dir_2, subdir_2, ipts_dir, new_subdir))\n\treturn source_dir, ipts_dir, new_subdir", "def get_current_path(self, cvs_path, lod):\n\n node = self.get_current_lod_directory(lod)\n\n for sub_path in cvs_path.get_ancestry()[1:]:\n node = node[sub_path]\n\n return node", "def _find_reporoot(self, reporoot_opt, relnotessubdir_opt):\n reporoot = os.path.abspath(reporoot_opt)\n # When building on RTD.org the root directory may not be\n # the current directory, so look for it.\n try:\n return repo.Repo.discover(reporoot).path\n except Exception:\n pass\n\n for root in ('.', '..', '../..'):\n if os.path.exists(os.path.join(root, relnotessubdir_opt)):\n return root\n\n 
raise Exception(\n 'Could not discover root directory; tried: %s' % ', '.join([\n os.path.abspath(root) for root in ('.', '..', '../..')\n ])\n )", "def get_cabal_in_dir(cabal_dir):\n for entry in os.listdir(cabal_dir):\n if entry.endswith(\".cabal\"):\n project_name = os.path.splitext(entry)[0]\n return (project_name, os.path.join(cabal_dir, entry))\n return (None, None)", "def find(name):\n\n if os.path.exists(name):\n return name\n\n path = os.path.dirname(__file__) or '.'\n filename = os.path.abspath(os.path.join(path,name))\n if os.path.exists(filename):\n return filename\n\n for d in os.listdir(path):\n fullpath = os.path.abspath(os.path.join(path,d))\n if os.path.isdir(fullpath):\n filename = os.path.abspath(os.path.join(fullpath,name))\n if os.path.exists(filename):\n return filename\n return None", "def get_path(self, project_file=None):\n root = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..')\n )\n if project_file:\n return os.path.join(root, project_file)\n else:\n return root", "def _determine_parent(self, caller):\n self.msgin(4, \"determine_parent\", caller)\n\n parent = None\n if caller:\n pname = caller.identifier\n\n if isinstance(caller, Package):\n parent = caller\n\n elif '.' in pname:\n pname = pname[:pname.rfind('.')]\n parent = self.findNode(pname)\n\n elif caller.packagepath:\n # XXX: I have no idea why this line\n # is necessary.\n parent = self.findNode(pname)\n\n self.msgout(4, \"determine_parent ->\", parent)\n return parent", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def get_cabal_project_dir_and_name_of_file(filename):\n # Check that a .cabal file is present:\n directory_of_file = os.path.dirname(filename)\n cabal_file_path = find_file_in_parent_dir(directory_of_file, '*.cabal')\n if cabal_file_path is None:\n return None, None\n # Return the directory containing the .cabal file:\n project_path, cabal_file = os.path.split(cabal_file_path)\n project_name = os.path.splitext(cabal_file)[0]\n return project_path, project_name", "def repo_root() -> str:\n path = os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))", "def get_project_config(filepath):\n config_file = Path.joinpath(filepath, \".td.cfg\")\n\n if Path.home() >= filepath:\n return None\n elif Path.exists(config_file):\n return config_file\n else:\n return get_project_config(filepath.parent)", "def _parent_path(cls,path):\n # os.path.dirname(), but strip directories like files (like unix basename)\n # Treat directories like files...\n if path[-1]=='/':\n path=path[:-1]\n ret = os.path.dirname(path)\n return ret", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def get_project_folder(project_dir: str) -> str:\n from pathlib import Path\n d = Path().resolve()\n while d.as_posix().split('/')[-1] != project_dir:\n d = d.parent\n return d.as_posix()", 
"def test_team_template_folders_id_parent_get(self):\n pass", "def GetFeedParent(organization, project, folder):\n if organization:\n return 'organizations/{0}'.format(organization)\n if folder:\n return 'folders/{0}'.format(folder)\n return 'projects/{0}'.format(project_util.GetProjectNumber(project))", "def get_parent_build(build):\n parent_buildername = build.properties_as_dict['parent_buildername']\n parent_builder = build.builders[parent_buildername]\n return parent_builder.builds[build.properties_as_dict['parent_buildnumber']]", "def get_output_folder(parent_dir, env_name):\n os.makedirs(parent_dir, exist_ok=True)\n experiment_id = 0\n for folder_name in os.listdir(parent_dir):\n if not os.path.isdir(os.path.join(parent_dir, folder_name)):\n continue\n try:\n folder_name = int(folder_name.split('-run')[-1])\n if folder_name > experiment_id:\n experiment_id = folder_name\n except:\n pass\n experiment_id += 1\n\n parent_dir = os.path.join(parent_dir, env_name)\n parent_dir = parent_dir + '-run{}'.format(experiment_id)\n return parent_dir", "def find_parent(folder: str, parents, parent=None):\n\n if parent is None:\n for parent_name, parent_path in parents.items():\n if os.path.exists(os.path.join(parent_path, folder)):\n return parent_name\n elif parent in parents:\n parent_name = parent\n parent_path = parents[parent]\n if os.path.exists(os.path.join(parent_path, folder)):\n return parent_name\n else:\n assert False, \"Invalid parent specified: %s\" % parent", "def find_conf():\n path = os.path.abspath(os.path.expanduser(os.getcwd()))\n while path not in ('', '/'):\n conf_path = os.path.join(path, 'dataplicity.conf')\n if os.path.exists(conf_path):\n return conf_path\n path = os.path.dirname(path)\n return None", "def find_pyproject_toml(files_or_directories: Sequence[Path]) -> Optional[Path]:\n if not files_or_directories:\n files_or_directories = [Path.cwd()]\n common = Path(os.path.commonpath(files_or_directories)).absolute()\n for p in [common] + list(common.parents):\n fn = p / \"pyproject.toml\"\n if fn.is_file():\n return fn\n return None", "def _find_parent(child: ModuleInfo, potentialParents: List[ModuleInfo])\\\n -> ModuleInfo:\n\n return next(\n (p for p in potentialParents if child.get_name() in p.get_children()),\n None\n )", "def findDirectory ( path ) :\r\n\r\n if path is None : return None\r\n\r\n library = \"libraryPython\"\r\n\r\n if sys.platform.startswith( \"win\" ) :\r\n\r\n path = path.lower()\r\n\r\n library = library.lower()\r\n\r\n directory, dummy = os.path.split( path )\r\n\r\n # particular cases: we are in the library, or in a subfolder of the library\r\n\r\n if path.endswith( os.sep + library ) : return path\r\n\r\n if directory.endswith( os.sep + library ) : return directory # **EF os.sep + ...\r\n\r\n # looks in subdirectories\r\n \r\n while True :\r\n\r\n # looks for path/libraryPython/\r\n\r\n libraryPath = path + os.sep + \"libraryPython\" \r\n\r\n if os.path.exists( libraryPath ) : return libraryPath\r\n\r\n # looks for path/*/libraryPython/\r\n \r\n items = os.listdir( path )\r\n\r\n for item in items :\r\n\r\n libraryPath = path + os.sep + item + os.sep + \"libraryPython\"\r\n \r\n if os.path.exists( libraryPath ) : return libraryPath\r\n\r\n\r\n # goes to parent directory\r\n \r\n directory, dummy = os.path.split( path )\r\n\r\n # not found\r\n\r\n if directory == path : break\r\n\r\n path = directory\r\n\r\n return None", "def parent_path_with_file(name, path=None):\n return parent_path_with(lambda p: os.path.isfile(os.path.join(p, name)), 
path=path)", "def find_proj_root(self, descendant: Path, file_names: list, dir_names: list, max_levels: int = 25):\n count_test_files = len(file_names) if isinstance(file_names, list) else 0\n count_test_files += len(dir_names) if isinstance(dir_names, list) else 0\n if count_test_files == 0:\n raise RuntimeError(\"_find_proj_root() must be given at least one expected file/dir in project root\")\n levels = 0\n if descendant.is_dir() and self._dir_contains(descendant, file_names) and self._dir_contains(descendant, dir_names, True):\n return descendant\n for d in descendant.parents:\n if max_levels < 1 or levels < max_levels:\n levels += 1\n else:\n break\n if self._dir_contains(d, file_names) and self._dir_contains(d, dir_names, True):\n return d\n return None", "def find_parent(self):\n pass", "def find_metadata_dir(cube, image):\n return find_subdir(cube, image, 'metadata')", "def r2n2_depth_dir(self):\n root = '/DATA_PATH/3dr2n2-renders/3dr2n2/'\n if self.depth_native_res == 137:\n depth_dir = root + 'depth-137/%s/%s' % (self.synset, self.mesh_hash)\n elif self.depth_native_res == 224:\n depth_dir = root + 'depth-normals-npy-224/%s/%s' % (self.synset,\n self.mesh_hash)\n else:\n assert False\n return depth_dir", "def __getLibAbsPath(currentPath, depth):\n libPath = currentPath\n while depth:\n libPath = os.path.split(libPath)[0]\n depth -= 1\n return libPath", "def test_level1_recursion(self):\n recursed = recurse_files('filename', self.files['filename'], self.files)\n self.assertEqual(recursed, [\"file7\", \"file2\", \"file3\"])", "def toplevel_subdir( path, target_dir ):\n parts = split_all( path )\n rparts = split_all( target_dir )\n assert( rparts == parts[ :len( rparts ) ] )\n return parts[ len( rparts ) ]", "def project(self) -> 'prjmod.Level':\n return self.lineage.project", "def get_output_folder(parent_dir, env_name):\n os.makedirs(parent_dir, exist_ok=True)\n experiment_id = 0\n for folder_name in os.listdir(parent_dir):\n if not os.path.isdir(os.path.join(parent_dir, folder_name)):\n continue\n try:\n folder_name = int(folder_name.split('-evaluate')[-1])\n if folder_name > experiment_id:\n experiment_id = folder_name\n except:\n pass\n experiment_id += 1\n\n parent_dir = os.path.join(parent_dir, env_name)\n parent_dir = parent_dir + '-evaluate{}'.format(experiment_id)\n return parent_dir" ]
[ "0.59310746", "0.5910224", "0.5907544", "0.56730086", "0.567043", "0.54829234", "0.5468787", "0.5349158", "0.53045815", "0.52922255", "0.52142084", "0.52010685", "0.519746", "0.51958925", "0.51642823", "0.5158747", "0.51506376", "0.51362514", "0.5133889", "0.51276433", "0.511751", "0.5075891", "0.50754297", "0.5072611", "0.5060326", "0.5051632", "0.50437516", "0.50249946", "0.5024484", "0.49995455", "0.49963167", "0.49946406", "0.49913278", "0.49711454", "0.4967529", "0.49553546", "0.4951974", "0.49418446", "0.49325585", "0.49322107", "0.49148643", "0.4910237", "0.49010432", "0.4900638", "0.4881171", "0.487925", "0.4875197", "0.48715028", "0.48685685", "0.48685467", "0.48613006", "0.4859647", "0.48562714", "0.48532042", "0.48491234", "0.48339176", "0.48339176", "0.48339176", "0.48339176", "0.48291534", "0.48267993", "0.48260388", "0.48206556", "0.4811749", "0.48056605", "0.47931215", "0.4792792", "0.4782834", "0.4782558", "0.47754654", "0.4775169", "0.47718653", "0.4769201", "0.47674435", "0.47674435", "0.47597185", "0.47586927", "0.4753547", "0.47432905", "0.4742397", "0.4741739", "0.4740785", "0.47406948", "0.47391713", "0.4734164", "0.47333097", "0.47324872", "0.4726438", "0.47236633", "0.4713936", "0.47118023", "0.4703271", "0.4700634", "0.46991503", "0.4692606", "0.4690947", "0.4690745", "0.4687519", "0.4685964", "0.46858388" ]
0.72337234
0
Send 1 to enable the motor.
def timer_callback(self): self.i += 1 ms=MotorState() ms.state=1 self.motor_state_pub.publish(ms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_motor():\n print('Enabling motor')\n start_motor = '{\"id\" : \"Motor1\", \"enabled\" : \"1\"}'\n SERIAL_PARENT.send(start_motor)\n OUTGOING.append(start_motor)", "def enable_motor(self, enabled):\r\n self.enabled = enabled\r\n\r\n # Set motors in neutral if disabling.\r\n if not self.enabled:\r\n self.set_neutral()", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def turn_on(self):\n self._remote.power(1)", "def enable_charge_pump(enable):\n send_command(0x8D)\n if enable:\n send_command(0x14)\n else:\n send_command(0x10)", "def force_switch_on(self):\n self.turn_on_modem()", "def turnOn(self):\n self.write('E;O1;E;')\n return self.output()", "def _led_enable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.LOW)", "def turn_on(self):\n self.write(\"OUT1\\n\")", "def enable(self):\n self.switch.enable()\n self._enabled = True", "def togglePWMEnable(self):\n mask = 1 << 3\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "def turn_on(self):\n repeatedFEs = {19200:25, 9600:13, 4800:7, 1200:3, 300:2}\n on_cmd_pre = [0xFE] * repeatedFEs[self.connection.BAUD]\n on_cmd = Command()\n on_cmd.set_num(0x18)\n on_cmd.set_subcmd_num(0x01)\n\n self.connection.send_cmd(on_cmd_pre + on_cmd.render())", "def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1", "def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()", "def run_motor(self, motor, power):\n self.run_flag = True\n super(RemoteControl, self).run_motor(motor, power)", "def turn_on(self, **kwargs) -> None:\n self._device.writeCharacteristic(self._handle, b'\\x00', True)\n self._state = True\n self.schedule_update_ha_state()", "def toggleCounterEnable(self):\n mask = 1\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "async def async_turn_on(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"off\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"auto-on\")\n\n await self._ctrl.force_update()", "def on(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0xa0, [])\n if status == 0:\n self.ev.set()\n return (status == 0)", "def turn_on_modem(self):\n if not self.is_power_on():\n self._logger.debug(\"Switching modem on...\")\n self.set_pin()\n # give modem some time to login\n time.sleep(10)\n else:\n self._logger.debug(\"Modem is already powered on...\")", "async def power_on(self):\n ...", "def r1_on_off():\n \n r1_cmd_packet = b'\\x04\\x14\\x01\\x00\\x00\\xe7\\x0f'\n ser_relay.write(r1_cmd_packet)", "def turn_on(self, **kwargs):\n self.smartplug.turn_on()", "def ON(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.PIN, GPIO.OUT)\n GPIO.output(self.PIN, True)\n self.STATUS = \"ON\"", "async def async_turn_on(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"on\"):\n self._is_on = True\n 
self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")", "def on(self):\n\t\trb0 = [0x00]\n\t\trb1 = [0x00, 0x00]\n\t\tattempts = 0\n\n\t\twhile self.state != ON and attempts < MAX_RETRIES:\n\t\t\tself.spi.transfer([0x03], rb0, 1)\t\t## Send the command byte; response will be written to rb0\n\t\t\ttime.sleep(9e-3) \t\t\t\t\t\t## Sleep for 9 ms\n\t\t\tself.spi.transfer([0x00, 0x01], rb1, 2)\t## Send the following 2 bytes; response will be written to rb1\n\t\t\ttime.sleep(0.1)\n\n\t\t\tif rb0[0] < 0: \t\t\t\t\t\t## Account for implicit unsigned-to-signed \n\t\t\t\trb0[0] += 256\t\t\t\t\t## conversion from the transfer operation\n\n\t\t\tattempts += 1\n\t\t\tprint(f\"[{self.__class__.__name__}::on]\", end=' ')\n\t\t\tif rb0[0] == 0xF3 and rb1[0] == 0x03: \t## Ensure response values are as expected\n\t\t\t\tself.state = ON \n\t\t\t\tprint(\"SUCCESS -- device powered on.\")\n\t\t\telse:\n\t\t\t\tif attempts != MAX_RETRIES:\n\t\t\t\t\tprint(f\"Attempt #{attempts} failed -- retrying after delay ...\")\n\t\t\t\t\ttime.sleep(RETRY_DELAY)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ERROR -- command failed.\")\n\n\t\treturn self.state == ON", "def turn_on(self, **kwargs):\n self._is_on = True\n self.schedule_update_ha_state()\n self.hass.data[ZIGATE_DOMAIN].action_onoff(self._device.addr,\n self._endpoint,\n 1)", "def turn_output_on(self):\n self.instr.write('RF1')\n time.sleep(self.sleep_time)", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "def enable_1_wire_port(self):\n return self._enable_1_wire_port", "def power_on(self):\n return self.inst.write(':OUTP ON')", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "def enable(self):", "def enable_relays(self):\n #ensure clock and data are low\n self.e.clear_bit(7)\n self.e.clear_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)", "def enable(mikrobus_index):\n ret = _LIB.oled_click_enable(mikrobus_index)\n if ret < 0:\n raise Exception(\"oled click enable failed\")", "def turn_on(self, **kwargs):\n self._send_command(\"turn_on\")", "def _turn_on(self):\n logger.info(\"Check antenna power\")\n power = yield WaitDBus(self.gsm_device.GetAntennaPower)\n logger.info(\"antenna power is %d\", power)\n if power:\n yield None\n logger.info(\"turn on antenna power\")\n try:\n yield WaitDBus(self.gsm_device.SetAntennaPower, True)\n except dbus.exceptions.DBusException, ex:\n if ex.get_dbus_name() != 'org.freesmartphone.GSM.SIM.AuthFailed':\n raise\n yield self._ask_pin()", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light", "def enable(self):\n self.write(\":OUTPUT ON;\")", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def enable(self, message):\n self.Enable()", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def enable(self):\n try:\n self.bus.open(self.BUS_NUMBER)\n self.write(AntennaDeployerCommand.ARM_ANTS, 0x00)\n self.bus.close()\n return True\n except:\n return False", "def setLED(self):\n newValue = 0\n while newValue != '':\n newValue = input('Enter 0 or 1 to turn LED on or off or enter to exit.\\n')\n \n if 
newValue == '0':\n self.board.write(b'0')\n time.sleep(1)\n elif newValue == '1':\n self.board.write(b'1')\n time.sleep(1)\n else:\n time.sleep(1)", "def power_on(self):\n pass", "def readout_enable(self, enable):\n dlm = 8 if enable else 9\n self.send_command(dlm)", "def enable_robot(self):\n self._franka_robot_enable_interface.enable()", "def setOn(self, command):\r\n self.setDriver('ST', 1)", "def enable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return", "def enable_radio(self):\n self.acquire_response(b'AT*R1')", "def turn_on(self, **kwargs):\n self._is_on = True", "def enable(self):\n self._enabled = True", "def turn_on(self, **kwargs):\n set_sonoff_state(self._host, \"on\")\n self._state = True", "def turn_on(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 1)", "def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)", "def enable(self) -> None:", "def _nixie_enable():\n # type: () -> None\n GPIO.output(NIXIE_nOE, GPIO.LOW)", "def turn_on(self) -> None:\n self._monoprice.set_power(self._zone_id, True)", "def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")", "def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')", "def enable_i2s(self, enable):\n control = self.get_control()\n if enable:\n control = control | CONTROL_ENABLE\n else:\n control = control & (~CONTROL_ENABLE)\n\n self.set_control(control)", "def turn_on(self, **kwargs):\n if not self.is_on:\n _LOGGER.debug(\"Sending START command to: %s\", self._name)\n self._api.control('START')\n self._mower_status = STATUS_EXECUTING_START\n self.schedule_update_ha_state()", "def poweron(self) -> None:\n self.servo_reset()", "def turnOnSdkMode(self):\n \n command = b\"\\x90\\x01\\x01\"\n #print(\"turnOnSdkMode run, command: \")\n #print(command)\n \n self.sendCommand(command)", "def enable(self):\r\n self.update(enabled=True)", "def SPIwriteenable(self):\n data=[0x06];\n self.SPItrans(data);", "def enable(self):\n pass", "async def enable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(True)", "async def async_turn_on(self):\n data_cmd = _command(COMMAND_POWER_ON)\n await self._async_send_command(data_cmd)", "def turn_on(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn on\"):\n self.wemo.on()", "async def async_turn_on(self, **kwargs):\n try:\n state_on = await self._api.set_relay_state(\n self._dev_id, self._members, \"on\"\n )\n if state_on:\n self._is_on = True\n self.async_write_ha_state()\n except PlugwiseException:\n _LOGGER.error(\"Error while communicating to device\")", "def emitters_on(self):\n self.wp.digitalWrite(self.LEDON_PIN, self.wp.HIGH)\n self.wp.delayMicroseconds(20)", "def enable(self, coil):\n self.log.info(\"RASPDriver.Enable(%s %s)\" % (coil.config['label'], coil.hw_driver.number))\n self.platform.communicator.driver_enable(coil.hw_driver.number)\n pass", "def change_stepper_status(self, status):\n\n if status:\n GPIO.output(26, GPIO.HIGH)\n else:\n GPIO.output(26, GPIO.LOW)", "def enable():\n boutonPierre[\"state\"] = \"normal\"\n boutonFeuille[\"state\"] = \"normal\"\n boutonCiseaux[\"state\"] = \"normal\"", 
"def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)", "def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def _turn_on_dev_mode(self):\n if self._device is not None:\n self._char_write(self._BLE_SERVICE_ANTI_DOS,\n [ord(c) for c in self._ANTI_DOS_MESSAGE])\n self._char_write(self._BLE_SERVICE_TX_POWER,\n [self._TX_POWER_VALUE])\n # Sending 0x01 to the wake service wakes the sphero.\n self._char_write(self._BLE_SERVICE_WAKE, [0x01])", "async def enable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation enabled.\"))", "def _doEnableRegulation(self):\n self._cmdRegulOn()", "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "def enable(self):\n options = self.get_direction_options()\n self.direction = options[0]\n self.state['enabled'] = True\n self.sound_manager.play_loop('std')", "def cmd(self, data, enable):\n pass", "async def async_turn_on(self) -> None:\n self._zone.power = True", "def enable_lock(self, lock_on=True):\n if lock_on:\n self.write('ScanM_Mode=2') #Search\n time.sleep(10)\n self.write('ScanM_Mode=3') #Lock, its unclear from manual if\n #this is redundant. i.e. autolocks\n #at end of search\n if not self.query_lock_status():\n raise ac_excepts.CouplingkError('Not meeting threshold power',\n self.enable_lock)\n if not lock_on:\n self.write('ScanM_Mode=0') #Off", "def set_enable(self, enable):\n\n with AutoUpdater._lock:\n if isinstance(enable, Bus):\n AutoUpdater.remove_link(self._enable)\n AutoUpdater.add_link(\n enable,\n self._enable)\n else:\n raise ValueError(\n \"ERROR: Invalid Enable input. 
Enable must be a \"\n \"1-bit Bus or a Connector.\")", "def send(self,cmd):\n bit_list = '{:b}'.format(int(cmd,16))\n self._lead()\n for i in bit_list:\n self.ir_pin.duty(512)\n time.sleep_us(_Const.NEC_BIT_MARK)\n self.ir_pin.duty(0)\n if i == '0':\n time.sleep_us(_Const.NEC_ZERO_SPACE)\n else:\n time.sleep_us(_Const.NEC_ONE_SPACE)\n self._end()", "def r4_on_off():\n \n r4_cmd_packet = b'\\x04\\x14\\x08\\x00\\x00\\xe0\\x0f'\n ser_relay.write(r4_cmd_packet)", "def turn_on(self):\n self._state = True\n self.write_state(bytes([9]))\n self.schedule_update_ha_state()", "def power():\n request_command(tv_command=TVCommand.power)", "def enable_button(self, index):\n if index != 0:\n self.roll_dem_bones.setEnabled(True)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def enable(self, *args, **kwargs):\n pass", "def on(self):\n print(f\"RF {self.name} on\")\n self.status(True)", "def enable(self):\n if not self.tm_started:\n for name, tm in self.air_traffic_manager.items():\n logging.debug(\"Starting tm %s\" % name)\n tm.start()\n tm_started = True\n\n logging.debug(\"Enabling switch %s\" % self.name)\n self.disabled = False", "def _turn_on(self):\n self._turn_display('ON')", "def enable_amplitude_modulation(self):\n self.write(\":SOUR:AM:STAT ON\")", "def enable(self):\n if not self.labExperiment:\n super().enable()\n else:\n self.connection.command('open_dm', self.DMserial)\n status = self.connection.query('get_status')\n assert status == 0, 'Error connecting to DM. Error: ' + str(status)\n numActProfile = self.connection.query('num_actuators')\n assert numActProfile == self.numActProfile, 'Wrong number of profile actuators entered'\n print(\"'BM1k' is now enabled\")" ]
[ "0.8308927", "0.72755456", "0.71145564", "0.69568515", "0.68919075", "0.6866074", "0.6863435", "0.68514323", "0.683486", "0.6831353", "0.68029517", "0.6771648", "0.6737354", "0.6695286", "0.66666657", "0.6659766", "0.66348124", "0.66312325", "0.6606162", "0.65834403", "0.657781", "0.6559701", "0.6530475", "0.65258783", "0.64941055", "0.64753723", "0.64470416", "0.6446813", "0.6442043", "0.6442043", "0.6440485", "0.6433436", "0.64242834", "0.64217323", "0.6416613", "0.6408326", "0.6388705", "0.637229", "0.6370534", "0.63688046", "0.6365153", "0.6356579", "0.6347082", "0.6338739", "0.63368815", "0.63268214", "0.63222635", "0.630188", "0.6291133", "0.6282182", "0.62818104", "0.628166", "0.6281454", "0.6274862", "0.6244004", "0.6241089", "0.6236448", "0.6234621", "0.62333506", "0.6220678", "0.62187266", "0.6214327", "0.61995935", "0.6184445", "0.6181252", "0.6179298", "0.61649406", "0.6163858", "0.6160597", "0.6157896", "0.6154449", "0.6148152", "0.6140777", "0.61398315", "0.61328113", "0.61322546", "0.6126976", "0.6117472", "0.6116165", "0.611543", "0.6112125", "0.61101925", "0.6102391", "0.6077852", "0.6074608", "0.6070396", "0.6048152", "0.6033931", "0.6026287", "0.6016403", "0.6007701", "0.60051876", "0.5984544", "0.5981887", "0.597298", "0.5969479", "0.59671205", "0.59617", "0.5960848", "0.5954168", "0.59540766" ]
0.0
-1
Create the list of dictionaries
def load_training_data(vocab, directory): top_level = os.listdir(directory) dataset = [] for d in top_level: if d[-1] == '/': label = d[:-1] subdir = d else: label = d subdir = d+"/" files = os.listdir(directory+subdir) for f in files: bow = create_bow(vocab, directory+subdir+f) dataset.append({'label': label, 'bow': bow}) return dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_list_dict(list_of_nodes: List['Part']):\n list_of_part_dict = list()\n\n return list_of_part_dict", "def codebook_json_data_factory() -> List[Dict[str, Any]]:\n codebook_data = [\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 0, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_A\"\n },\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 2, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_B\"\n },\n ]\n return codebook_data", "def to_list_of_dicts(self) -> List[Dict[str, Any]]:\n return [this.to_dict() for this in self._elements]", "def init_data(stats_list):\n\n data = {stats_name: {} for stats_name in stats_list}\n return data", "def get_dicts(self, clean=False):\n return list(self.iter_dicts(clean=clean))", "def create_dict(list_database):\n return_dict = dict()\n for key, value in list_database:\n if key != None:\n return_dict[key] = value\n return return_dict", "def book_list_manipulation():\n global book_list\n book_json = {}\n for book in book_list:\n book_container = {'title': book.title,\n 'author': book.author,\n 'read': str(book.read),\n 'rating': book.rating,\n 'review': book.review,\n 'date read': book.date_read,\n 'id': str(book.id)}\n book_json[str(book.id)] = book_container\n\n return book_json", "def construct_all_holdings(self):\n d = dict((k,v) for k,v in [(s,0.0) for s in self.symbol_list])\n d['datetime'] = self.start_date\n d['cash'] = self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n \n return [d]", "def construct_all_holdings(self):\n d = dict((k,v) for k,v in [(s,0.0) for s in self.symbol_list])\n d['datetime'] = self.start_date\n d['cash'] = self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n \n return [d]", "def create_dict(self, data):\n\n for i in range(len(data)):\n for j in range(len(data[i])):\n if i+1 < len(data) and \":\" in data[i][j] and \"}\" not in data[i] and \"{\" not in data[i] and data[i+1][0] == \"{\":\n data[i] = data[i] + [data[i+1][0]]\n data[i+1] = data[i+1][1:]\n\n if \":\" in data[i][j] and data[i][j+1] != \"{\":\n data[i][j+1] += \",\\n\"\n elif data[i][j] == \"}\":\n data[i][j] = \"},\\n\"\n elif data[i][j] == \"{\":\n data[i][j] = \"{\\n\"\n data = \"\".join([\"\".join(i) for i in data])\n data = self.separate(data)\n result = []\n\n for item in data:\n ind = item.index(\":\")\n name = item[1:ind-1] # fetch name of date and remove quotes\n if \"{\" in item:\n con = ast.literal_eval(item[ind+1:])[0]\n else:\n con = item[ind+2:-3]\n result.append([name, con])\n return result", "def makeDict(result_list):\n \n result_dict = dict()\n for line in result_list:\n if line[0] == 'set_property' and line[3] == 'get_ports':\n if line[4] not in result_dict:\n result_dict[line[4]] = dict()\n result_dict[line[4]][line[1]] = line[2]\n\n return result_dict", "def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []", "def list_flattened_to_dict(self, listH, defaultItem={}):\n dictList = defaultItem\n for name in reversed(listH):\n dictList = {name: dictList}\n return dictList", "def get_info(self) -> List[Dict]:\n return [{} for _ in range(self.num)]", "def __convert_data_to_list_of_dict__(self, data):\n jsons = list()\n for row in data:\n json_for_row = dict(zip(self.__fieldnames__, row))\n 
jsons += [json_for_row]\n return jsons", "def dicts(self):\n if self._dicts is None:\n self.make_dicts()\n return self._dicts", "def create_time_dicts(\n Spc:Dict) -> List[Dict]:\n abreviations = ['Kd','Km','Kq']\n daily_range = Spc['daily']\n monthly_range = Spc['monthly']\n quarterly_range = Spc['quarterly']\n all_ranges = np.cumsum([0,daily_range,monthly_range,quarterly_range])\n\n out_list = []\n for i,abrev in enumerate(abreviations):\n temp_dict = {}\n temp_dict['range'] = range(all_ranges[i],all_ranges[i+1])\n temp_dict['one'] = np.ones(Spc[abrev])\n temp_dict['w'] = np.arange(1,Spc[abrev]+1)/Spc[abrev]\n temp_dict['k'] = np.arange(1,Spc[abrev]+1)\n temp_dict['kk'] = Spc[abrev]\n out_list.append(temp_dict)\n\n return out_list", "def getCacheDictFromRawData( rawList ):\n\n res = [ ( ( name, sType ), status ) for name, sType, status in rawList ]\n return dict( res )", "def make_dicts(self):\n self._dicts = [tree.to_dict() for tree in self.reaction_trees]\n self._update_route_dict(self._dicts, \"dict\")", "def list_items(self) -> List[Dict[str, Any]]:\n return [c.to_dict() for c in self._objects.values()]", "def derive_url_dicts(self, url_obj_list):\n dict_list = []\n for url_obj in url_obj_list:\n dict_list.append(self.derive_url_dict(url_obj))\n return dict_list", "def dictogram_dictlist(self):\n for key, value in self.word_dict.items():\n self.word_dict[key] = dictogram.Dictogram(value)\n # print(\"self.word_dict\", self.word_dict)", "def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n return d", "def __init__(self):\r\n self.dct = defaultdict(list)", "def to_dict(self) -> List[Dict[str, Any]]:\n return [x.to_dict() for x in self.params]", "def __init__(self):\n self.list = []\n self.dict = {}", "def __init__(self):\n self.list = []\n self.dict = {}", "def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]", "def get_list():\n hash_map_list = model.hash_table.find()\n data = dict(success=True, hash_map_list=hash_map_list)\n return data", "def construct_output_dict():\n list_of_recipes = construct_list_of_recipes()\n output_dict = {}\n for recipe_list in list_of_recipes:\n recipe_instance = construct_recipe_object(recipe_list)\n recipe_dict = recipe_instance.construct_json_rep_obj()\n for k, v in recipe_dict.iteritems():\n output_dict[k] = v\n output_dict = filter_output_dict(output_dict)\n return {'recipes': output_dict}", "def __init__(self):\n self.dict = {}\n self.list = []", "def __init__(self):\n self.dict = {}\n self.list = []", "def __init__(self):\n self.dict = {}\n self.list = []", "def into_data(self) -> List[Dict[str, Any]]:\n if self.previous:\n data = self.previous.into_data()\n else:\n data = []\n\n datum: Dict[str, Any] = dict(\n required={name: up_to_date.into_data() for name, up_to_date in self.required.items()}\n )\n\n if self.command is None:\n assert self.start is None\n assert self.end is None\n else:\n assert self.start is not None\n assert self.end is not None\n datum[\"command\"] = self.command\n datum[\"start\"] = str(self.start)\n datum[\"end\"] = str(self.end)\n\n data.append(datum)\n return data", "def get_datalist_fr_json(self):\n raw_data = json.load(open(self.saved_json_file, 'r'))\n for indivdual_set in raw_data['query']['results']['stats']:\n temp_dict_data = {}\n if type(indivdual_set) == str:\n #for 
single data\n continue # temp do not use\n for parameters in indivdual_set.keys():\n if type(indivdual_set[parameters]) == str:\n temp_dict_data[parameters] = indivdual_set[parameters]#for symbol\n elif type(indivdual_set[parameters]) == dict:\n if indivdual_set[parameters].has_key('content'):\n temp_dict_data[parameters] = indivdual_set[parameters]['content']\n\n ## append to list\n self.com_data_allstock_list.append(temp_dict_data)", "def __init__(self, n):\n self._dictOut = {}\n self._dictIn = {}\n for i in range(n):\n self._dictOut[i] = []\n self._dictIn[i] = []", "def to_listing_dict(self) -> dict:\n data = super().to_listing_dict()\n return data", "def generate_data_list(self, data_list):\n sol = []\n for i in data_list:\n sol.append({\n \"data_sig\": i[0],\n \"data\": pickle.loads(i[1]),\n \"attachment\": i[2],\n \"likes\":pickle.loads(i[3]),\n \"dislikes\":pickle.loads(i[4]),\n \"owner\": i[5]\n })\n return sol", "def createDictInstance(self):\n\n dictinstance = {}\n for i in range(len(self.instancenumbers)):\n dictinstance.setdefault(self.instancenumbers[i], []).append(i)\n\n return dictinstance", "def _construct_all_holdings(self):\n d = dict((s, 0.0) for s in self.symbol_list)\n d['datetime'] = self.backtest_date\n d['cash'] = self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n d['buy_times'] = 0\n d['sell_times'] = 0\n d['total_times'] = 0\n d['hold'] = 0\n return [d]", "def __init__(self):\n self.dict = collections.defaultdict(list)", "def __init__(self):\n self.dict = collections.defaultdict(list)", "def createDictionary(self):\n\t\tdictionary: dict = {}\n\t\tdictionary.update({'deckname': self.mDeckName})\n\t\tdictionary.update({'filename': self.autoFilename})\n\t\tdictionary.update({'creatorname': str(self.mCreatorname)})\n\t\tdictionary.update({'maxAttrPoints': str(self.mMaxAttributePoints)})\n\t\tminionListDict: dict = {}\n\t\tfor minion in self.mMinionSet:\n\t\t\tminionDict: dict = {}\n\t\t\tminionDict.update({'minionName': str(minion.mMinionName)})\n\t\t\tminionDict.update({'attack': str(minion.mAttackPoints)})\n\t\t\tminionDict.update({'hp': str(minion.mHealthPoints)})\n\t\t\tskillList: list = minion.mSkills\n\t\t\tskillNames: list = []\n\t\t\tfor skill in skillList:\n\t\t\t\tskillNames.append(skill.mSkillName)\n\t\t\tminionDict.update({'skills': skillNames})\n\t\t\tminionListDict.update({minion.mMinionName: minionDict})\n\t\tdictionary.update({'minions': minionListDict})\n\t\tdictionary.update({'id' : hash(str(dictionary))}) # TODO LPO: let DB handle that\n\t\tself.mDeckDict = dictionary\n\t\treturn dictionary", "def to_dictionary(self):\n list_dic = {}\n list_dic['id'] = self.id\n list_dic['width'] = self.__width\n list_dic['height'] = self.__height\n list_dic['x'] = self.__x\n list_dic['y'] = self.__y\n return (list_dic)", "def __toListOfDict(self, cursor):\n lst = []\n for row in cursor.fetchall():\n # first convert row to a dictionary\n rowdict={}\n for idx, col in enumerate(cursor.description):\n rowdict[col[0]] = row[idx]\n lst.append(rowdict)\n return lst", "def to_dict(cls):\n return dict((item.name, item.number) for item in iter(cls))", "def to_dict(self) -> List[dict]:\n return asyncio.run(self.parse(key=\"series\"))", "def __init__(self):\n self.dic = dict()\n self.lst = list()", "def list(self):\n return {\n k: json.loads(v)\n for k, v in iteritems(self._db.hgetall(self.index))\n }", "def build_unq_dict_lst(self, lst1, lst2, key1 = \"start_index\", key2 = \"random_seed\"):\n dict_lst = []\n for i in range(len(lst1)):\n for j 
in range(len(lst2)):\n dictt = {}\n dictt[key1] = lst1[i]\n dictt[key2] = lst2[j]\n dict_lst.append(dictt)\n return dict_lst", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def _json_inv_to_dict_list(inv: List[dict]) -> List[dict]:\n flat_inv = []\n for price_item in inv:\n d = dict()\n e = price_item['entries'][0] # could be multiple matches, but use the first one.\n d['item_id'] = str(e['item']['no'])\n d['color_id'] = str(e['color_id'])\n d['name'] = e['item']['name']\n d['itemtype'] = e['item']['type']\n d['category_id'] = str(e['item']['category_id'])\n d['quantity'] = int(e['quantity'])\n flat_inv.append(d)\n\n return flat_inv", "def new(num_buckets=256):#用空列表初始化字典\n\taMap=[]\n\tfor i in range(num_buckets):\n\t\taMap.append([])\n\treturn aMap", "def dict_copies(my_dict, num_copies):\n answer = []\n for idx in range(num_copies):\n answer.append(dict(my_dict))\n return answer", "def _to_dict(self, data_list):\n data_dict = dict(pair.split('=') for pair in data_list)\n return data_dict", "def return_dictionary_list(lst_of_tuples):\r\n orDict = defaultdict(list)\r\n # iterating over list of tuples\r\n for key, val in lst_of_tuples:\r\n orDict[key].append(val)\r\n return orDict", "def dict_list(self):\n word_list = []\n for index in range(len(self.token)-1):\n if self.token[index] not in self.word_dict:\n word_list.append(self.token[index+1]) # Appends words that follow the types\n self.word_dict[self.token[index]] = word_list # Create a dictionary based on types and it's word list\n else:\n if self.token[index] in self.word_dict.keys():\n value = self.word_dict.get(self.token[index]) # gets the list if the type already exists\n value.append(self.token[index+1]) # and append the new word to the list\n word_list = []\n # print(\"self.word_dict\", self.word_dict)", "def make_dict(self):\n return self.generate_widgets()", "def get_json_adata( activity_list ):\n\tdata = []\n\tfor idx,activity in enumerate( activity_list ):\n\t\ttemp = {\n\t\t\t'name' \t\t\t: activity.get_name(),\n\t\t\t'url' \t\t\t: activity.get_url(),\n\t\t\t'type'\t\t\t: activity.get_activity_type(),\n\t\t\t'hour'\t\t\t: activity.get_hours(),\n\t\t\t'locations'\t\t: activity.get_location_list(),\n\t\t\t'tweet_count'\t: activity.tweet_count\n\t\t}\n\t\t# data[ str( idx ) ] = temp\n\t\tdata.append(temp)\n\treturn data", "def get_keys_from_list():\n json_data = request.get_json()\n\n d = dict()\n d['elements'] = list()\n settings.setOptionsFile(get_info('uid'))\n fn = settings.getHistoROOTFileName()\n rfn = settings.getReferenceROOTFileName()\n# open root file stored in the root database\n f = ROOT.TFile(fn)\n# open reference root file stored in the root database\n rf = ROOT.TFile(rfn)\n\n for values in json_data.itervalues():\n for k in values:\n subd = dict()\n subd[\"index\"] = k[\"index\"]\n if fn != k[\"file\"]: \n fn = k[\"file\"]\n settings.setHistoROOTFileName(fn)\n f = ROOT.TFile(fn)\n print \"histogram :>>>>>: \",k[\"histogram\"]\n subd[\"data\"] = eval(cppyy.gbl.getDictionary(f,k[\"histogram\"]))\n if rfn != k[\"referenceFile\"]: \n rfn = k[\"referenceFile\"]\n settings.setReferenceROOTFileName(rfn)\n rf = ROOT.TFile(rfn)\n subd[\"refdata\"] = eval(cppyy.gbl.getDictionary(rf,k[\"reference\"]))\n 
d['elements'].append(subd)\n\n f.Close()\n rf.Close()\n\n return jsonify(d)", "def __init__(self):\n self.dic = defaultdict(list)", "def __init__(self):\n self._list = []\n self._dict = {}", "def call(self) -> List[Dict]:", "def createHistoryDictList(histList):\n if histList[0][0][0] == \"<\":\n return [dict()]\n\n return [\n dict(\n (\n histList[0][i],\n tryFloat(histList[j][i])\n ) for i in range(len(histList[0]))\n ) for j in range(1, len(histList))\n ]", "def to_dict(self) -> List[Dict[str, Any]]:\n return [x.to_dict() for x in self.inputs]", "def __init__(self):\n self.dic={}\n self.data=[]", "def makeDictionary(self):\n self.dictionary = {}\n for i in range(len(self.movie)):\n if self.movie[i] in self.dictionary:\n vectors = self.dictionary[self.movie[i]]\n vectors[self.user[i]] = self.rating[i]\n self.dictionary[self.movie[i]] = vectors\n else:\n newMovie = dict([(self.user[i], self.rating[i])])\n self.dictionary[self.movie[i]] = newMovie\n return self.dictionary", "def asdict():\n pass", "def create_dict(info):\n \"\"\"\n dict = {ip: {counter:*}, {weekdays: []}, {hours: []}}\n \"\"\"\n dict_info = dict()\n for i in info:\n ip = i[0]\n hours = i[1]\n weekdays = i[2]\n if ip not in dict_info:\n dict_info[ip] = {}\n dict_info[ip]['counter'] = 0\n dict_info[ip]['hours'] = []\n dict_info[ip]['weekdays'] = []\n dict_info[ip]['counter'] += 1\n dict_info[ip]['hours'].append(hours)\n dict_info[ip]['weekdays'].append(weekdays)\n return dict_info", "def __init__(self,n):\n\t\tself._dict={}\n\t\tfor i in range(n):\n\t\t\tself._dict[i]=[]", "def to_dict(a_list):\n return dict(zip(map(str, range(len(a_list))), a_list))", "def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:\n return [row.as_dict() for row in self.rows]", "def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:\n return [row.as_dict() for row in self.rows]", "def create_film_lists_dict(film_list):\n film_lists_dict = {}\n for film in film_list:\n if 'disabled' not in film or not film['disabled']:\n # make lists of film types\n # Note, that this means a film can be in several lists\n if 'tags' in film:\n for tag in film['tags']:\n if tag not in film_lists_dict: \n film_lists_dict[tag] = [film]\n else:\n film_lists_dict[tag].append(film)\n return film_lists_dict", "def to_dict(self) -> dict:\n\n value_list = []\n if self.list_values:\n for value in self.list_values:\n if isinstance(value, dict):\n value_list.append(value)\n elif isinstance(value, PicklistValue):\n value_list.append(value.to_dict())\n else:\n raise TypeError(f\"Invalid type for `list_values` in Picklist.to_dict:\"\n f\"{type(self.list_values)}, {self.list_values}\")\n\n output = {\n \"list_name\": self.list_name.__str__(),\n \"list_values\": value_list,\n \"last_modified\": self.last_modified.timestamp() * 1000 # JS timestamps are in ms\n }\n\n return output", "def prep_data(data: list):\n book = {\n 'title': data['title'],\n 'authors': [],\n 'categories': []\n }\n try:\n for author in data['authors']:\n author_obj, created = Author.objects.get_or_create(name=author)\n book['authors'].append(author_obj.id)\n except KeyError:\n pass\n try:\n for category in data['categories']:\n category_obj, created = Category.objects.get_or_create(name=category)\n book['categories'].append(category_obj.id)\n except KeyError:\n pass\n book['published_date'] = data.get('publishedDate', None)\n book['average_rating'] = data.get('averageRating', None)\n book['ratings_count'] = data.get('ratingsCount', None)\n try:\n book['thumbnail'] = 
data['imageLinks']['thumbnail']\n except KeyError:\n book['thumbnail'] = None\n return book", "def initDictionary(bands):\r\n for x in bands:\r\n d[\"{}\".format(x)] = {ProdCost: [], AlbumSales: []}", "def get_structure(self):\n main = {}\n for line in self.load():\n match = re.match('^\\s*([A-Za-z0-9_]+)(\\((\\d+)\\))?=(.*)$', line)\n if match:\n key = match.group(1)\n index = match.group(3)\n value = match.group(4)\n if index is None:\n main[key] = self.parse_data_value(value)\n else:\n if key not in main:\n main[key] = []\n main[key].append(self.parse_data_value(value))\n #else:\n # print(line)\n return main", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def buildDict(self, dict):\n for item in dict:\n length = len(item)\n if length not in self.dic:\n self.dic[length] = [item]\n else:\n self.dic[length].append(item)", "def createDates(self, data: QDate=None):\n if data is None:\n data = self.oggi\n # print('CREATEDATES DATA', data)\n dateList = MeseGiorniDictGen.bigList(data)\n return dateList", "def base_dict():\n out = OrderedDict()\n ao(out, 'name', 'String', 'Name', name='Name')\n ao(out, 'mro', 'List', name='mro', attr=['Hidden'])\n ao(out, 'comment', 'String', '')\n ao(out, 'preset', 'Preset', '', attr=['Hidden'])\n ao(out, 'dev', 'String', attr=['Hidden'])\n ao(out, 'devpath', 'String', attr=['Hidden'])\n ao(out, 'fullpath', 'String', attr=['Hidden'])\n ao(out, 'zerotime', 'Float', name='Start time', attr=['Hidden'])\n ao(out, 'initInstrument', 'Progress', attr=['Hidden'])\n return out", "def __init__(self):\n self.d = collections.defaultdict(list)", "def make_books_dicts(xml, book_list):\n\n books_response = xml.GoodreadsResponse.reviews.review\n for book in books_response:\n a_book = {}\n a_book['title'] = book.book.title.cdata.encode('utf8')\n a_book['author_name'] = book.book.authors.author.name.cdata.encode('utf8')\n a_book['author_gr_id'] = int(book.book.authors.author.id.cdata.encode('utf8'))\n a_book['gr_work_id'] = int(book.book.work.id.cdata.encode('utf8'))\n a_book['description'] = book.book.description.cdata\n\n a_book['edition'] = {}\n a_book['edition']['isbn'] = valid_isbn(book.book.isbn.cdata.encode('utf8'))\n a_book['edition']['format_id'] = get_format_id(book.book.format.cdata.encode('utf8'))\n a_book['edition']['pic_url'] = book.book.image_url.cdata.encode('utf8')\n a_book['edition']['publisher'] = book.book.publisher.cdata.encode('utf8')\n a_book['edition']['gr_url'] = book.book.link.cdata.encode('utf8')\n a_book['edition']['gr_id'] = int(book.book.id.cdata.encode('utf8'))\n year = date_is_valid(book.book.publication_year.cdata.encode(\"utf8\"))\n month = date_is_valid(book.book.publication_month.cdata.encode(\"utf8\"))\n day = date_is_valid(book.book.publication_day.cdata.encode(\"utf8\"))\n a_book['edition']['date'] = datetime.date(year, month, day)\n a_book['edition']['num_pages'] = 
valid_page_count(book.book.num_pages.cdata.encode('utf8'))\n book_list.append(a_book)\n\n print \"*******THERE ARE \" + str(len(book_list)) + \" ON THIS SHELF*******\"\n\n return book_list", "def json(self) -> List[Dict[str, Union[str, List[str]]]]:\n model_json: List[Dict[str, Union[str, List[str]]]] = []\n for phrase in self.phrase_index:\n entry = {'phrase': phrase}\n if phrase in self.has_variants:\n entry['variants'] = list(self.has_variants[phrase])\n if phrase in self.has_labels:\n entry['label'] = list(self.has_labels[phrase])\n if phrase in self.custom:\n entry['custom'] = self.custom[phrase]\n model_json += [entry]\n return model_json", "def init_objects(config_dict):\n # only testing purposes\n obj_list = dict()\n obj_list['input_cfg'] = config_dict\n return obj_list", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def initializeCollection():\n return {SENSOR1:[], SENSOR2:[], SENSOR3:[],SENSOR4:[], DATE:[]}", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", "def __init__(self):\n        self.list=[]\n        self.hashmap={}\n        ", "def __init__(self, data):\n self.products = dict()\n for item in data:\n style_number = item[\"Style\"]\n\n if style_number not in self.products:\n product = {\"price\": item[\"price\"]}\n self.products[style_number] = product", "def objects(self):\n _, c = self.get_column(0)\n size = len(c)\n headers = self.headers()\n for i in range(size):\n obj = {}\n for h in headers:\n _, col = self.get_column(h)\n val = col[i]\n obj[h] = val\n yield obj", "def construct_dict(cursor):\n rows = cursor.fetchall()\n return [dict((cursor.description[i][0], value) for i, value in enumerate(row))\n for row in rows]", "def GetDict(self):\n # CL actions are be stored in self._cl_action_list instead of\n # in self._metadata_dict['cl_actions'], because _cl_action_list\n # is potentially a multiprocess.lis. So, _cl_action_list needs to\n # be copied into a normal list.\n temp = self._metadata_dict.copy()\n temp['cl_actions'] = list(self._cl_action_list)\n\n # Similarly, the per-board dicts are stored in a flattened form in\n # _per_board_dict. 
Un-flatten into nested dict.\n per_board_dict = {}\n for k, v in self._per_board_dict.items():\n board, key = k.split(':')\n board_dict = per_board_dict.setdefault(board, {})\n if key:\n board_dict[key] = v\n\n temp['board-metadata'] = per_board_dict\n return temp", "def __init__(self):\n self.dict_val = {}\n self.list_val = []", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def asdict(self) -> dict[str, Any]:\n return {\n w.name: getattr(w, \"value\", None)\n for w in self._list\n if w.name and not w.gui_only\n }", "def dictOfDraws(self):\n return dict()", "def creer_dictionnaire_vide():\n dico = {}\n return dico", "def __init__(self):\n dict = defaultdict(list)\n self.conversion_dict = dict", "def from_list(self, lst: List[Tuple[keyType, valueType]]) -> None:\n key_size, value_size = self.size()\n if key_size > 0:\n # Clear the content of the existing custom dictionary object to the initial state.\n self.length = 10\n self.hashTable = [HeadNode() for i in range(self.length)]\n self.iter_head_node_index = 0\n self.iter_chain_node_index = -1\n self.iter_value_index = -1\n self.iter_values = []\n for element in lst:\n key = element[0]\n value = element[1]\n self.add(key, value)" ]
[ "0.69087183", "0.67974454", "0.6580766", "0.65108544", "0.64887786", "0.6461656", "0.64098716", "0.64027894", "0.64027894", "0.6329244", "0.63245296", "0.6318974", "0.62794995", "0.62457955", "0.6242187", "0.6208534", "0.61998814", "0.6194605", "0.6188527", "0.61769027", "0.61150765", "0.6101987", "0.6074699", "0.6067099", "0.6065764", "0.6065728", "0.6065728", "0.6020715", "0.6018162", "0.6006273", "0.59966075", "0.59966075", "0.59966075", "0.59820783", "0.59737575", "0.59671444", "0.5966353", "0.5962482", "0.5942655", "0.59420824", "0.59175044", "0.59175044", "0.5898645", "0.58936334", "0.5892992", "0.58817095", "0.58569986", "0.58555514", "0.58493954", "0.58432513", "0.58420897", "0.5841918", "0.5817289", "0.580735", "0.5801728", "0.5796066", "0.5794565", "0.5791916", "0.5787875", "0.57863545", "0.57826954", "0.57817996", "0.5781137", "0.5779617", "0.57741785", "0.5759119", "0.57302797", "0.5725942", "0.57244176", "0.57166487", "0.57115316", "0.57070965", "0.57070965", "0.57054454", "0.5702438", "0.57009065", "0.5699458", "0.56900305", "0.56874233", "0.56854975", "0.5677947", "0.5662817", "0.5660854", "0.56602913", "0.5659458", "0.56583285", "0.5638353", "0.5637308", "0.5627276", "0.5626171", "0.5624042", "0.56233853", "0.5600088", "0.559395", "0.5591515", "0.55905634", "0.55865824", "0.55786306", "0.5571022", "0.55590653", "0.5556223" ]
0.0
-1
Create a vocabulary from the training directory; return a sorted vocabulary list
def create_vocabulary(directory, cutoff):
    top_level = os.listdir(directory)
    a = cutoff
    vocab = {}
    for d in top_level:
        subdir = d if d[-1] == '/' else d+'/'
        files = os.listdir(directory+subdir)
        for f in files:
            with open(directory+subdir+f,'r', encoding="utf-8") as doc:
                for word in doc:
                    word = word.strip()
                    if not word in vocab and len(word) > 0:
                        vocab[word] = 1
                    elif len(word) > 0:
                        vocab[word] += 1
    return sorted([word for word in vocab if vocab[word] >= cutoff])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_vocabulary(vocabulary_path, json_vocab_path):\n if not gfile.Exists(vocabulary_path):\n print(\"Transform vocabulary to %s\" % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode=\"rb\") as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")", "def create_vocabulary(vocabulary_path, data_paths, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n vocab = {}\n files = []\n files += [data_paths+f for f in os.listdir(data_paths) ]\n for one_file in files:\n with gfile.GFile(one_file, mode=\"rb\") as f:\n review = f.read()\n tokens = tokenizer(review) if tokenizer else character_tokenizer(review)\n for w in tqdm(tokens):\n word = _DIGIT_RE.sub(b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary already created.\")", "def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict", "def build_vocab(train_dir, vocab_dir, vocab_size=5000):\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size-1)\n words, _ = list(zip(*count_pairs))\n\n open_file(vocab_dir,mode='w').write('\\n'.join(words)+'\\n')", "def _build_vocabulary(input_files):\n if FLAGS.vocab_file:\n tf.logging.info(\"Loading existing vocab file.\")\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(FLAGS.vocab_file, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocab of size %d from %s\",\n len(vocab), FLAGS.vocab_file)\n return vocab\n\n tf.logging.info(\"Creating vocabulary.\")\n num = 0\n wordcount = collections.Counter()\n for input_file in input_files:\n tf.logging.info(\"Processing file: %s\", input_file)\n for sentence in tf.gfile.FastGFile(input_file):\n wordcount.update(sentence.split())\n\n num += 1\n if num % 1000000 == 0:\n tf.logging.info(\"Processed %d sentences\", num)\n\n tf.logging.info(\"Processed %d sentences total\", num)\n\n words = wordcount.keys()\n freqs = wordcount.values()\n sorted_indices = np.argsort(freqs)[::-1]\n\n vocab = collections.OrderedDict()\n vocab[special_words.EOS] = special_words.EOS_ID\n vocab[special_words.UNK] = special_words.UNK_ID\n for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):\n vocab[words[w_index]] = w_id + 2 # 0: EOS, 1: UNK.\n\n tf.logging.info(\"Created vocab with %d words\", len(vocab))\n\n vocab_file = 
os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.FastGFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab.keys()))\n tf.logging.info(\"Wrote vocab file to %s\", vocab_file)\n\n word_counts_file = os.path.join(FLAGS.output_dir, \"word_counts.txt\")\n with tf.gfile.FastGFile(word_counts_file, \"w\") as f:\n for i in sorted_indices:\n f.write(\"%s %d\\n\" % (words[i], freqs[i]))\n tf.logging.info(\"Wrote word counts file to %s\", word_counts_file)\n\n return vocab", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def create_vocabulary(vocabulary_path, words, max_vocabulary_size, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s with max size %d\" % (vocabulary_path, max_vocabulary_size))\n vocab = {}\n counter = 0\n for w in words:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing word %d = %s\" % (counter, w))\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n print(\"vocab too big\")\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")", 
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True,\n _DIGIT_RE=re.compile(br\"\\d\"),\n _START_VOCAB=[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary %s from data %s exists\" % (vocabulary_path, data_path))", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def 
initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def generate_vocabulary():\n stop_words = load_stop_words()\n words = ' '.join(generate_corpus()).split()\n print(len(words))\n vocabulary = {}\n for word in words:\n if word in stop_words:\n continue\n if word in vocabulary.keys():\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n vocabulary = dict(sorted(vocabulary.items(), key=lambda x: x[1], reverse=True))\n return vocabulary", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def load_vocabulary(self):\n vocab_file = open(vocabulary_path, \"r\")\n self.vocab_list = vocab_file.read().split(\"\\n\")\n vocab_file.close()\n print(\"[INFO] Reading vocabulary...\")\n print(self.vocab_list[0:15])", "def initialize_vocabulary(vocabulary_path):\n if os.path.exists(vocabulary_path):\n rev_vocab = []\n with codecs_open(vocabulary_path, \"rb\", encoding=\"utf-8\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(self,vocabulary_path):\n if tf.gfile.Exists(vocabulary_path):\n vocab = corpora.Dictionary.load(vocabulary_path)\n print(\"vocab length: \",len(vocab.token2id))\n\n return vocab.token2id, vocab.token2id.keys()\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab", "def compute_vocabulary(root_path: str) -> list:\n vocab = list()\n scenario_folders = [os.path.join(root_path, f) for f in os.listdir(root_path) if isdir(os.path.join(root_path, f))]\n for scenario_folder in scenario_folders:\n # Compute the path for the scenario folder\n files = [os.path.join(scenario_folder, f) for f in os.listdir(scenario_folder) 
if\n isfile(os.path.join(scenario_folder, f))]\n for file in files:\n file_vocab = parse_vocabulary_from_file(file)\n vocab = vocab + file_vocab\n return unique(vocab)", "def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def initialize_vocabulary(vocabulary_path):\n characters_class = 9999\n\n if os.path.exists(vocabulary_path):\n with codecs.open(vocabulary_path, 'r', encoding='utf-8') as voc_file:\n rev_vocab = [line.strip() for line in voc_file]\n\n vocab = {x: y for (y, x) in enumerate(rev_vocab)}\n\n reserved_char_size = characters_class - len(rev_vocab)\n if reserved_char_size < 0:\n raise ValueError(\"Number of characters in vocabulary is equal or larger than config.characters_class\")\n\n for _ in range(reserved_char_size):\n rev_vocab.append('')\n\n # put space at the last position\n vocab[' '] = len(rev_vocab)\n rev_vocab.append(' ')\n return vocab, rev_vocab\n\n raise ValueError(\"Initializing vocabulary ends: %s\" % vocabulary_path)", "def trainingsVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n dictSearch = {'portal_type':'apyb.papers.training',\n 'sort_on':'sortable_title',\n 'review_state':'confirmed'}\n trainings = ct.searchResults(**dictSearch)\n trainings = [SimpleTerm(b.UID,b.UID,b.Title) for b in trainings]\n return SimpleVocabulary(trainings)", "def create_vocab(data_files, vocab_fname):\n chars = set()\n for data_fname in data_files:\n with io.open(data_fname, 'r', encoding='utf8') as fp:\n raw = fp.read().lower()\n chars.update(raw)\n\n vocab = list(chars - set(['\\t', '\\n'])) + SPECIALS\n tf.logging.info('Creating vocab file..')\n with io.open(vocab_fname, 'w', encoding='utf8') as fp:\n fp.write('\\n'.join(vocab))", "def create_vocab(vocab_size):\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts(\n cache_dir='/tmp')\n return list(vocab_dict.keys())[:vocab_size]", "def save_vocabulary(self, save_directory: str, filename_prefix: str = None):\n return self.tokenizer.save_vocabulary(save_directory=save_directory, filename_prefix=filename_prefix)", "def load_vocab(path, encoding=\"UTF-9\"):\n vocab = []\n\n if not os.path.exists(path):\n return vocab\n\n with open(path, encoding=encoding) as fin:\n for line in fin.readlines():\n line = line.strip()\n word, freq = line.split(\"\\t\")\n vocab.append((word,int(freq)))\n\n return vocab", "def buildVocabulary(paragraphs, verbose=True):\n vocabulary = []\n \n for p in paragraphs:\n for word in p.split():\n vocabulary.append(word)\n\n vocabulary = set(vocabulary)\n if verbose:\n print('Built vocabulary of %d unique 
words'%len(vocabulary))\n \n return list(vocabulary)", "def build_vocab(path, fname):\r\n\twords = open(path, 'r', encoding='utf-8').read().split()\r\n\twordCount = Counter(words)\r\n\tif not os.path.exists(pm.vocab_path):\r\n\t\tos.makedirs(pm.vocab_path)\r\n\twith open(pm.vocab_path + fname, 'w', encoding='utf-8') as f:\r\n\t\tf.write(\"{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n\".format(\"<PAD>\", \"<UNK>\", \"<SOS>\", \"<EOS>\"))\r\n\t\tfor word, count in wordCount.most_common(len(wordCount)):\r\n\t\t\tf.write(u\"{}\\t{}\\n\".format(word, count))", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def make_embedding(path, words, indices):\n #root = '/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs", "def build_vocab(self):\n if self.test_file is None:\n print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def make_vocab(corpus_dictionary, vocab_path):\n with open(vocab_path, 'wb') as fout:\n pickle.dump(corpus_dictionary, fout)\n print('Saved dictionary to', vocab_path)", "def vocab():\n symbols = DEFAULT_SPECIAL_SYMBOLS + [\"mouse\", \"dog\", \"tree\"]\n return Vocabulary(symbols)", "def load_training_data(vocab, directory):\n top_level = os.listdir(directory)\n dataset = []\n for d in top_level:\n if d[-1] == '/':\n label = d[:-1]\n subdir = d\n else:\n label = d\n subdir = d+\"/\"\n files = os.listdir(directory+subdir)\n for f in files:\n bow = create_bow(vocab, directory+subdir+f)\n dataset.append({'label': label, 'bow': bow})\n return dataset", "def _load_vocab(vocab_file_name, language):\n vocab = []\n vocab_size = 0\n #with codecs.getreader(\"utf-8\")(tf.gfile.GFile(vocab_file_name), \"rb\") as f:\n with tf.gfile.GFile(vocab_file_name) as f:\n for word in f:\n vocab.append(word.strip())\n vocab_size += 1\n\n if not EOS in vocab:\n vocab = [EOS] + vocab\n if not SOS in vocab:\n vocab = [SOS] + vocab\n if not UNK in 
vocab:\n vocab = [UNK] + vocab\n\n reverse_dictionary = {}\n new_vocab_file_name = vocab_file_name + \".new\"\n with tf.gfile.GFile(new_vocab_file_name, \"wb\") as f:\n reverse_dictionary = {}\n i = 0\n for word in vocab:\n f.write(\"%s\\n\" % word)\n reverse_dictionary.update({i : word})\n i+=1\n\n vocab_table = tf.contrib.lookup.index_table_from_file(new_vocab_file_name, default_value = 0)\n\n eos_id_tensor = tf.cast(vocab_table.lookup(tf.constant(EOS)), tf.int32)\n sos_id_tensor = tf.cast(vocab_table.lookup(tf.constant(SOS)), tf.int32)\n\n return Vocab(lang=language,\n table=vocab_table,\n size=vocab_size,\n reverse_dict=reverse_dictionary,\n sos_id_tensor=sos_id_tensor,\n eos_id_tensor=eos_id_tensor)", "def _create_vocab(captions):\n print(\"Creating vocabulary.\")\n min_word_count = 4\n word_counts_output_file = '/Users/lzg/Desktop/image_caption/word_count.txt'\n counter = Counter()\n for c in captions:\n counter.update(c)\n print(\"Total words:\", len(counter))\n\n # Filter uncommon words and sort by descending count.\n word_counts = [x for x in counter.items() if x[1] >= min_word_count]\n word_counts.sort(key=lambda x: x[1], reverse=True)\n print(\"Words in vocabulary:\", len(word_counts))\n\n # Write out the word counts file.\n with tf.gfile.FastGFile(word_counts_output_file, \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (w, c) for w, c in word_counts]))\n print(\"Wrote vocabulary file:\", word_counts_output_file)\n\n # Create the vocabulary dictionary.\n reverse_vocab = [x[0] for x in word_counts]\n unk_id = len(reverse_vocab)\n vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n # vocab = Vocabulary(vocab_dict, unk_id)\n\n return vocab_dict, unk_id", "def build_vocab(json_dir, vocab_file):\n start_time = time.time()\n print('Processing...')\n vocab = []\n no_bow = []\n json_files = [file for file in os.listdir(json_dir) if file.endswith('.json')]\n for file in json_files:\n doc_vocab = {}\n with open(os.path.join(json_dir, file), 'r') as f:\n doc = json.loads(f.read())\n doc_vocab['name'] = doc['name']\n doc_vocab['filename'] = file\n if 'bag_of_words' in doc:\n doc_vocab['term_counts'] = doc['bag_of_words']\n vocab.append(doc_vocab)\n else:\n no_bow.append(file)\n if len(no_bow) != len(json_files):\n with open(vocab_file, 'w') as f:\n f.write(json.dumps(vocab))\n print('Processed in %s seconds.' % (time.time() - start_time))\n display(HTML('<p>The vocab file was saved to ' + vocab_file + '.</p>'))\n msg = None\n if len(no_bow) > 0 and len(no_bow) < 20:\n msg = '<p style=\"color: red;\">Warning! The following file(s) could not be processed because they did not contain `bag_of_words` fields.</p>'\n msg += '<ul>'\n for item in no_bow:\n msg += '<li>' + item + '</li>'\n msg += '</ul>'\n elif len(no_bow) > 0 and len(no_bow) >= 20:\n msg = '<p style=\"color: red;\">Warning! 20 or more files could not be processed because they did not contain `bag_of_words` fields.</p>'\n if msg is not None:\n msg += '<p style=\"color: red;\">You may need to run the <a href=\"tokenize.ipynb\">tokenize</a> notebook to ensure that all your data '\n msg += 'has been tokenized. 
You can then try re-running this notebook.</p>'\n display(HTML(msg))", "def file_to_list(filename, dir=\"../resources\"):\n os.chdir(dir)\n vocabulary = []\n f = open(filename, \"r\")\n lines = f.readlines()\n for line in lines:\n vocabulary.append(line.replace(\"\\n\", \"\"))\n return vocabulary", "def getVocabList():\n vocab_list = []\n with open('vocab.txt') as f_obj:\n while True:\n vocab_line = f_obj.readline()\n if not vocab_line:\n break\n word = re.search(r'\\t(\\w+)', vocab_line).group(1)\n vocab_list.append(word)\n return vocab_list", "def create_vocab(input_iter, min_frequency):\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(\n FLAGS.max_sentence_len,\n min_frequency=min_frequency,\n tokenizer_fn=tokenizer_fn)\n\n vocab_processor.fit(input_iter)\n return vocab_processor", "def create_vocab(df, datapath):\n if os.path.isfile(\"vocab_max_l.p\"):\n o = cPickle.load(open(\"vocab_max_l.p\", \"rb\")) # search if vocab file is already existing\n vocab = o[0]\n max_l = o[1]\n else:\n vocab = defaultdict(int)\n max_l = 0\n for d in read_data_files(df.file, datapath):\n words = clean_str(d).split(\" \")\n if len(words) > max_l:\n max_l = len(words)\n\n for w in words:\n vocab[w] += 1\n\n cPickle.dump([vocab, max_l], open(\"vocab_max_l.p\", \"wb\"))\n return vocab, max_l", "def getVocabList():\n vocabList = pd.read_csv(os.path.join(folder, 'vocab.txt'),\n delimiter='\\t',\n names=['index', 'vocab'],\n index_col='index')\n return vocabList", "def build_vocab(self, data_paths):\n\t\tfor data_path in data_paths:\n\t\t\tprint(\"Cur path: \" + data_path)\n\t\t\twith open(data_path, 'r', encoding='utf-8') as dataset:\n\t\t\t\tfor word in tqdm(dataset):\n\t\t\t\t\tword = word.strip('\\n')\n\n\t\t\t\t\tself.word_list.append(word)\n\t\t\t\t\tif self.max_length < len(word):\n\t\t\t\t\t\tself.max_length = len(word)\n\n\t\t\t\t\tfor char in word:\n\t\t\t\t\t\tif char not in self.all_table:\n\t\t\t\t\t\t\tself.all_table.append(char)\n\t\t\t\t\t\t\tself.all_ind[char] = len(self.all_table) - 1\n\t\t\t\t\t\t\tself.num_all += 1\n\n\t\tprint(self.all_table)", "def make_vocab(sent_list):\n counter = Counter()\n for sent in sent_list:\n counter.update(sent)\n\n ret = list(counter.items())\n ret.sort(key=lambda x: x[1], reverse=True)\n return ret", "def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary", "def save_vocabulary(self, vocab_path):\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_NAME)\n else:\n vocab_file = vocab_path\n with open(vocab_file, 'w', encoding='utf-8') as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. 
Please check that the vocabulary is not corrupted!'.format(vocab_file))\n index = token_index\n writer.write(token + u'\\n')\n index += 1\n return vocab_file", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def update_from_vocabulary(self, vocab_path):\n with open(vocab_path, 'r') as vocab_file:\n for word in vocab_file:\n word = word.strip()\n self._add_new_word(word)", "def get_word_embeddings(t, folder, lang=\"en\"):\n vecs_url = f\"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{lang}.300.vec.gz\"\n vecs_gz_filename = vecs_url.rpartition(\"/\")[2]\n os.makedirs(folder, exist_ok=True)\n vecs_gz_filepath = os.path.join(folder, vecs_gz_filename)\n\n tokenizer_vocab_size = len(t.vocab)\n\n if wait_for_file_stable(vecs_gz_filepath):\n print(\"Using existing embeddings file\")\n else:\n print(\"Downloading word vectors...\")\n subprocess.run([\" \".join([\"wget\", \"-NP\", folder, vecs_url])], check=True, shell=True)\n\n print(\"Loading into memory...\")\n embeddings_index = dict()\n with gzip.open(vecs_gz_filepath, \"rt\") as zipf:\n firstline = zipf.readline()\n emb_vocab_size, emb_d = firstline.split(\" \")\n emb_vocab_size = int(emb_vocab_size)\n emb_d = int(emb_d)\n for line in zipf:\n values = line.split()\n word = values[0]\n # Only load subset of the embeddings recognised by the tokenizer:\n if word in t.vocab.stoi:\n coefs = np.asarray(values[1:], dtype=\"float32\")\n embeddings_index[word] = coefs\n print(\"Loaded {} of {} word vectors for tokenizer vocabulary length {}\".format(\n len(embeddings_index),\n emb_vocab_size,\n tokenizer_vocab_size,\n ))\n\n # create a weight matrix for words in training docs\n embedding_matrix = np.zeros((tokenizer_vocab_size, emb_d))\n for word, i in t.vocab.stoi.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n 
vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def load_corpus():\n # Define directory structure\n parent_path = os.getcwd() + '/'\n corpus_path = parent_path + 'corpus_data/'\n corpus_name = corpus_path + 'train_corpus_vocab.pickle'\n # Load corpus vocabulary\n with open(corpus_name, 'rb') as handle:\n train_vocab = pickle.load(handle)\n return(corpus_path, train_vocab)", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common() if x[1] > 1]\n vocabulary_inv += ['$']\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def write_vocabulary():\n with open('../data/vocabulary.txt', 'w') as vocabulary_file:\n vocabulary = generate_vocabulary()\n word_count = sum(vocabulary.values())\n print(word_count)\n vocabs_str = [(\"%s %d\" % (key, value)) for key, value in vocabulary.items()]\n vocabulary_file.write('\\n'.join(vocabs_str))", "def vocabulary(corpus_tokenized):\n vocab = list()\n for element in corpus_tokenized:\n document = element['document']\n for word in document:\n if word not in vocab:\n vocab.append(word)\n return vocab", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()", "def get_vocab(train_data, valid_data, test_data):\n \n print(\"-----------------------------------------------\")\n print(\"Constructing Vocabulary of Words and Characters\")\n print(\"-----------------------------------------------\")\n\n with open(train_data,'r') as f:\n train_corpus = f.readlines()\n f.close()\n\n with open(valid_data,'r') as f:\n valid_corpus = f.readlines()\n f.close()\n\n with open(test_data,'r') as f:\n test_corpus = f.readlines()\n f.close()\n\n word_vocab = {}\n char_vocab = {}\n max_len = 0\n\n word_vocab, char_vocab, max_len = make_vocab(train_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(valid_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(test_corpus, word_vocab, char_vocab, max_len)\n\n char_vocab['<SOT>'] = len(char_vocab)+1 \n char_vocab['<EOT>'] = len(char_vocab)+1\n\n print(\"Word Vocabulary Size : 
%d\"%len(word_vocab))\n print(\"Character Vocabulary Size : %d\"%len(char_vocab))\n print(\"Max Length of Word - 2 : %d\"%max_len)\n\n return word_vocab, char_vocab, max_len", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def get_glove_vocab(filename):\n print(\"Building vocab...\")\n vocab = set()\n with open(filename, encoding='utf-8') as f:\n for line in f:\n # print(line.split(' ')[0])\n word = line.strip().split(' ')[0]\n vocab.add(word)\n print(\"- done. {} tokens\".format(len(vocab)))\n return vocab", "def saveVocabulary(self, filepath, vocabulary):\n\t\timport numpy as np\n\t\tnp.save( filepath, list(vocabulary.items()) )\n\t\tprint ('\\tVocabulary saved in: {}'.format(filepath))", "def _make_vocab_files(self):\n self.logger.info('making question vocab...' + self.opt.QUESTION_VOCAB_SPACE)\n qdic, _ = self.load_data(self.opt.QUESTION_VOCAB_SPACE)\n question_vocab = VQADataProvider.make_question_vocab(qdic, self.max_length)\n self.logger.info('making answer vocab...' 
+ self.opt.ANSWER_VOCAB_SPACE)\n qdic, adic = self.load_data(self.opt.ANSWER_VOCAB_SPACE)\n answer_vocab = VQADataProvider.make_answer_vocab(adic, qdic, self.opt.MAX_ANSWER_VOCAB_SIZE, self.use_ocr)\n return question_vocab, answer_vocab", "def load_words_from_file(path, voc_path=None):\n label_to_idx = {}\n dict_size = 0\n label_ids = []\n with open(path, \"r\") as fin:\n for label in fin:\n if label not in label_to_idx:\n label_to_idx[label] = dict_size\n dict_size += 1\n label_ids.append(label_to_idx[label])\n if voc_path:\n with open(voc_path, \"w+\") as fout:\n json.dump(label_to_idx, fout)\n return torch.tensor(label_ids)", "def save_vocabulary(self):\n out_vocab_file = 'xlnet_vocab.txt'\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)", "def gen_vocab(targets, fname):\n\n\tpath = os.path.join(\"data\", fname)\n\tif not os.path.isfile(path):\n\t\tworddict, wordcount = vocab.build_dictionary(targets)\n\t\tvocab.save_dictionary(worddict, wordcount, path)\n\n\treturn path", "def save_vocabulary(self, save_dir: str) -> None:\n vocab_f: str = os.path.join(save_dir, 'vocab.tsv')\n with open(vocab_f, 'w') as ofile:\n for i, word_type in enumerate(self.get_instruction_vocabulary()):\n ofile.write(str(i) + '\\t' + word_type + '\\n')", "def load_vocab(self):\n\n if self.vocabulary_path: \n # For now, the file format is derived from the file extension.\n if self.vocabulary_path.endswith('csv'):\n self.logger.info(\"Filter spymaster vocabulary by csv-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n reader = csv.reader(fin)\n header = next(reader)\n for row in reader:\n word = row[1].lower()\n self.update_vocab(word) \n elif self.vocabulary_path.endswith('txt'):\n self.logger.info(\"Filter spymaster vocabulary by txt-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n for line in fin:\n word = line.strip()\n self.update_vocab(word)\n else:\n raise ValueError(\"Unknown file format for filter spymaster vocabulary.\") \n else:\n self.logger.info(\"Load spymaster vocabulary from gensim.models.KeyedVectors.\")\n self.vocab = self.model.vocab\n self.vocab_size = len(self.vocab)\n\n self.logger.info(\"Spymaster vocabulary size is {}\".format(self.vocab_size))", "def build(corpus: List[List[str]], size=5000, freq_cutoff=5):\n vocab = VocabEntry()\n word2freq = Counter(chain(*corpus))\n word2freq = {word: freq for word, freq in word2freq.items() if freq > freq_cutoff}\n words_selected = sorted(word2freq.keys(), key=lambda w: word2freq[w], reverse=True)[:size]\n for w in words_selected:\n vocab.add(w)\n print(\"vocabulary constructing completed, %d/%d words included......\" % (len(words_selected), len(word2freq)))\n return vocab", "def vocabulary(self):\n return [recid for recid in self._model.vocab]", "def tokenize(self, path, training_set=False):\n assert os.path.exists(path)\n with open(path, encoding='utf8') as fin:\n num_lines = sum(1 for _ in fin.readlines())\n with open(path, 'r', encoding=\"utf8\") as f:\n words = []\n for i, line in enumerate(tqdm(f, total=num_lines)):\n if self.max_lines > 0 and i > self.max_lines:\n break\n line = line.strip()\n if not line:\n continue # Skip empty lines.\n elif line.startswith('='):\n continue # Skip headers.\n else:\n sentence = (self.order - 1) * [SOS] + \\\n [process(word, self.lower) for word in line.split()] + [EOS]\n if training_set:\n words.extend(sentence)\n 
self.vocab.update(sentence)\n else:\n sentence = [word if word in self.vocab else UNK for word in sentence]\n words.extend(sentence)\n return words", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as 
vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def hload_vocab(vocab_path):\n vocab = collections.OrderedDict()\n index = 0\n with open(vocab_path, \"r\", encoding=\"utf-8\") as reader:\n while True:\n token = reader.readline()\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab", "def load_vocab(path: str) -> Vocab:\n return torch.load(path, map_location=lambda storage, loc: storage)['args'].vocab", "def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab", "def create_vocab_tables(vocab_file):\n vocab_table = lookup_ops.index_table_from_file(\n vocab_file, default_value=0)\n return vocab_table", "def get_vocabulary(corpus,\n initial_vocab={\n '<unk>': 0,\n '<sssss>': 1\n },\n vocabsize=0):\n vocab = copy.copy(initial_vocab)\n word_count = Counter()\n for text in corpus:\n for w in text.split(' '):\n word_count[w] += 1\n\n # if vocabulary size is specified, most common words are selected\n if vocabsize > 0:\n for w in word_count.most_common(vocabsize):\n if w[0] not in vocab:\n vocab[w[0]] = len(vocab)\n if len(vocab) >= vocabsize:\n break\n else: # all observed words are stored\n for w in word_count:\n if w not in vocab:\n vocab[w] = len(vocab)\n return vocab", "def get_vocab(trainingData):\r\n return set(reduce(lambda x,y: x+y, map(lambda x: map(lambda y: y[0], x), trainingData), []))", "def load_training():\n for can in candidates:\n trainings[can] = []\n for subdir, dirs, files in os.walk(os.path.join(corpus_dir, can)):\n for doc in files:\n trainings[can].append(doc)", "def load_vocab(fn):\n return corpora.Dictionary.load(fn)", "def get_vocab(self):\n if os.path.exists(self.vocab_file) & self.vocab_from_file:\n f = open(self.vocab_file, \"rb\")\n vocab = pickle.load(f)\n self.word2idx = vocab.word2idx\n self.idx2word = vocab.idx2word\n f.close()\n else:\n self.build_vocab()\n with open(self.vocab_file, 'wb') as f:\n pickle.dump(self, f)", "def load_target_vocab(self):\n vocab = 
[line.split()[0] for line in open(os.path.join('preprocessed', 'all_vocab.txt'), 'r').read().splitlines()]\n self.word2idx = {word: idx for idx, word in enumerate(vocab)}\n self.idx2word = {idx: word for idx, word in enumerate(vocab)}\n self.vocab_size = len(self.word2idx)", "def create_vocabulary(\n data: Series,\n tokenizer_type: str = \"space\",\n lowercase: bool = True,\n num_most_frequent: int = None,\n vocab_file: str = None,\n add_special_symbols: bool = True,\n unknown_symbol: str = UNKNOWN_SYMBOL,\n padding_symbol: str = PADDING_SYMBOL,\n start_symbol: str = START_SYMBOL,\n stop_symbol: str = STOP_SYMBOL,\n pretrained_model_name_or_path: str = None,\n ngram_size: Optional[int] = None,\n compute_idf: bool = False,\n processor: DataFrameEngine = PANDAS,\n) -> Vocabulary:\n vocab = None\n\n tokenizer = get_tokenizer_from_registry(tokenizer_type)(\n vocab_file=vocab_file,\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n ngram_size=ngram_size,\n )\n\n # Pre-trained huggingface tokenizer. Use the pre-existing vocabulary and special symbols.\n if tokenizer_type == \"hf_tokenizer\":\n try:\n vocab = tokenizer.get_vocab()\n vocab = list(vocab.keys())\n except NotImplementedError:\n logger.warning(\n \"HuggingFace tokenizer does not have a get_vocab() method. \"\n + \"Using tokenizer.tokenizer.vocab_size and tokenizer.tokenizer._convert_id_to_token \"\n + \"to build the vocabulary.\"\n )\n vocab = []\n for idx in range(tokenizer.tokenizer.vocab_size):\n vocab.append(tokenizer.tokenizer._convert_id_to_token(idx))\n vocab += tokenizer.tokenizer.added_tokens_encoder.keys()\n\n pad_token = tokenizer.get_pad_token()\n unk_token = tokenizer.get_unk_token()\n\n if unk_token is None:\n logger.warning(\n \"No unknown token found in HuggingFace tokenizer. Adding one. \"\n + \"NOTE: This will change the vocabulary size and may affect model \"\n + \"performance, particularly if the model weights are frozen.\"\n )\n vocab = [unknown_symbol] + vocab\n else:\n unknown_symbol = unk_token\n\n if pad_token is None and add_special_symbols:\n logger.warning(\n \"No padding token found in HuggingFace tokenizer. Adding one. \"\n + \"NOTE: This will change the vocabulary size and may affect model \"\n + \"performance, particularly if the model weights are frozen.\"\n )\n vocab = [padding_symbol] + vocab\n else:\n padding_symbol = pad_token\n elif hasattr(tokenizer, \"get_vocab\"):\n vocab = tokenizer.get_vocab()\n vocab = list(vocab.keys())\n elif vocab_file is not None:\n vocab = load_vocabulary(vocab_file)\n\n def process_line(line):\n return tokenizer(line.lower() if lowercase else line)\n\n processed_lines = processor.map_objects(data, process_line)\n processed_counts = processed_lines.explode().value_counts(sort=False)\n processed_counts = processor.compute(processed_counts)\n unit_counts = Counter(dict(processed_counts))\n\n doc_unit_counts = None\n if compute_idf:\n # The document frequency used for TF-IDF. 
Similar to unit_counts, but de-duped by document.\n document_counts = processed_lines.map(lambda x: set(x)).explode().value_counts(sort=False)\n document_counts = processor.compute(document_counts)\n doc_unit_counts = Counter(dict(document_counts))\n\n line_length_max = processor.compute(processed_lines.map(len).max())\n line_length_99ptile = processor.compute(processed_lines.map(len).quantile(0.99))\n\n if vocab is None:\n vocab = [unit for unit, _ in unit_counts.most_common(num_most_frequent)]\n\n vocab_set = set(vocab)\n\n if tokenizer_type != \"hf_tokenizer\":\n if add_special_symbols:\n add_or_move_symbol(vocab, vocab_set, stop_symbol, SpecialSymbol.STOP.value)\n add_or_move_symbol(vocab, vocab_set, start_symbol, SpecialSymbol.START.value)\n add_or_move_symbol(vocab, vocab_set, padding_symbol, SpecialSymbol.PADDING.value)\n # Always add the UNKNOWN symbol if we're using our own tokenizer.\n add_or_move_symbol(vocab, vocab_set, unknown_symbol, SpecialSymbol.UNKNOWN.value)\n\n str2idx = {unit: i for i, unit in enumerate(vocab)}\n str2freq = {unit: unit_counts.get(unit) if unit in unit_counts else 0 for unit in vocab}\n str2idf = (\n {unit: np.log(len(vocab) / (1 + doc_unit_counts.get(unit))) if unit in doc_unit_counts else 0 for unit in vocab}\n if compute_idf\n else None\n )\n\n pad_idx = None\n if padding_symbol in str2idx.keys():\n pad_idx = str2idx[padding_symbol]\n\n return Vocabulary(\n vocab=vocab,\n str2idx=str2idx,\n str2freq=str2freq,\n str2idf=str2idf,\n line_length_max=line_length_max,\n line_length_99ptile=line_length_99ptile,\n pad_idx=pad_idx,\n padding_symbol=padding_symbol,\n unknown_symbol=unknown_symbol,\n )", "def get_ordered_vocabulary(self):\n idx_rev = dict((y, x) for x, y in self.ngram_to_idx.items())\n ordered_vocab = list(map(lambda i: idx_rev[i], range(len(self.vocab))))\n return ordered_vocab", "def load_embeddings(filepath, vocabulary, retain):\n \n word2index = dict()\n word_vectors = list()\n\n def add_entry(word, vector):\n word2index[word] = len(word2index)\n word_vectors.append(vector)\n\n model = gensim.models.KeyedVectors.load(filepath)\n\n # adding special tokens <FIL>, <UNK> and <NUM>\n dim = model.vector_size\n add_entry('<fil>', np.zeros((dim,)))\n for special in ['<unk>', '<num>']:\n vector = np.random.uniform(-0.025, 0.025, (dim,))\n add_entry(special, vector)\n\n if retain:\n for word, _ in model.vocab.items():\n add_entry(word, model[word])\n else:\n for word in vocabulary:\n if word in model:\n add_entry(word, model[word])\n\n vocabulary = vocabulary.intersection(word2index.keys())\n return word2index, np.asarray(word_vectors)", "def build_vocab(sentences, vocab_limit):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n print( 'Total size of vocab is {}'.format(len(word_counts.most_common())))\n # Mapping from index to word\n # vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n \n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i+1 for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def process_vocabulary(args, data, quiet=False):\n if not quiet:\n out(args.logfile, \"initializing vacabularies... 
\", end=\"\")\n seq_vocab = vocabulary.Vocabulary()\n bracket_vocab = vocabulary.Vocabulary()\n # loop_type_vocab = vocabulary.Vocabulary()\n\n for vocab in [seq_vocab, bracket_vocab]: # , loop_type_vocab]:\n vocab.index(START)\n vocab.index(STOP)\n for x in data[:100]:\n seq = x[\"sequence\"]\n dot = x[\"structure\"]\n # loop = x[\"predicted_loop_type\"]\n for character in seq:\n seq_vocab.index(character)\n for character in dot:\n bracket_vocab.index(character)\n # for character in loop:\n # loop_type_vocab.index(character)\n for vocab in [seq_vocab, bracket_vocab]: # , loop_type_vocab]:\n # vocab.index(UNK)\n vocab.freeze()\n if not quiet:\n out(args.logfile, \"done.\")\n\n def print_vocabulary(name, vocab):\n # special = {START, STOP, UNK}\n special = {START, STOP}\n out(args.logfile, \"{}({:,}): {}\".format(\n name, vocab.size,\n sorted(value for value in vocab.values if value in special) +\n sorted(value for value in vocab.values if value not in special)))\n\n if not quiet:\n print_vocabulary(\"Sequence\", seq_vocab)\n print_vocabulary(\"Brackets\", bracket_vocab)\n return seq_vocab, bracket_vocab", "def load_vocab(vocab_files, preserve_token=None):\n if preserve_token is None:\n preserve_token = []\n vocab = collections.OrderedDict()\n index = 0\n if preserve_token is not None:\n for token in preserve_token:\n vocab[token] = index\n index += 1\n vocab_files = vocab_files.split(\",\")\n for vocab_file in vocab_files:\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = utils.convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n if token not in vocab:\n vocab[token] = index\n index += 1\n return vocab", "def build_vocab(self, top=None):\n\n self._logger.info(\"Building vocab from corpus\")\n\n vocab = Counter()\n for list_ in self._lists:\n vocab.update(list_)\n\n self._logger.info(\"Done building vocab from corpus.\")\n\n if top is not None and top < len(vocab):\n words = sorted(vocab.items(), key=lambda x: -x[1])[:top]\n else:\n words = vocab.items()\n\n self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}", "def generate_vocabSet(original_data):\n education = convert_key(original_data, 'education')\n experience = convert_key(original_data, 'experience')\n header = convert_key(original_data, 'header')\n vocabSet = []\n vocabSet.append(education)\n vocabSet.append(experience)\n vocabSet.append(header)\n vocabSet = [y for i in vocabSet for y in i]\n vocabSet = list(set(vocabSet))\n return vocabSet" ]
[ "0.72303003", "0.7164037", "0.71459424", "0.7054131", "0.7033178", "0.700483", "0.68919396", "0.68904585", "0.68769383", "0.6866679", "0.6859989", "0.6854887", "0.6839105", "0.6836058", "0.68341655", "0.6810794", "0.6810794", "0.6805193", "0.6802589", "0.6783411", "0.6731821", "0.6689364", "0.6675953", "0.66615444", "0.6661086", "0.6641328", "0.65732366", "0.65720415", "0.6559415", "0.6511555", "0.6507885", "0.6486582", "0.6467811", "0.64378244", "0.64191747", "0.6390665", "0.63313013", "0.6312922", "0.62976044", "0.6286723", "0.6284564", "0.6280589", "0.6238675", "0.6230933", "0.6213836", "0.6206569", "0.62058645", "0.6205781", "0.6171308", "0.6152242", "0.6135649", "0.61217916", "0.6118665", "0.60660124", "0.6063942", "0.60612845", "0.60488236", "0.6038126", "0.6034356", "0.6031617", "0.6013679", "0.6008816", "0.6003596", "0.5998219", "0.598469", "0.5982821", "0.5975153", "0.5947066", "0.5947066", "0.5947066", "0.59389454", "0.59331894", "0.5919609", "0.59194845", "0.59142715", "0.5900304", "0.58993137", "0.5899087", "0.5892211", "0.58918935", "0.588498", "0.5882386", "0.58808523", "0.58789617", "0.5877157", "0.58711433", "0.5866909", "0.586009", "0.58468145", "0.5845097", "0.5842773", "0.5833744", "0.58189106", "0.58063895", "0.5796419", "0.57906735", "0.57761663", "0.5775307", "0.57706714", "0.5762425" ]
0.7643122
0
Create a single dictionary for the data
def create_bow(vocab, filepath):
    bow = {}
    # TODO: add your code here
    wordcount = 0
    wordcountnone = 0
    c = 0
    for i in vocab:
        c+=1
        with open(filepath, 'r', encoding="utf-8") as doc:
            ###############################################
            for word in doc:
                word = word.strip()
                if(c==1):
                    if (word not in vocab):
                        wordcountnone += 1
                if(i == str(word)):
                    wordcount += 1
                    #print(wordcount)
        if(wordcount > 0):
            bow[i] = wordcount
            wordcount = 0
    if(wordcountnone != 0):
        bow[None] = wordcountnone
    return bow
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_to_create_object(self):\n return {}", "def as_dict(self) -> dict[str, Any]:\n return {\n \"type\": self.type,\n \"timestamp\": self.timestamp,\n \"data\": self.data or {},\n }", "def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in dictionary\n return d", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def asdict():\n pass", "def data(self) -> dict[str, Any]:\n raise NotImplementedError()", "def make_feed_dict(self, data):\r\n raise NotImplementedError", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }", "def as_dict(self) -> Dict[str, Any]:\n return {\"ttl\": self.ttl, \"type\": self.type.value, \"data\": self.data}", "def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()", "def dict(self):\n return self.__data_dict", "def _get_data(self):\n data = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # skip for factories for now\n continue\n value = getattr(self, name)\n raw_value = field.to_raw(value)\n if isinstance(field, fields.Secret):\n data[f\"__{name}\"] = raw_value\n else:\n data[name] = raw_value\n\n return data", "def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}", "def get_dict(self):\n return", "def dict(self):\r\n d = {\r\n \"key\": self.field,\r\n \"value_count\": self.value_count,\r\n \"record_count\": self.record_count,\r\n \"value_ratio\": self.value_ratio,\r\n \"storage_types\": list(self.storage_types),\r\n \"null_count\": self.null_count,\r\n \"null_value_ratio\": self.null_value_ratio,\r\n \"null_record_ratio\": self.null_record_ratio,\r\n \"empty_string_count\": self.empty_string_count,\r\n \"unique_storage_type\": self.unique_storage_type\r\n }\r\n\r\n if self.distinct_overflow:\r\n d[\"distinct_overflow\"] = self.distinct_overflow,\r\n d[\"distinct_values\"] = []\r\n else:\r\n d[\"distinct_values\"] = list(self.distinct_values)\r\n\r\n return d", "def dict(self):\n return self.data", "def return_as_dictionary(self):\n out_put_dict = {}\n out_put_dict['productCode'] = self.product_code\n out_put_dict['description'] = self.description\n 
out_put_dict['marketPrice'] = self.market_price\n out_put_dict['rentalPrice'] = self.rental_price\n\n return out_put_dict", "def data(self) -> dict:\n raise NotImplementedError()", "def data(self):\n return dict(name=self.name, ra=self.ra, dec=self.dec, mjd=self.mjd,\n type_=self.type, cosmo= (self.cosmo.name if self.cosmo is not None else None),\n zcmb = self.zcmb, zcmb_err=self.zcmb_err)", "def _store(self):\n store_dict = {}\n store_dict.update(self._data)\n return store_dict", "def data(self, **kw):\n return dict(params=kw)", "def data(self, **kw):\n return dict(params=kw)", "def to_data(self, *, defaults = False):\n data = {}\n put_required_into(self.required, data, defaults)\n put_title_into(self.title, data, defaults)\n put_type_into(self.type, data, defaults)\n put_values_into(self.values, data, defaults)\n return data", "def to_data(self, *, defaults = False):\n return {}", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = distance_matrix.tolist()\r\n data['time_matrix'] = time_matrix.tolist()\r\n data['time_windows'] = time_windows.tolist()\r\n data['pickups_deliveries'] = pickup_deliveries.tolist()\r\n data['demands'] = demand\r\n data['num_vehicles'] = 20\r\n data['vehicle_capacities'] = [20 * i / i for i in range(1, num_vehicles+1)]\r\n data['depot'] = (2 * length) - 1\r\n return data", "def to_dictionary(self):\n new_dict = {}\n new_dict['id'] = self.id\n new_dict['size'] = self.size\n new_dict['x'] = self.x\n new_dict['y'] = self.y\n return new_dict", "def to_dict(self) -> dict:", "def get_data(self) -> dict:\n\n data = {\n 'title': self.get_title(),\n 'link' : self.get_link(),\n 'store': self.get_store_name()\n }\n\n return data", "def get_data(self):\n data = {\n \"ts\": self.drone.pos[0][0],\n \"drone\": self.drone,\n \"subject\": self.subject,\n \"peds\": self.peds, # can be None\n \"objs\": self.objs # can be None\n }\n self.empty_bag()\n return data", "def _data(self):\n if self._data_ is None:\n self._data_ = {}\n return self._data_", "def to_dict(self) -> Dict:\n return {'object_id': self.object_id, 'data_id': self.data_id}", "def get_dict(self):\n return self._data", "def _store(self):\n if self._data is not None:\n store_dict = {\"data\": ObjectTable(data={\"data\": [self._data]})}\n\n if self.f_has_range():\n store_dict[\"explored_data\"] = ObjectTable(\n data={\"data\": self._explored_range}\n )\n\n self._locked = True\n\n return store_dict", "def getData(self):\n return dict(self._dump_data)", "def make_feed_dict(data):\n raise NotImplemented()", "def get_data(self):\n return {\n \"fd\": self.get_fd_j(self.id),\n \"fd_part\": self.get_fd_part_j(self.id),\n \"config\": self.config,\n # \"config\": self.get_config_j(self.id),\n \"prp\": self.get_prp_j(self.id),\n \"alll\": self.my_alll(self.id)\n }", "def generate_dict(self):\n # verify preferred timestamp exists in the structure...\n if not self._check_preferred_timestamps():\n raise SampleException(\"Preferred timestamp not in particle!\")\n\n # build response structure\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n\n return result", "def __initializeData():\n\tdata = OrderedDict()\n\tdata['Saved_LIVE'] = False\n\tdata['Saved_POST'] = False\n\tdata['Time_Written_POST'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\tdata['Time_Written_LIVE'] = 
datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\treturn data", "def generate_object_data(self):\n object_dict = {\n 'content_type' : str(self.target_object._meta),\n 'object_id' : str(self.target_object._get_pk_val()),\n }\n return object_dict", "def generate_object_data(self):\n object_dict = {\n 'content_type' : str(self.target_object._meta),\n 'object_id' : str(self.target_object._get_pk_val()),\n }\n return object_dict", "def dict(self):\n return {\"data\": self.data.dict(), \"inventory\": self.inventory.dict()}", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = mtrx.create_distance_matrix(mtrx.create_data()) \r\n data['demands'] = clean.demands\r\n # Each location has a demand corresponding to the quantity—for example, \r\n # weight or volume—of the item to be picked up.\r\n data['vehicle_capacities'] = capacity\r\n # Each vehicle has a capacity: the maximum quantity that the vehicle can hold. \r\n # As a vehicle travels along its route, the total quantity of the items it is carrying \r\n # can never exceed its capacity.\r\n data['num_vehicles'] = number\r\n data['depot'] = 0\r\n return data", "def extract_data(self):\n values = {}\n for injkey in self.data_sets.keys():\n values[injkey] = {}\n alldata = self.data_sets[injkey]\n paramkeys = alldata['params'].keys()\n for datakey in alldata.keys():\n if not datakey == 'params':\n values[injkey][datakey] = {}\n values[injkey][datakey]['metric_val'] = {}\n values[injkey][datakey]['metric_val']['vals'] = []\n for paramkey in paramkeys:\n values[injkey][datakey][paramkey] = {}\n values[injkey][datakey][paramkey]['vals'] = []\n trials = alldata[datakey]\n for trial_num in trials.keys():\n trial = trials[trial_num]\n values[injkey][datakey]['metric_val']['vals'] \\\n .append(trial['metric_val'])\n values[injkey][datakey]['metric_val']['type'] \\\n = trial['metric']\n values[injkey][datakey]['metric_val']['units'] \\\n = 'dimensionless'\n param_vals = trial['params']\n for param_name in param_vals.keys():\n val, units = self.parse_pint_string(\n pint_string=param_vals[param_name]\n )\n values[injkey][datakey][param_name]['vals'] \\\n .append(float(val))\n values[injkey][datakey][param_name]['units'] \\\n = units\n self.values = values", "def metadata(self):\n self.data_as_dict = {}\n for ele in self.data:\n self.data_as_dict[ele.name] = ele.value\n return self.data_as_dict", "def _get_gedi1b_main_data_dict(self) -> dict:\n data = {\n # General identifiable data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n # Quality data\n \"degrade\": self[\"geolocation/degrade\"][:],\n \"stale_return_flag\": self[\"stale_return_flag\"][:],\n \"solar_elevation\": self[\"geolocation/solar_elevation\"][:],\n \"solar_azimuth\": self[\"geolocation/solar_elevation\"][:],\n \"rx_energy\": self[\"rx_energy\"][:],\n # DEM\n \"dem_tandemx\": self[\"geolocation/digital_elevation_model\"][:],\n \"dem_srtm\": self[\"geolocation/digital_elevation_model_srtm\"][:],\n # geolocation bin0\n \"latitude_bin0\": self[\"geolocation/latitude_bin0\"][:],\n \"latitude_bin0_error\": self[\"geolocation/latitude_bin0_error\"][:],\n \"longitude_bin0\": self[\"geolocation/longitude_bin0\"][:],\n \"longitude_bin0_error\": self[\"geolocation/longitude_bin0_error\"][:],\n \"elevation_bin0\": self[\"geolocation/elevation_bin0\"][:],\n 
\"elevation_bin0_error\": self[\"geolocation/elevation_bin0_error\"][:],\n # geolocation lastbin\n \"latitude_lastbin\": self[\"geolocation/latitude_lastbin\"][:],\n \"latitude_lastbin_error\": self[\"geolocation/latitude_lastbin_error\"][:],\n \"longitude_lastbin\": self[\"geolocation/longitude_lastbin\"][:],\n \"longitude_lastbin_error\": self[\"geolocation/longitude_lastbin_error\"][:],\n \"elevation_lastbin\": self[\"geolocation/elevation_lastbin\"][:],\n \"elevation_lastbin_error\": self[\"geolocation/elevation_lastbin_error\"][:],\n # relative waveform position info in beam and ssub-granule\n \"waveform_start\": self[\"rx_sample_start_index\"][:] - 1,\n \"waveform_count\": self[\"rx_sample_count\"][:],\n }\n return data", "def createDict( self ):\n d = {}\n devTup = ( 'endcap', 'comp', 'shutter','397intensity' )\n for dev in devTup:\n d[dev] = {'devChannels':{}}\n endcap = ( ( 1, 1 ), ( 2, 0 ) )\n comp = ( ( 1, 4 ), ( 2, 2 ), ( 'common', 3 ) )\n shutter = ( ( 1, 5 ), ( 2, 6 ), ( 3, 7 ) )\n intensity397 = (('397intensity',8),)\n chanTup = ( endcap, comp, shutter ,intensity397 )\n for dev, value in zip( devTup, chanTup ):\n for chanPair in value:\n d[dev]['devChannels'][chanPair[0]] = {'value':None, 'channel':chanPair[1]}\n ecRange = ( 0.0, 40.0 )\n compRange = ( -40.0, 40.0 )\n shutterRange = ( 0.0, 5.0 )\n intensity397Range = (0.0,2500.0)\n rangeTup = ( ecRange, compRange, shutterRange, intensity397Range )\n for dev, value in zip( devTup, rangeTup ): d[dev]['range'] = value\n self.dcDict = d", "def into_data(self) -> Dict[str, Any]:\n data = dict(producer=self.producer)\n if self.mtime_ns > 0:\n data[\"mtime\"] = str(_datetime_from_nanoseconds(self.mtime_ns))\n return data", "def createdictionary(bpm, extremes, duration, numbeats, time_beats):\n dict = {}\n dict[\"mean_hr_bpm\"] = bpm\n dict[\"voltage_extremes\"] = extremes\n dict[\"duration\"] = duration\n dict[\"num_beats\"] = numbeats\n dict[\"beats\"] = time_beats\n return dict", "def to_obj(self):\n return dict()", "def data_to_dict(self):\n return self.data.copy()", "def get_dict(self):\n return {\n \"type\": self.item_type,\n \"size\": self.size,\n \"toppings\": self.toppings,\n \"price\": self.get_price()\n }", "def get_dic(self):\n dic = {\n 'size': self.size,\n 'bounds': self.bounds,\n 'visible': self.visible,\n 'is_static': self.is_static,\n 'options': self.options,\n 'primitive_type': self.primitive_type,\n 'constrain_ratio': self.constrain_ratio,\n 'constrain_navigation': self.constrain_navigation,\n 'framebuffer': self.framebuffer,\n # 'beforeclear': self.beforeclear,\n 'variables': self.get_variables_list(),\n 'vertex_shader': self.vertex_shader,\n 'fragment_shader': self.fragment_shader,\n }\n return dic", "def to_dict(self) -> dict:\n\n return {\n \"data\": {\n \"avg_tone\": self.average_tone,\n \"goldstein\": self.goldstein,\n \"actor_code\": self.actor_code,\n \"lat\": self.latitute,\n \"lon\": self.longitude,\n \"date\": self.timestamp.strftime(r\"%Y-%m-%d %H:%M:%S\"),\n }\n }", "def _collect_data(self):\n data = {\n \"K\": self.K,\n \"root\": self.root\n }\n return data", "def valid_data():\n return dict(\n id=str(uuid4()),\n created_at=1559933807392,\n name='my project',\n description='a good project',\n status='in-progress'\n )", "def dict(self):\n dict = {}\n dict[\"gt_mask\"] = self.gt_mask\n dict[\"instance_id\"] = self.instance_id\n dict[\"label_id\"] = self.label_id\n dict[\"instance_count\"] = self.instance_count\n dict[\"med_dist\"] = self.med_dist\n dict[\"dist_conf\"] = self.dist_conf\n return dict", 
"def create_data(self, prefix='prefix', num=10):\n values = {}\n with self.db.transaction() as tr:\n for i in range(num):\n key = '{}-{}'.format(prefix, i).encode('utf-8')\n value = '{}-val-{}'.format(prefix, i).encode('utf-8')\n tr[key] = values[key] = value\n tr.commit()\n return values", "def as_dict(self):\n return dict(self.as_OD())", "def _get_gedi2a_main_data_dict(self) -> dict:\n gedi_l2a_count_start = pd.to_datetime(\"2018-01-01T00:00:00Z\")\n data = {\n # General identifiable data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n \"absolute_time\": (\n gedi_l2a_count_start\n + pd.to_timedelta(list(self[\"delta_time\"]), unit=\"seconds\")\n ),\n # Quality data\n \"sensitivity\": self[\"sensitivity\"][:],\n \"quality_flag\": self[\"quality_flag\"][:],\n \"solar_elevation\": self[\"solar_elevation\"][:],\n \"solar_azimuth\": self[\"solar_elevation\"][:],\n \"energy_total\": self[\"energy_total\"][:],\n # DEM\n \"dem_tandemx\": self[\"digital_elevation_model\"][:],\n \"dem_srtm\": self[\"digital_elevation_model_srtm\"][:],\n # Processing data\n \"selected_algorithm\": self[\"selected_algorithm\"][:],\n \"selected_mode\": self[\"selected_mode\"][:],\n # Geolocation data\n \"lon_lowestmode\": self[\"lon_lowestmode\"][:],\n \"longitude_bin0_error\": self[\"longitude_bin0_error\"][:],\n \"lat_lowestmode\": self[\"lat_lowestmode\"][:],\n \"latitude_bin0_error\": self[\"latitude_bin0_error\"][:],\n \"elev_lowestmode\": self[\"elev_lowestmode\"][:],\n \"elevation_bin0_error\": self[\"elevation_bin0_error\"][:],\n \"lon_highestreturn\": self[\"lon_highestreturn\"][:],\n \"lat_highestreturn\": self[\"lat_highestreturn\"][:],\n \"elev_highestreturn\": self[\"elev_highestreturn\"][:],\n } | {f\"rh{i}\": self[\"rh\"][:, i] for i in range(101)}\n return data", "def create_data_model():\n data = {}\n data['distance_matrix'] = transit_c\n data['post'] = pospt_c\n data['fixed_cost'] = fc*1000\n data['demands'] = total_demand\n data['vehicle_capacities'] = capacity_list_function(routes,S)\n data['time_capacities'] = time_list_function(routes,Tmax)\n data['num_vehicles'] = routes+1\n data['depot'] = 0\n return data", "def create_data_model():\n data = {}\n # Locations in block units\n data['locations'] = [\n ] # yapf: disable\n data['num_vehicles'] = 1\n data['depot'] = 0\n return data", "def create(data, next=None):\n return {'data': data, 'next': next}", "def create_data_record(self, data_dict):\n source_dict = deepcopy(data_dict)\n assert not self.is_conflicting_keys(data_dict,\n self.default_values), \"Conflicting keys between default_values and extra_values\"\n source_dict.update(self.default_values)\n return {\n '_index': self.get_full_index(),\n '_type': 'python_log',\n '_source': source_dict\n }", "def prepare_data(data: list) -> dict:\n d = {}\n for t in data:\n d[t[0]] = read_text(t[1])\n return d", "def toDict(self):\n\n aDict = {}\n\n # Required Keys\n try:\n aDict[self.E0_KEY] = self.e0.toDict()\n aDict[self.E1_KEY] = self.e1.toDict()\n aDict[self.E2_KEY] = self.e2.toDict()\n aDict[self.MAXIMUM_HORIZONTAL_KEY] = self.maximumHorizontalProjection\n aDict[self.MAXIMUM_VERTICAL_KEY] = self.maximumVerticalProjection\n aDict[self.EQUIVALENT_HORIZONTAL_KEY] = self.equivalentHorizontalRadius\n\n except (NameError, AttributeError) as e:\n print(\"Missing required data error: %s\" % 
e)\n\n return aDict", "def createDictionary(self):\n\t\tdictionary: dict = {}\n\t\tdictionary.update({'deckname': self.mDeckName})\n\t\tdictionary.update({'filename': self.autoFilename})\n\t\tdictionary.update({'creatorname': str(self.mCreatorname)})\n\t\tdictionary.update({'maxAttrPoints': str(self.mMaxAttributePoints)})\n\t\tminionListDict: dict = {}\n\t\tfor minion in self.mMinionSet:\n\t\t\tminionDict: dict = {}\n\t\t\tminionDict.update({'minionName': str(minion.mMinionName)})\n\t\t\tminionDict.update({'attack': str(minion.mAttackPoints)})\n\t\t\tminionDict.update({'hp': str(minion.mHealthPoints)})\n\t\t\tskillList: list = minion.mSkills\n\t\t\tskillNames: list = []\n\t\t\tfor skill in skillList:\n\t\t\t\tskillNames.append(skill.mSkillName)\n\t\t\tminionDict.update({'skills': skillNames})\n\t\t\tminionListDict.update({minion.mMinionName: minionDict})\n\t\tdictionary.update({'minions': minionListDict})\n\t\tdictionary.update({'id' : hash(str(dictionary))}) # TODO LPO: let DB handle that\n\t\tself.mDeckDict = dictionary\n\t\treturn dictionary", "def _full_mapping(self, data):\r\n x = self._empty_mapping()\r\n for key, value in data.items():\r\n x[key] = value\r\n return x", "def get_data_to_update_object(self):\n return {}", "def _dictobj(self):\n\n theDict = {\"__table__\":\"Room\",\n \"id\":1,\n \"name\":\"Test Room\",\n \"roomTypeId\" : 1 }\n return theDict", "def dict() -> Dict[str, Pin]:", "def init_data(stats_list):\n\n data = {stats_name: {} for stats_name in stats_list}\n return data", "def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict", "def get_dict(self):\n new_source_data = self.data.to_dict(orient=\"list\")\n new_source_data[\"index\"] = self.data.index\n for k in list(new_source_data):\n if isinstance(k, tuple):\n new_source_data[\"_\".join(k)] = new_source_data.pop(k)\n\n return new_source_data", "def _store(self):\n store_dict = {}\n for key in self._data:\n val = self._data[key]\n if SparseParameter._is_supported_matrix(val):\n serial_string = SparseParameter._serialize_matrix(val)\n store_dict[\"%s%s\" % (key, SparseParameter.IDENTIFIER)] = serial_string\n else:\n store_dict[key] = val\n\n return store_dict", "def __load(self) -> Dict:\n return dict()", "def AsDict(self):\n data = {}\n if self.id:\n data['id'] = self.id\n if self.created_at:\n data['created_at'] = self.created_at\n if self.sender_id:\n data['sender_id'] = self.sender_id\n if self.sender_screen_name:\n data['sender_screen_name'] = self.sender_screen_name\n if self.recipient_id:\n data['recipient_id'] = self.recipient_id\n if self.recipient_screen_name:\n data['recipient_screen_name'] = self.recipient_screen_name\n if self.text:\n data['text'] = self.text\n return data", "def to_dict(self):\n return {}", "def getDataDict(self):\n # Used to compare data in MATLAB\n d = {'Vm': self.r_Vm,\n 'Va': self.r_Va,\n 'BusName': self.Busnam,\n 'BusNum': self.Extnum,\n }\n return d", "def _empty_data(self):\n return {\n \"info\": {\n \"root_cache_dir\": self._get_default_cache_dir(),\n \"root_downloads_dir\": self._get_default_downloads_dir(),\n },\n \"dataset\": {},\n \"category\": {}\n }", "def npdict(self):\n\n d = {}\n\n # per profile\n d['cruise'] = self.cruise()\n d['day'] = self.day()\n d['latitude'] = self.latitude()\n d['latitude_unc'] = self.latitude_unc()\n d['longitude'] = self.longitude()\n 
d['longitude_unc'] = self.longitude_unc()\n d['month'] = self.month()\n d['n_levels'] = self.n_levels()\n d['primary_header_keys'] = self.primary_header_keys()\n d['probe_type'] = self.probe_type()\n d['time'] = self.time()\n d['uid'] = self.uid()\n d['year'] = self.year()\n d['PIs'] = self.PIs()\n d['originator_station'] = self.originator_station()\n d['originator_cruise'] = self.originator_cruise()\n d['originator_flag_type'] = self.originator_flag_type()\n d['t_metadata'] = self.t_metadata()\n d['s_metadata'] = self.s_metadata()\n # per level\n d['s'] = self.s()\n d['s_unc'] = self.s_unc()\n d['s_level_qc'] = self.s_level_qc()\n d['s_profile_qc'] = self.s_profile_qc()\n d['s_qc_mask'] = self.s_qc_mask()\n d['t'] = self.t()\n d['t_unc'] = self.t_unc()\n d['t_level_qc'] = self.t_level_qc()\n d['t_profile_qc'] = self.t_profile_qc()\n d['t_qc_mask'] = self.t_qc_mask()\n d['z'] = self.z()\n d['z_unc'] = self.z_unc()\n d['z_level_qc'] = self.z_level_qc()\n d['oxygen'] = self.oxygen()\n d['phosphate'] = self.phosphate()\n d['silicate'] = self.silicate()\n d['pH'] = self.pH()\n d['p'] = self.p()\n\n return d", "def make():\n data = {}\n data.update({'earth' : {'description': 'Planet with 20% O2 with 75% of surface covered by H2O. Humans inhabitants enjoy both of these aspects.',\n 'order' : 1,\n 'type': 'planet',\n }})\n \n return data", "def to_dict(self):\n return self._data_dict", "def get_structure(self):\n main = {}\n for line in self.load():\n match = re.match('^\\s*([A-Za-z0-9_]+)(\\((\\d+)\\))?=(.*)$', line)\n if match:\n key = match.group(1)\n index = match.group(3)\n value = match.group(4)\n if index is None:\n main[key] = self.parse_data_value(value)\n else:\n if key not in main:\n main[key] = []\n main[key].append(self.parse_data_value(value))\n #else:\n # print(line)\n return main", "def create_data_model():\n data = {}\n data['distance_matrix'] =[]\n data['num_vehicles'] = 1\n data['depot'] = 0\n data['demands'] = [0, 1, 1, 2, 4, 2, 4, 8, 8, 1, 2, 1, 2, 4, 4, 8, 8]\n data['vehicle_capacities'] = [15, 15, 15, 15]\n return data", "def get_entry_dict(self):\n\n # generating thumbnail URLs is slow, so only generate the ones\n # that will definitely be used.\n ret = {\n 'id': self.id,\n 'vertices': self.vertices,\n 'triangles': self.triangles,\n 'segments': self.segments,\n 'photo': self.photo.get_entry_dict(),\n }\n if self.dominant_rgb0:\n ret['dominant_rgb0'] = self.dominant_rgb0\n #if self.image_pbox:\n #ret['pbox'] = self.pbox\n #ret['image_pbox'] = {\n #'300': self.image_pbox_300.url,\n #'512': self.image_pbox_512.url,\n #'1024': self.image_pbox_1024.url,\n #'orig': self.image_pbox.url,\n #}\n if self.image_bbox:\n ret['image_bbox'] = {\n #'512': self.image_bbox_512.url,\n '1024': self.image_bbox_1024.url,\n #'orig': self.image_bbox.url,\n }\n return ret", "def to_dict(self):", "def as_dict(self):\n item = {}\n item['data'] = self.data\n item['created'] = str(self.created)\n item['tags'] = list(self.tags)\n item['path'] = str(self.path)\n\n #TODO\n #is item equivalent to a json.loads(json.dumps(self)) ???\n\n return item", "def to_dict(self) -> Dict[str, Union[str, Number, dict, list]]:\n model = dict()\n model[\"name\"] = self.get_model_name()\n model[\"allocation_paradigm\"] = self.allocation_paradigm.name\n model[\"cpu_count\"] = self.cpu_count\n model[\"time_range\"] = self.time_range.to_dict()\n model[\"hydrofabric_data_id\"] = self.hydrofabric_data_id\n model[\"hydrofabric_uid\"] = self.hydrofabric_uid\n model[\"config_data_id\"] = self.config_data_id\n 
model[\"bmi_config_data_id\"] = self._bmi_config_data_id\n if self.catchments is not None:\n model[\"catchments\"] = self.catchments\n if self.partition_cfg_data_id is not None:\n model[\"partition_config_data_id\"] = self.partition_cfg_data_id\n\n return {\"model\": model, \"session-secret\": self.session_secret}", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def getData(self):\n data = {}\n data.update ( self._original_values, self._modified_values )\n return data", "def get_data(self):\n return {\n self.PRODUCT_RANGE_ID: self.product_id,\n self.SALES_CHANNEL_ID: self.SALES_CHANNEL_ID_VALUE,\n }", "def data(request, data_pandas, data_pandas_truth, data_binary, data_binary_truth, data_tracks,\n data_tracks_no_detections, data_tracks_truth):\n return {\"pandas\": data_pandas,\n \"pandas_truth\": data_pandas_truth,\n \"binary\": data_binary,\n \"binary_truth\": data_binary_truth,\n \"tracks\": data_tracks,\n \"tracks_nd\": data_tracks_no_detections,\n \"tracks_truth\": data_tracks_truth}[request.param]", "def get_wiki_data() -> Dict:\n return wiki_data.copy()", "def to_dictionary(self):\n dict_contents = [\"id\", \"size\", \"x\", \"y\"]\n new_dict = {}\n for key in dict_contents:\n new_dict[key] = getattr(self, key)\n return new_dict", "def to_dict(self):\n return dict(self.__data)", "def form_dictionary_by_crystal(data_obj) -> dict:\n if isinstance(data_obj, Crystal):\n ddict = data_obj.get_dictionary()\n else:\n ddict = {}\n \n return ddict", "def creer_dictionnaire_vide():\n dico = {}\n return dico", "def data_for_question(self, question_type):\n\t\treturn {}", "def prep_data(data: list):\n book = {\n 'title': data['title'],\n 'authors': [],\n 'categories': []\n }\n try:\n for author in data['authors']:\n author_obj, created = Author.objects.get_or_create(name=author)\n book['authors'].append(author_obj.id)\n except KeyError:\n pass\n try:\n for category in data['categories']:\n category_obj, created = Category.objects.get_or_create(name=category)\n book['categories'].append(category_obj.id)\n except KeyError:\n pass\n book['published_date'] = data.get('publishedDate', None)\n book['average_rating'] = data.get('averageRating', None)\n book['ratings_count'] = data.get('ratingsCount', None)\n try:\n book['thumbnail'] = data['imageLinks']['thumbnail']\n except KeyError:\n book['thumbnail'] = None\n return book", "def _get_base_dict(self):\n res = dict(\n task=self._task,\n timestamp=self._timestamp,\n metric=self._metric,\n variant=self._variant\n )\n if self._iter is not None:\n res.update(iter=self._iter)\n if self._model_event is not None:\n res.update(model_event=self._model_event)\n return res", "def dict(self) -> Dict:\r\n return super().dict()" ]
[ "0.74594545", "0.6839548", "0.675466", "0.66805357", "0.666756", "0.6639752", "0.6629121", "0.65938157", "0.658765", "0.6585759", "0.6554827", "0.6547288", "0.65392995", "0.65266854", "0.651302", "0.64879346", "0.6485962", "0.64473516", "0.6439832", "0.6439223", "0.64320755", "0.64320755", "0.64267683", "0.6416165", "0.6370147", "0.6369755", "0.6367662", "0.6364883", "0.6364721", "0.63483065", "0.63479507", "0.6338648", "0.6329674", "0.63216233", "0.6320251", "0.631986", "0.63103443", "0.6304187", "0.6298534", "0.6298534", "0.6297853", "0.62935084", "0.6289767", "0.628889", "0.62880707", "0.6272372", "0.626856", "0.62497383", "0.6240286", "0.6233794", "0.6233615", "0.6204262", "0.6204223", "0.62028855", "0.6201942", "0.6199313", "0.6192279", "0.6192026", "0.617439", "0.6167972", "0.6160324", "0.6158936", "0.6158666", "0.6156649", "0.6147866", "0.61467606", "0.61404914", "0.61396915", "0.61395884", "0.61380196", "0.6134366", "0.6130642", "0.6127317", "0.61205214", "0.6107579", "0.6098343", "0.60976595", "0.6096748", "0.60963553", "0.60948586", "0.6094716", "0.6088623", "0.608745", "0.60849303", "0.6082986", "0.6082641", "0.6081364", "0.6080289", "0.60797596", "0.6075249", "0.60698706", "0.6064461", "0.6064098", "0.606382", "0.6061124", "0.60565996", "0.6055036", "0.60546005", "0.60541975", "0.6047233", "0.6046204" ]
0.0
-1
return the prior probability of the label in the training set => frequency of DOCUMENTS
def prior(training_data, label_list):
    smooth = 1 # smoothing factor
    logprob = {}
    # TODO: add your code here
    numfile1 = 0
    numfile2 = 0
    for dic in training_data:
        if(dic["label"] == label_list[0]):
            numfile1 += 1
        elif(dic["label"] == label_list[1]):
            numfile2 += 1
    numtotal = numfile1 + numfile2
    prob1 = (numfile1+smooth)/(numtotal+2)
    prob2 = (numfile2 + smooth) / (numtotal + 2)
    logprob[label_list[0]] = math.log(prob1)
    logprob[label_list[1]] = math.log(prob2)
    return logprob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def prior(self, c, labeled):\n return log(len(labeled[c])/self.N_features)", "def _predict_doc(self, x, flag):\n\n if flag == 1:\n denom = self.X.num_positive()\n else:\n denom = self.X.num_negative()\n denom += self.X.vocab_size()\n\n # multiply word probabilities for all words in x\n words = tokenize(x)\n # prob = 1.0\n # for word in words:\n # wi = self._doc_count_for_word(word, flag=flag)\n # # utilize the Laplace Smooth\n # prob *= ((float(wi)+1.0) / (float(denom)+2.0))\n\n prob = math.log(self.X.priors[str(flag)])\n for word in words:\n wi = self._doc_count_for_word(word, flag=flag)\n # utilize the Laplace Smooth\n prob += math.log((float(wi)+1.0) / (float(denom)+2.0))\n\n # prob *= math.log(self.X.priors[str(flag)])\n\n return prob", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def doc_prob(self, doc, cat):\n features = self.get_features(doc) \n # Multiply the probabilities of all the features together\n p = Decimal(1)\n for f in features:\n p *= Decimal(str(self.weighted_prob(f, cat, self.feature_prob))) \n return p", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def predict(self, doc):\n \n prob_positive = self._predict_doc(doc, 1)\n prob_negative = self._predict_doc(doc, 0)\n\n if prob_positive > prob_negative:\n return 1\n return 0", "def perplexity(self, corpus):\n l = 0\n total_word_count = 0\n for sentence in corpus :\n l += self.sentence_logprob(sentence)\n # 2 extra START tokens and 1 extra STOP token\n total_word_count += len(sentence)\n l /= total_word_count\n return math.pow(2, -l)", "def learn(self, docs, labels, alpha=1.0):\n assert len(docs)==len(labels)\n labelCounts = {l: 0 for l in self.CLASSES}\n wordCounts = {l: Counter() for l in self.CLASSES}\n totalWordCounts = {l: 0 for l in self.CLASSES}\n # iterate over documents in order to record\n for i in range(0, len(labels)):\n # count(y) in labelCounts\n l = labels[i]\n labelCounts[labels[i]] +=1\n # count(y,w) for all words in totalWordCounts\n totalWordCounts[labels[i]] += len(docs[i])\n words = docs[i]\n # count(y,word) in wordCounts,\n \n for word in words:\n wordCounts[labels[i]][word] += 1\n # and to store the training vocabulary in self.trainVocab\n self.trainVocab.add(word)\n # compute and store prior distribution over 
classes\n # (unsmoothed) in self.priorProbs\n print(\"Label,priorProbs,Label Count\", file=sys.stderr)\n for l in self.priorProbs:\n self.priorProbs[l] = np.divide(labelCounts[l], len(labels))\n print(l +\",\"+str(self.priorProbs[l])+\",\"+str(labelCounts[l]), file=sys.stderr) #This was for part one\n for word in self.trainVocab: \n self.likelihoodProbs[l][word] = np.divide(wordCounts[l][word]+self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n self.likelihoodProbs[l]['**OOV**'] = np.divide(self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n # Sanity checks--do not modify\n assert len(self.priorProbs)==len(self.likelihoodProbs)==len(self.CLASSES)>2\n assert .999 < sum(self.priorProbs.values()) < 1.001\n for y in self.CLASSES:\n assert .999 < sum(self.likelihoodProbs[y].values()) < 1.001,sum(self.likelihoodProbs[y].values())\n assert 0 <= self.likelihoodProbs[y]['**OOV**'] < 1.0,self.likelihoodProbs[y]['**OOV**']", "def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def calculate_prior_probability(y):\n unique, counts = np.unique(y, return_counts=True)\n u_c = dict(zip(unique, counts))\n instances = len(y)\n for u in u_c:\n u_c[u] = float(u_c[u] / instances)\n return u_c", "def compute_class_freqs(gen):\r\n labels = gen.labels\r\n N = labels.shape[0]\r\n positive_frequencies = np.sum(labels, axis=0) / N\r\n negative_frequencies = np.sum(1 - labels, axis=0) / N\r\n return positive_frequencies, negative_frequencies", "def perplexity(self, corpus):\n sum_pro = 0.0\n total_words = 0\n for sentence in corpus:\n sen_pro = self.sentence_logprob(sentence)\n sum_pro += sen_pro\n total_words += len(sentence)\n\n \n\n l = sum_pro/total_words\n w = 0.0\n w = 2**(-l)\n\n return w", "def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p", "def classify(self, documents):\n predictions = []\n for doc in documents:\n\n score_sod = math.log(self.priorSOD)\n score_pop = math.log(self.priorPOP)\n for term in doc.tokens:\n if term in self.cond_prob_sod.keys():\n score_sod += math.log(self.cond_prob_sod[term])\n if term in self.cond_prob_pop.keys():\n score_pop += math.log(self.cond_prob_pop[term])\n if(score_pop >= score_sod): #defaults to ham if score = even \n predictions.append('pop')\n else:\n predictions.append('sod')\n \n return predictions \n pass", "def assign_labels_first_freq(document, label_encoder):\n for sentence in document.sentences:\n for word in sentence.words:\n labels = [\n label_encoder.inv_label_map[np.argmax(p[1])]\n for p in word.tokens[0].predictions\n ]\n word.predicted_label = most_common(labels)", "def classify(self, document):\n f_vector = self.extract_f_vector(document)\n f_vector = np.append(f_vector, np.array([1])) # adding last \"feature\" for prior log probability\n all_log_prob = self.my_model[\"all_log_prob\"]\n sum_of_probabilities = f_vector.dot(all_log_prob)\n index = np.argmax(sum_of_probabilities)\n return self.my_model[\"col_to_label\"][index]", "def predict(self, docs):\n positivecount = 0\n negativecount = 0\n doc = 0\n for i in 
range(len(docs)):\n predictpos = np.log(self.priorpos)\n predictneg = np.log(self.priorneg)\n for x in range(len(docs[0])):\n num = docs[i][x]\n if num != 0 and x < len(self.condpos):\n predictpos += np.log(self.condpos[x])\n predictneg += np.log(self.condneg[x])\n print(\"doc # %d\" % doc)\n if predictpos > predictneg:\n positivecount += 1\n print(\"POSITIVE prediction\")\n else:\n negativecount += 1\n print(\"NEGATIVE prediction\")\n doc += 1\n totalcount = positivecount + negativecount\n return totalcount, positivecount, negativecount", "def PredictiveDist(self, label='pred'):\n # TODO: fill this in\n lam = 1\n pred = thinkbayes2.MakePoissonPmf(lam, 15)\n return pred", "def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 1 / N + V\n score += math.log(prob)\n\n return score", "def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)", "def learn_prior(file_name, pseudo_count=0):\n true_count = pseudo_count\n false_count = pseudo_count\n with open(file_name) as in_file:\n training_examples = [tuple(row) for row in csv.reader(in_file)]\n \n for i in training_examples:\n if i[-1] == str(\"1\"):\n true_count += 1\n elif i[-1] == str(\"0\"):\n false_count += 1\n \n result = true_count / (true_count + false_count)\n \n return result", "def p_word_given_label(vocab, training_data, label):\n\n smooth = 1 # smoothing factor\n word_prob = {}\n # TODO: add your code here\n total_word = 0\n\n word_prob[None] = 0\n\n\n for dic in training_data:\n\n for index0, i0 in enumerate(dic['bow']):\n if (list(dic['bow'])[index0] in word_prob):\n continue;\n word_prob[list(dic['bow'])[index0]] = 0\n #word_prob[None] = 0\n if(dic[\"label\"] == label):\n for index, i in enumerate(dic[\"bow\"]):\n if(list(dic['bow'])[index] in vocab):\n if(list(dic['bow'])[index] in word_prob):\n\n word_prob[list(dic['bow'])[index]] += dic[\"bow\"][i]\n else:\n word_prob[list(dic['bow'])[index]] = dic[\"bow\"][i]\n else:\n if(None in word_prob):\n word_prob[None] += dic[\"bow\"][i]\n else:\n word_prob[None] = 0\n\n total_word += dic[\"bow\"][i]\n #word_prob [None] = 5\n\n for h in word_prob:\n word_prob[h] = math.log((word_prob[h] + smooth*1)) - math.log((total_word + smooth*(len(vocab) +1)))\n\n\n return word_prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict = self.model[\"words\"].get(word, None)\n if word_dict:\n total_grade += 
math.log(word_dict[cur_label], math.e)\n chance.append((total_grade, cur_label))\n _, prediction = max(chance)\n return prediction", "def get_ngram_prob(self, label_seq):\n curr_ngram = self.all_grams\n for i in range(0, len(label_seq)):\n label = label_seq[i]\n if i == len(label_seq) - 1:\n denom = curr_ngram.get_count() + self.SMOOTHING_VALUE * 9\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # For smoothing, just add self.SMOOTHING_VALUE\n numer = curr_ngram.get_count() + self.SMOOTHING_VALUE\n return float(numer) / denom", "def estimate_probability(word, previous_n_gram, \r\n n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0):\r\n # Note : 1 . Here we are actually not considering the end token or start token as a part of a vocabulary.\r\n # 2 . Although the literature says we need to prepend the n-1 SOS tokens but in reality we are prepending n SOS tokens\r\n \r\n # convert list to tuple to use it as a dictionary key\r\n previous_n_gram = tuple(previous_n_gram)\r\n\r\n previous_n_gram_count = n_gram_counts.get(previous_n_gram, 0)\r\n \r\n\r\n denominator = float(previous_n_gram_count + (k*vocabulary_size))\r\n\r\n n_plus1_gram = previous_n_gram + (word,)\r\n \r\n\r\n n_plus1_gram_count = n_plus1_gram_counts.get(n_plus1_gram, 0)\r\n \r\n\r\n numerator = float(n_plus1_gram_count + k)\r\n\r\n probability = float(numerator/denominator)\r\n \r\n \r\n return probability", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def compute_class_freqs(labels):\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # total number of patients (rows)\n N = len(labels)\n \n positive_frequencies = np.sum(labels,axis=0)/N\n negative_frequencies = 1-positive_frequencies\n\n ### END CODE HERE ###\n return positive_frequencies, negative_frequencies", "def _prob_of_next_word(next_word, prev_word_array, review_batch):\n compare_phrase = np.append(prev_word_array, next_word)\n resized_batch = np.resize(review_batch, (len(compare_phrase)))\n count = 0\n\n for phrase in resized_batch:\n if np.array_equal(phrase, compare_phrase):\n count += 1\n\n return count / (resized_batch.shape[0] * resized_batch.shape[1])", "def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-self.omega) * p1 + self.omega * p2", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for 
word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n omega = self.alpha / (doc_length + self.alpha)\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-omega) * p1 + omega * p2", "def _cal_label_distribution(self):\n if self.split == 'train':\n self.label_weights = np.zeros(21, dtype=np.float32)\n for label_list in self.label:\n self.label_weights += np.histogram(label_list, range(22))[0]\n self.label_weights /= np.sum(self.label_weights)\n print(\"per class weight:\", self.label_weights)\n self.label_weights = 1. 
/ np.log(1.2 + self.label_weights)\n print(\"np.log(1.2 + self.label_weights):\", self.label_weights)\n elif self.split == 'test':\n self.label_weights = np.ones(21)\n return self.label_weights.tolist()", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def perplexity(self, corpus):\n M = 0\n prob = 0\n\n for line in corpus:\n M += len(line)\n M += 1 # consider \"STOP\"\n prob += self.sentence_logprob(line)\n result = 2**(-(prob/M))\n\n return result", "def calc_transmission_prob():\n counter = Counter()\n prob = defaultdict(lambda: 1.0e-7)\n\n valid_chars = set(TRAIN_LETTERS)\n text = open(train_txt_fname, 'r').read().strip()\n text = [c for c in text.replace('\\n', ' ') if c in valid_chars]\n\n for c1, c2 in zip(text, text[1:]):\n counter[(c1, c2)] += 1\n\n total = len(text)\n for k, v in counter.items():\n prob[k] = float(v) / total\n return prob", "def train_model(filename):\n counts = get_file_counts(filename)\n new_file = open(filename, \"r\")\n num_lines = 0\n for line in new_file:\n num_lines += 1 \n #number of lines in file\n return counts_to_probs(counts, num_lines)", "def feature_prob(self, f, cat):\n if self.category_count(cat) == 0:\n return 0\n # The total number of times this feature appeared in this \n # category divided by the total number of items in this category\n pfc = self.feature_count(f, cat)\n pc = self.category_count(cat)\n return float(pfc)/pc", "def cond_prob(self, token, prev_tokens=None):\n\n if not prev_tokens:\n assert self.n == 1\n prev_tokens = tuple()\n # ngram condicional probs are based on relative counts\n hits = self.count((tuple(prev_tokens)+(token,)))\n sub_count = self.count(tuple(prev_tokens))\n\n return hits / float(sub_count)", "def prob(self, w):\n return self.counts[w] / self.total_count", "def predict_one(self):\n return (self.contexted.calc_p(\"\", self.seen + \"1\") /\n float(self.contexted.calc_p(\"\", self.seen)))", "def cond_prob(self, token, prev_tokens=()):\n return float(self.count(list(prev_tokens) + [token]) + 1) / float(self.count(prev_tokens) + self._V)", "def _kneser_ney_probability(self, count: int, sequence: str,\n sequence_total_count: int) -> float:\n assert self.count_map is not None and \\\n self.n_1_gram_map is not None, 'count map or n minus 1 gram map not initialized'\n\n count_previous_and_current: Optional[int] = None\n if sequence == unseen_output or sequence not in self.n_1_gram_map:\n # did not see given sequence, default count to 1\n count_previous_and_current = 1\n else:\n count_word = len(self.n_1_gram_map[sequence])\n count_previous_and_current = sequence_total_count + count_word\n d = count - self._good_turing_new_c(count)\n # first term is the term on the 
left of the equation\n first_term = max([count_previous_and_current - d, 0]\n ) / float(sequence_total_count)\n\n if sequence == unseen_output:\n # if sequence is not seen, use frequency of unknown\n # lmbda = d / count * freq(unknown)\n sequence = unknown_token\n different_final_word_types: int = 0\n if sequence in self.model:\n current_sequence_data: NGramsSequence = self.model[sequence]\n different_final_word_types = len(current_sequence_data.next_count)\n # lambda is part of the second term\n lmbda = d / float(sequence_total_count) * different_final_word_types\n\n different_preceding_final_word_types: int = 0\n if sequence in self.n_1_gram_map:\n different_preceding_final_word_types = len(\n self.n_1_gram_map[sequence])\n\n num_n_grams = len(self.model)\n if num_n_grams == 0:\n return 0.\n\n # p_cont is the second part of the second term\n p_cont = float(different_preceding_final_word_types) / num_n_grams\n\n # return probability of the current sequence\n return first_term + lmbda * p_cont", "def cond_prob(self, token, prev_tokens=()):\n assert len(prev_tokens) < self._n\n if self.count(prev_tokens) == 0:\n return 0.0\n return float(self.count(list(prev_tokens) + [token])) / float(self.count(prev_tokens))", "def get_ngramlogprobs(freqdict):\n return", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def predict_proba_confidence(clf, X, y_true):\n class_labels = clf.classes_\n y_pred_proba = clf.predict_proba(X)[:,1]\n ent = [entropy(i) for i in y_pred_proba]\n\n return sum(ent)/len(ent)", "def get_proba_by_label(self, label=None):\n if self.get_count_by_label(label) == 0:\n if label == 0:\n # REMEMBER: this is a display only, not a math model, in display we sub neg from 1, so return 1 to get zero\n return 1\n else:\n return 0\n elif len(self.data) - self.get_count_by_label(-1) == 0:\n # they're all unpredictable\n return 0\n elif label is None:\n # weird case, change neg's to 1-proba, which is different than rest of display\n pos_proba = sum(d.proba for d in self.data if d.pred == 1)\n neg_proba = sum(1 - d.proba for d in self.data if d.pred == 0)\n return (pos_proba + neg_proba) / (len(self.data) - self.get_count_by_label(-1))\n else:\n return sum(d.proba for d in self.data if d.pred == label) / self.get_count_by_label(label)", "def next_word_probability(self, observation, partial_out):\n if not hasattr(self, 'prev_enc'):\n self.prev_enc = None\n self.last_text = None\n if observation['text'] != self.last_text:\n self.prev_enc = None\n self.last_text = observation.get('text')\n self.observe(observation)\n\n obs = self.observation\n obs['eval_labels'] = [' '.join(partial_out)]\n batch = self.vectorize([obs])\n self.model.eval()\n self.model.longest_label = 1 # no need to predict farther ahead\n out = self.model(\n batch[0], # xs\n ys=(batch[1] if len(partial_out) > 0 else None),\n prev_enc=self.prev_enc)\n scores, self.prev_enc = out[1], out[3]\n # scores is bsz x seqlen x num_words, so select probs of current index\n assert len(partial_out) == scores.size(1) - 1\n probs = F.softmax(scores.select(1, len(partial_out)), dim=1).squeeze().cpu()\n dist = self.probs\n for i in range(len(probs)):\n try:\n val = probs[i].item()\n except AttributeError:\n val = probs[i][0]\n dist[self.dict[i]] = val\n self.batch = batch\n return dist", "def train(self, 
documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def get_lexical_generation_prob(self, word, label):\n word = word.lower()\n numer = self.SMOOTHING_VALUE\n if word in self.words_labels_counts[label] and self.words_labels_counts[label][word] != 0:\n numer += self.words_labels_counts[label][word]\n elif word in self.words_labels_counts[label]:\n numer += self.words_labels_counts[label][self.UNKNOWN_TOKEN]\n denom = self.label_counts[label] + self.SMOOTHING_VALUE * self.all_grams.get_count()\n return float(numer) / denom", "def predict(self, X):\r\n return 1 if self.predict_prob(X) > 0.5 else 0", "def get_relative_frequency_of_verbs(self): \n number_verbs_in_blob = count_words_in_blob_if_tag_meets_criteria(\n self.blob, is_verb)\n return float(number_verbs_in_blob) / len(self.blob.words)", "def lnprobability(self):\n return", "def prior_predictive(self):\n cfg = self.config\n n = cfg['batch_size'] * cfg['q/n_samples']\n n_samples = cfg['q/n_samples']\n with util.get_or_create_scope('model', reuse=True):\n h_prior = tf.cast(self.p_h_L.sample(n), cfg['dtype'])\n h_prior = tf.reshape(\n h_prior, [cfg['q/n_samples'], cfg['batch_size'], -1])\n h = [None] * cfg['p/n_layers']\n h[cfg['p/n_layers'] - 1] = h_prior\n for n in range(cfg['p/n_layers'] - 1, 0, -1):\n p_h_n = self.build_stochastic_layer(n, h_above=h[n])\n h[n - 1] = tf.cast(p_h_n.sample(), cfg['dtype'])\n return self.likelihood(h[0])", "def estimate_prob(self, history, word):\n\t\t# YOUR CODE HERE\n\n\t\tif history == '':\n\t\t\t# unigram\n\t\t\tword_frequency = self.ngram_counts[tuple([word])]\n\t\t\treturn word_frequency/self.total_counts\n\n\t\telse:\n\t\t\t# bigram\n\t\t\tword_frequency = self.ngram_counts[tuple([history, word])]\n\t\t\t# history_count = sum([self.ngram_counts[key] for key in self.ngram_counts if key[0] == history])\n\t\t\t# history_count = 
self.history_count[history]\n\t\t\thistory_count = self.ngram_counts[tuple([history])]\n\t\t\t# print('his: {}',format(history))\n\t\t\t# print('his count {}'.format(history_count))\n\t\t\treturn word_frequency/history_count", "def probabilities(self):\n raise NotImplementedError", "def score(self, doc, c):\n # >>> YOUR ANSWER HERE\n # the inner loop in the TEST NAIVE BAYES, sum up the logprior of the class and all words' loglikelihood\n sum = self.logprior[c]\n words = doc.split()\n for w in words:\n if w in self.vocabulary:\n sum += self.loglikelihood[(w, c)]\n return sum\n # >>> END YOUR ANSWER", "def pred_prob(hp, ss, y):\n K = len(ss['counts'])\n N = sum(ss['counts'])\n assert y >= 0 and y <= K\n if y < K:\n return log((ss['counts'][y] - hp['d']) / (hp['alpha'] + N))\n elif y == K:\n return log((hp['alpha'] + hp['d'] * K) / (hp['alpha'] + N))", "def predict_next(self, seq):\n context = tuple(seq[-2:]) # last two words\n pc = self.probas[context] # conditional distribution\n words, probs = zip(*pc.items()) # convert to list\n return np.random.choice(words, p=probs)", "def lnprior(self):\n \n return", "def probability(self, words):\n if len(words) == 0:\n return 0\n \n prob = 1\n model = self.mdl\n \n words_ngram = NGramLM(self.N, []).create_ngrams(words) # Create NGram model for words\n for ngram in words_ngram:\n # Never seen before ngram or n-1gram\n if (ngram not in list(model['ngram'])) or (ngram[:-1] not in list(model['n1gram'])):\n return 0\n if isinstance(self, NGramLM):\n prob *= model[model['ngram'] == ngram]['prob'].values[0]\n \n def recur_prob(model, w):\n prob = 1\n prev_mod = model.prev_mdl\n if isinstance(prev_mod, UnigramLM): # Unigram base case\n prob *= prev_mod.mdl[w[0]]\n else:\n words_n1gram = NGramLM(prev_mod.N, []).create_ngrams(w) # Create NGram model for words\n prob *= prev_mod.mdl[prev_mod.mdl['ngram'] == words_n1gram[0]]['prob'].values[0]\n prob *= recur_prob(prev_mod, words_n1gram[0]) # Recursive call\n return prob\n\n prob *= recur_prob(self, words_ngram[0])\n \n return prob", "def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def score(self, data):\n\n score_mappings = {\n \"0\": np.log(self.class_zero_doc_count / self.total_docs),\n \"1\": np.log(self.class_one_doc_count / self.total_docs)\n }\n\n features = self.featurize(data)\n\n for f in features:\n\n if(f[0] in self.class_zero):\n cond_prob_zero = np.log((self.class_zero[f[0]] + 1) / (self.class_zero_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_zero = np.log(1 / (self.class_zero_feature_count + len(self.vocab)))\n else:\n cond_prob_zero = 0\n\n if(f[0] in self.class_one):\n cond_prob_one = np.log((self.class_one[f[0]] + 1) / (self.class_one_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_one = np.log(1 / (self.class_one_feature_count + len(self.vocab)))\n else:\n cond_prob_one = 0\n\n score_mappings[\"0\"] += cond_prob_zero\n score_mappings[\"1\"] += cond_prob_one\n\n score_mappings[\"0\"] = np.exp(score_mappings[\"0\"])\n score_mappings[\"1\"] = np.exp(score_mappings[\"1\"])\n\n return score_mappings", "def estimate_probabilities(previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0):\r\n \r\n previous_n_gram = tuple(previous_n_gram)\r\n \r\n # add <e> <unk> to the vocabulary\r\n # <s> is not needed since it should not appear as the 
next word\r\n vocabulary = vocabulary + [\"<e>\", \"<unk>\"]\r\n vocabulary_size = len(vocabulary)\r\n \r\n probabilities = {}\r\n for word in vocabulary:\r\n probability = estimate_probability(word, previous_n_gram, \r\n n_gram_counts, n_plus1_gram_counts, \r\n vocabulary_size, k=k)\r\n probabilities[word] = probability\r\n\r\n return probabilities", "def calculate_probability(self):\n return 0", "def get_e_probs(dataset):\n\n # Number of times that the state s is seen paired with observation x in the corpus\n e_word_tag_counts = {}\n\n for sentence in dataset:\n\n for word_to_tag in sentence:\n # Foreach (word, tag) tuple we are calculating number of incstances\n if word_to_tag in e_word_tag_counts:\n e_word_tag_counts[word_to_tag] += 1\n else:\n e_word_tag_counts[word_to_tag] = 1\n\n return e_word_tag_counts", "def _calc_train_class_prb(self, labels_list=None):\n if not labels_list:\n return {}\n\n n = len(labels_list)\n label_num = len(self.labels)\n prb = {}\n for l in self.labels:\n # tmp = (l, sum(1 if v == l else 0 for k, v in train_data)/n)\n prb[l] = (labels_list.count(l) + 1.0) / (n + label_num)\n return prb", "def predict(self, doc):\n # >>> YOUR ANSWER HERE\n # For each class c, calculate the corresponding score of the doc\n scores = [(self.score(doc, c), c) for c in self.classes]\n # after the sort by score, return the most likely class\n scores.sort(key=lambda x: x[0])\n return scores[-1][1]\n # >>> END YOUR ANSWER", "def tf(word, document):\n return freq(word,document) / wordCount(document)", "def predict_proba(self):\n if self.rank_prob is None:\n raise ValueError('No results available. Did you already call predict(...)?')\n\n return np.array([sum(map(lambda x: x[1], result)) / len(result) for result in self.rank_prob])", "def find_probabilities_of_labels(examples_set):\n labelsCount = {\n \"yes\": 0,\n \"no\": 0\n }\n probOfLabels = list()\n\n if isinstance(examples_set[0], list):\n totalCount = len(examples_set)\n\n for example in examples_set:\n nextLabel = example[14]\n labelsCount[nextLabel] += 1\n\n # There is only one example in the set\n else:\n totalCount = 1\n nextLabel = examples_set[14]\n labelsCount[nextLabel] += 1\n\n for label in labelsCount.keys():\n labelProbability = (labelsCount[label] / totalCount)\n probOfLabels.append(labelProbability)\n\n return probOfLabels", "def prob_given(self, posterior, prior):\n\t # print \"posterior, prior\", posterior, prior\n\t return self.prob(merge(prior, posterior)) / self.prob(prior) if self.prob(prior) else 0", "def word_probability(self, word: str) -> int:\n try:\n return self.fdist[word.lower()] / len(self.fdist.keys())\n except KeyError:\n return 0.0", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def get_probability(self, sentence):\n if len(sentence) == 1:\n return Decimal(10) ** 
self.get_unigram_log_prob(sentence)\n elif len(sentence) == 2:\n return Decimal(10) ** self.get_bigram_log_prob(sentence)\n else:\n log_prob = Decimal(0.0)\n for w1, w2, w3 in zip(sentence, sentence[1:], sentence[2:]):\n log_prob += self.get_trigram_log_prob((w1, w2, w3))\n log_prob = Decimal(log_prob)\n return Decimal(10) ** log_prob", "def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # add bias variable 1\n prob = np.zeros(X.shape[0], self.num_classes)\n ### YOUR CODE HERE\n z = X.dot(self.w)\n prob = soft_reg.softmax(z)\n ### END CODE\n return prob", "def predict_proba(self, x):\n e = self.predict_evidence(x)\n a = e + self.prior\n return a / torch.sum(a, dim=-1, keepdim=True)", "def log_prob(self):", "def get_relative_frequency_of_nouns(self):\n number_nouns_in_blob = count_words_in_blob_if_tag_meets_criteria(\n self.blob, is_noun)\n return float(number_nouns_in_blob) / len(self.blob.words)", "def compute_pos_weights(self, label_tensor):\n label = label_tensor.numpy()\n batch_size = label.shape[0]\n frequencies = np.sum(label, axis=0)\n \n pos_weights = np.ones((1, NUM_CLASSES))\n indices = frequencies != 0.\n pos_weights[indices] = np.divide(batch_size - frequencies[indices], frequencies[indices])\n print(pos_weights)\n return pos_weights", "def predict(self,x):\n preds = [tree.predict(x) for tree in self.forest]\n if self.classify:\n cls_counts = [0] * self.param['numClasses']\n for p in preds:\n cls_counts[p] += 1\n return argmax(cls_counts)\n else:\n return sum(preds) / (len(preds)*1.0)", "def predict(self, key):\n return self.counts.get(key, 1.0)", "def calculate_perplexity(sentence, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0):\r\n # length of previous words\r\n n = len(list(n_gram_counts.keys())[0]) \r\n \r\n # prepend <s> and append <e>\r\n sentence = [\"<s>\"] * n + sentence + [\"<e>\"]\r\n \r\n # Cast the sentence from a list to a tuple\r\n sentence = tuple(sentence)\r\n \r\n # length of sentence (after adding <s> and <e> tokens)\r\n N = len(sentence)\r\n \r\n\r\n product_pi = 1.0\r\n \r\n \r\n # Index t ranges from n to N - 1, inclusive on both ends\r\n for t in range(n, N): \r\n\r\n # get the n-gram preceding the word at position t\r\n n_gram = sentence[t-n:t]\r\n \r\n # get the word at position t\r\n word = sentence[t]\r\n \r\n\r\n probability = estimate_probability(word, n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0)\r\n \r\n\r\n product_pi *= 1/probability\r\n\r\n # Take the Nth root of the product\r\n perplexity = product_pi**(1/float(N))\r\n return perplexity", "def conditional_prob(self, label, datapoint):\r\n\r\n # REPLACE THE COMMAND BELOW WITH YOUR CODE\r\n feat_vec = self.x[datapoint]\r\n\r\n if label == 1:\r\n return self.conditional_prob_1(feat_vec)\r\n\r\n return 1 - self.conditional_prob_1(feat_vec)", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def raw_unigram_probability(self, unigram):\n\n assert len(unigram) == 1\n count_unigram = self.unigramcounts[unigram]\n if self.total_word_count == 0 :\n return 1 / self.total_sentence_count\n else :\n return (count_unigram / self.total_word_count)", "def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability 
very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product", "def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score", "def label_accuracies(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (num_correct / preds.size(0)) * 100.0", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def probability_of_default(model, prediction_features):\n return model.predict_proba(prediction_features)[:, 1]", "def estimate_nb(x,y,smoothing):\n labels = set(y)\n doc_counts = defaultdict(float)\n weights = defaultdict(float)\n\n vocab = set()\n for base_features in x:\n for word in base_features.keys():\n vocab.add(word)\n\n for label in y:\n doc_counts[label] += 1\n\n\n for label in labels:\n weights[(label, OFFSET)] = np.log(doc_counts[label] / sum(doc_counts.values()))\n log_probabilities = estimate_pxy(x, y, label, smoothing, vocab)\n for word in log_probabilities:\n weights[(label, word)] = log_probabilities[word]\n\n return weights", "def probabilities(doc, doc_length, prob_dict):\n\tfor elem in doc:\n\t\tdoc[elem] = doc[elem]/doc_length\n\tfor key in doc.keys():\n\t\tif key in stop_words:\n\t\t\tdoc.pop(key)\n\tfor key in doc.keys():\n\t\ttry:\n\t\t\tdoc[key] = prob_dict[key]\n\t\texcept KeyError:\n\t\t\tdoc[key] = 0.0\n\t\t\t#doc[key] = doc[key]/doc_length\n\treturn doc", "def predict_proba(self):\n ..." ]
[ "0.71765524", "0.7150891", "0.71340966", "0.6940001", "0.6911364", "0.68786913", "0.6726276", "0.6663634", "0.66374254", "0.6611508", "0.6575516", "0.6557912", "0.6525762", "0.65140253", "0.64985853", "0.64976215", "0.64904463", "0.6479281", "0.64610314", "0.6453354", "0.64350647", "0.6392512", "0.6390381", "0.63798016", "0.63796854", "0.636444", "0.636444", "0.63628894", "0.6361096", "0.63285863", "0.62781274", "0.6273347", "0.62470376", "0.62454885", "0.62347263", "0.6233218", "0.6228391", "0.6208629", "0.6180371", "0.61626333", "0.61583227", "0.6138598", "0.6136727", "0.6135156", "0.611291", "0.61015755", "0.6092007", "0.6086202", "0.60841554", "0.608294", "0.6081733", "0.60738623", "0.6073533", "0.60498697", "0.60435224", "0.6041088", "0.6038326", "0.6035766", "0.6031415", "0.6029699", "0.6027846", "0.6019086", "0.60072064", "0.6001052", "0.5997753", "0.59970987", "0.5995656", "0.59949666", "0.5990841", "0.5984102", "0.59836817", "0.5982981", "0.59809065", "0.5965016", "0.5958857", "0.5943206", "0.5941859", "0.59399927", "0.5937486", "0.5931675", "0.59200865", "0.59132427", "0.5912063", "0.59077305", "0.59027296", "0.5898989", "0.5886347", "0.5878046", "0.58775514", "0.5876304", "0.5874243", "0.5873203", "0.5871779", "0.5867166", "0.5863504", "0.5859961", "0.5854602", "0.5848645", "0.58475375", "0.5846889" ]
0.71356946
2
return the class conditional probability of label over all words, with smoothing
def p_word_given_label(vocab, training_data, label): smooth = 1 # smoothing factor word_prob = {} # TODO: add your code here total_word = 0 word_prob[None] = 0 for dic in training_data: for index0, i0 in enumerate(dic['bow']): if (list(dic['bow'])[index0] in word_prob): continue; word_prob[list(dic['bow'])[index0]] = 0 #word_prob[None] = 0 if(dic["label"] == label): for index, i in enumerate(dic["bow"]): if(list(dic['bow'])[index] in vocab): if(list(dic['bow'])[index] in word_prob): word_prob[list(dic['bow'])[index]] += dic["bow"][i] else: word_prob[list(dic['bow'])[index]] = dic["bow"][i] else: if(None in word_prob): word_prob[None] += dic["bow"][i] else: word_prob[None] = 0 total_word += dic["bow"][i] #word_prob [None] = 5 for h in word_prob: word_prob[h] = math.log((word_prob[h] + smooth*1)) - math.log((total_word + smooth*(len(vocab) +1))) return word_prob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass", "def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict = self.model[\"words\"].get(word, None)\n if word_dict:\n total_grade += math.log(word_dict[cur_label], math.e)\n chance.append((total_grade, cur_label))\n _, prediction = max(chance)\n return prediction", "def predict(self, sentence, smoothing=None):\n words = sentence.split()\n words.append(\"STOP\")\n probability = 1.0\n\n words = [self.START_SYMBOL, self.START_SYMBOL] + words\n ###################\n # Compute the probability of a sentence under the trigram model\n # p(x1,..,xn)= \\prod {q(x_i| x_{i-2}x_{i-1}}\n for i in xrange(len(words)-2):\n probability *= self.trigram_prob(words[i], words[i+1], words[i+2])\n\n return probability", "def get_ngram_prob(self, label_seq):\n curr_ngram = self.all_grams\n for i in range(0, len(label_seq)):\n label = label_seq[i]\n if i == len(label_seq) - 1:\n denom = curr_ngram.get_count() + self.SMOOTHING_VALUE * 9\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # For smoothing, just add self.SMOOTHING_VALUE\n numer = curr_ngram.get_count() + self.SMOOTHING_VALUE\n return float(numer) / denom", "def classify(self, sText):\n words = self.tokenize(sText)\n #print \"words here, \", words\n words = [s.lower() for s in words]\n words = set(words)\n #words = set(words) potentially bring this back in here again.\n\n num_docs = self.freq_dist['num_good'] + self.freq_dist['num_bad']\n prob_neg = 0\n prob_pos = 0\n\n for word in words:\n if word in self.freq_dist['freq_dist']:\n prob_neg += math.log((self.freq_dist['freq_dist'][word]['bad']+1) / 0.175)\n prob_pos += math.log((self.freq_dist['freq_dist'][word]['good']+1) / 0.825)\n\n prob_pos = abs(prob_pos)\n prob_neg = abs(prob_neg)\n if prob_pos > prob_neg:\n return 1\n return 0", "def classify(self, sText):\n threshold = .1\n posCount = float(sum(self.posFreqDict.itervalues()))\n negCount = float(sum(self.negFreqDict.itervalues()))\n negProbability=0.0\n posProbability=0.0\n for word in self.tokenize(sText):\n if word in self.posFreqDict:\n posProbability+= log10(float( (1.0+float(self.posFreqDict[word]))/posCount))\n else:\n posProbability+=log10(float(1.0/posCount))\n if word in self.negFreqDict:\n negProbability+= log10(float( (1.0+float(self.negFreqDict[word]))/negCount))\n else:\n negProbability+= log10(float(1.0/negCount))\n if abs(posProbability-negProbability)< .1 :\n return \"neutral\"\n elif posProbability>negProbability:\n return \"positive\"\n else:\n return \"negative\"", "def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}", "def prior(training_data, label_list):\n\n smooth = 1 # smoothing factor\n logprob = {}\n # TODO: add your code here\n numfile1 = 0\n numfile2 = 0\n for dic in training_data:\n if(dic[\"label\"] == 
label_list[0]):\n numfile1 += 1\n elif(dic[\"label\"] == label_list[1]):\n numfile2 += 1\n numtotal = numfile1 + numfile2\n\n prob1 = (numfile1+smooth)/(numtotal+2)\n prob2 = (numfile2 + smooth) / (numtotal + 2)\n\n logprob[label_list[0]] = math.log(prob1)\n logprob[label_list[1]] = math.log(prob2)\n\n\n return logprob", "def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p", "def classify_spam(sms):\n return naive_bayes_predict(spam_ratio, words, spamicity, sms) > seuil", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def classify(words, all_tags):\n answer = []\n for word in words:\n label, score = clf_base.predict({word:1},weights,list(all_tags))\n answer.append(label)\n return answer", "def most_probable_class(text, weights):\n\n pos_weights = weights['positive']\n neg_weights = weights['negative']\n neu_weights = weights['neutral']\n features = calculate_features(text)\n pos_numerator = 0.0\n neg_numerator = 0.0\n neu_numerator = 0.0\n denominator = 0.0\n for f in features:\n if f in pos_weights and f in neg_weights and f in neu_weights:\n pos_numerator += pos_weights[f] * features[f]\n neg_numerator += neg_weights[f] * features[f]\n neu_numerator += neu_weights[f] * features[f]\n denominator += pos_numerator + neg_numerator + neu_numerator\n else:\n pos_numerator += 0\n neg_numerator += 0\n neu_numerator += 0\n denominator += pos_numerator + neg_numerator + neu_numerator\n\n pos_prob = (\"positive\", exp(pos_numerator))# /exp(denominator))\n neg_prob = (\"negative\", exp(neg_numerator))# /exp(denominator))\n neu_prob = (\"neutral\", exp(neu_numerator))# /exp(denominator))\n return max(neu_prob, neg_prob, pos_prob, key=lambda x: x[1])", "def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n 
denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def compute_propability(word, label, dict):\n return dict[label][word] / sum(dict[label].values())", "def classify(self, documents):\n predictions = []\n for doc in documents:\n\n score_sod = math.log(self.priorSOD)\n score_pop = math.log(self.priorPOP)\n for term in doc.tokens:\n if term in self.cond_prob_sod.keys():\n score_sod += math.log(self.cond_prob_sod[term])\n if term in self.cond_prob_pop.keys():\n score_pop += math.log(self.cond_prob_pop[term])\n if(score_pop >= score_sod): #defaults to ham if score = even \n predictions.append('pop')\n else:\n predictions.append('sod')\n \n return predictions \n pass", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def classify_message(message_words, ham_l, spam_l):\n data_ham_words, data_spam_words = train_function(ham_l, spam_l)\n message_unique_words = set(message_words)\n message_ham_words, message_spam_words = [], []\n for word in message_unique_words:\n if word in data_ham_words:\n message_ham_words.append(word)\n if word in data_spam_words:\n message_spam_words.append(word)\n probability_ham = ((len(ham_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_ham_words, data_ham_words)\n probability_spam = ((len(spam_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_spam_words, data_spam_words)\n print(probability_ham, probability_spam)\n if probability_ham > probability_spam:\n return \"This letter is ham.\"\n else:\n return \"This letter is spam.\"", "def get_lexical_generation_prob(self, word, label):\n word = word.lower()\n numer = self.SMOOTHING_VALUE\n if word in self.words_labels_counts[label] and self.words_labels_counts[label][word] != 0:\n numer += self.words_labels_counts[label][word]\n elif word in self.words_labels_counts[label]:\n numer += self.words_labels_counts[label][self.UNKNOWN_TOKEN]\n denom = 
self.label_counts[label] + self.SMOOTHING_VALUE * self.all_grams.get_count()\n return float(numer) / denom", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def test_lcwa_label_smoothing(self):\n # Create dummy dense labels\n labels = torch.zeros(self.batch_size, self.num_entities)\n for i in range(self.batch_size):\n labels[i, self.random.randint(self.num_entities)] = 1.0\n # Check if labels form a probability distribution\n np.testing.assert_allclose(torch.sum(labels, dim=1).numpy(), 1.0)\n\n # Apply label smoothing\n smooth_labels = apply_label_smoothing(labels=labels, epsilon=self.epsilon, num_classes=self.num_entities)\n # Check if smooth labels form probability distribution\n np.testing.assert_allclose(torch.sum(smooth_labels, dim=1).numpy(), 1.0, rtol=self.relative_tolerance)", "def predict(self, observation):\n\t\t# TODO - complete this\n\t\tp_max = 0\n\t\tpredict = None\n\t\tfor label in self.possible_labels:\n\t\t\tpossiblity = 1\n\t\t\tlabel_gaussian = self.gaussians.get(label)\n\t\t\tfor i in range(len(observation)):\n\t\t\t\t(mean, std) = label_gaussian[0][i]\n\t\t\t\tvalue = observation[i]\n\t\t\t\tpossiblity *= self.gaussians_calc(value, mean, std)\n\t\t\tif p_max < possiblity:\n\t\t\t\tp_max = possiblity\n\t\t\t\tpredict = label\n\n\t\treturn predict", "def predict_next(self, seq):\n context = tuple(seq[-2:]) # last two words\n pc = self.probas[context] # conditional distribution\n words, probs = zip(*pc.items()) # convert to list\n return np.random.choice(words, p=probs)", "def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels", "def sample_labels(self, y, num_of_sents = 5, num_of_samples = 10,\n num_of_classes = 3, start_index = 5, get_prob = True):\n classes = self.classes_()\n ret = []\n for sent in y[:num_of_sents]:\n cur = []\n for word in sent[start_index: start_index + num_of_samples]:\n sorted_prob = am(word)\n cur.append([(classes[ind], word[ind]) if get_prob else classes[ind]\n for ind in sorted_prob[:num_of_classes]])\n ret.append(cur)\n return ret", "def label_smoothing_regularization(self, chars_labels, weight=0.1):\n one_hot_labels = tf.one_hot(\n chars_labels, depth=self.num_char_classes, axis=-1)\n pos_weight = 1.0 - weight\n neg_weight = weight / self.num_char_classes\n return one_hot_labels * pos_weight + neg_weight", "def compute_class_freqs(gen):\r\n labels = gen.labels\r\n N = labels.shape[0]\r\n positive_frequencies = np.sum(labels, axis=0) / N\r\n negative_frequencies = np.sum(1 - labels, axis=0) / N\r\n return positive_frequencies, negative_frequencies", "def classify(tweets,table,positives,negatives,p_tweets,n_tweets):\n\n\n st = LancasterStemmer()\n\n n_words = len(table)\n in_table = 0\n not_in_table = 0\n\n\n y_pred = np.zeros(len(tweets)).astype('int32')\n\n for i in range(len(tweets)):\n likelihood_pos = 0\n likelihood_neg = 0\n \n # MAP negatives and positives\n for word in tweets[i].split():\n word = st.stem(word.decode('utf-8'))\n if word in table:\n in_table += 1\n likelihood_pos += m.log((table[word][0]+1)/float(positives + 1*n_words))\n likelihood_neg += m.log((table[word][1]+1)/float(negatives + 1*n_words))\n \n else:\n not_in_table += 1\n likelihood_pos += m.log(1/float(positives + 1*n_words))\n 
likelihood_neg += m.log(1/float(negatives + 1*n_words))\n\n likelihood_pos += m.log(p_tweets/float(p_tweets + n_tweets))\n likelihood_neg += m.log(n_tweets/float(p_tweets + n_tweets))\n\n\n\n # Classify as positive or negative\n if likelihood_neg < likelihood_pos: \n y_pred[i] = 1\n\n prediction = np.bincount(y_pred)\n\n print \"Known words: %d\" % in_table\n print \"Unknown words %d\\n\" % not_in_table\n\n positive_ratio = prediction[1]/float(prediction[1] + prediction[0])\n\n group = \"Positive\" if positive_ratio > 0.5 else \"Negative\" \n\n\n return positive_ratio,group", "def threshold_probs(probs):\n classes = np.ones(len(probs),)\n classes[probs < 0.5] = 0\n return classes", "def naive_bayes_predict(spam_ratio, words, spamicity, sms):\n res = set(sms.split())\n\n product = 1\n for word in res:\n if word in words:\n heur = spamicity[words[word]]\n product *= heur\n \n is_spam = spam_ratio * product\n # print(is_spam)\n return is_spam", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def extract_probs(label, x):\n\tb = np.ascontiguousarray(x).view(np.dtype((np.void, x.dtype.itemsize * x.shape[1])))\n\tunique_array, unique_indices, unique_inverse_x, unique_counts = \\\n\t\tnp.unique(b, return_index=True, return_inverse=True, return_counts=True)\n\tunique_a = x[unique_indices]\n\tb1 = np.ascontiguousarray(unique_a).view(np.dtype((np.void, unique_a.dtype.itemsize * unique_a.shape[1])))\n\tpxs = unique_counts / float(np.sum(unique_counts))\n\tp_y_given_x = []\n\tfor i in range(0, len(unique_array)):\n\t\tindexs = unique_inverse_x == i\n\t\tpy_x_current = np.mean(label[indexs, :], axis=0)\n\t\tp_y_given_x.append(py_x_current)\n\tp_y_given_x = np.array(p_y_given_x).T\n\treturn p_y_given_x, b1, b, unique_a, unique_inverse_x, pxs", "def classify(self, sText):\n\n sum1, sum2 = self.count()\n\n #len1 = len(self.posRev)\n #len2 = len(self.negRev)\n\n probPos = 0 #math.log(float(sum1)/(sum1+sum2))\n probNeg = 0 #math.log(float(sum2)/(sum1+sum2))\n\n ls = self.tokenize(sText)\n\n #test Positive case\n for word in ls:\n prob = float(self.posRev.get(word, 0) + 1)/(sum1)\n if prob != 0:\n probPos += math.log(prob)\n\n #test Negative case\n for word in ls:\n prob = float(self.negRev.get(word, 0) + 1)/(sum2)\n if prob != 0:\n probNeg += math.log(prob)\n\n print probPos\n print probNeg\n\n print probPos-probNeg\n if (probPos - probNeg) > 1:\n return \"positive\"\n elif (probNeg - probPos) > 1:\n return \"negative\"\n else:\n return \"neutral\"", "def conditional_prob(self, label, datapoint):\r\n\r\n # REPLACE THE COMMAND BELOW WITH YOUR CODE\r\n feat_vec = self.x[datapoint]\r\n\r\n if label == 1:\r\n return self.conditional_prob_1(feat_vec)\r\n\r\n return 1 - self.conditional_prob_1(feat_vec)", "def _cal_label_distribution(self):\n if self.split == 'train':\n self.label_weights = np.zeros(21, dtype=np.float32)\n for label_list in self.label:\n self.label_weights += np.histogram(label_list, range(22))[0]\n self.label_weights /= np.sum(self.label_weights)\n print(\"per class weight:\", self.label_weights)\n self.label_weights = 1. 
/ np.log(1.2 + self.label_weights)\n print(\"np.log(1.2 + self.label_weights):\", self.label_weights)\n elif self.split == 'test':\n self.label_weights = np.ones(21)\n return self.label_weights.tolist()", "def calculate_likelihoods_bernoulli(data, labels, vocab):\r\n classes = set(labels)\r\n likelihoods = {}\r\n # Calculate likelihood for each class\r\n for cls in classes:\r\n documentsInClass = [set(map(lambda y: y[0], data[x])) for x in range(len(data)) if labels[x] == cls]\r\n numDocsInClass = len(documentsInClass)\r\n results = {}\r\n for word in vocab:\r\n numDocsWithWordInClass = len(filter(lambda x: word in x, documentsInClass))\r\n # Binary variable-- either present or not present\r\n results[word] = laplace_smooth(numDocsWithWordInClass, numDocsInClass, 2)\r\n # Special laplace smoothing for words not found in training data\r\n results[None] = laplace_smooth(0, numDocsInClass, 2)\r\n likelihoods[cls] = results\r\n return likelihoods", "def PredictLabel(sentence, model_main, word2vec, boundary=0.5):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_main.predict_proba(features.reshape(1,-1))[0]\n if model_main.classes_[prediction.argmax()]!=\"clerical\":\n return model_main.classes_[prediction.argmax()]\n else:\n if np.max(prediction)>boundary:\n return \"clerical\"\n else:\n ranger = range(len(prediction))\n del ranger[prediction.argmax()]\n return model_main.classes_[ranger][prediction[ranger].argmax()]", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\r\n # TODO: Write your code here\r\n # return predicted labels of development set\r\n retval = []\r\n smoothing_parameter = 0.0055\r\n # Generate a unigram BOW for both positive and negative reviews, choose the top 2500 words\r\n pos_bow, neg_bow = generate_unigram_BOW(train_set, train_labels)\r\n sorted_pos = sorted(pos_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_neg = sorted(neg_bow.items(), key=lambda x: x[1], reverse = True)\r\n pos_words = sorted_pos[:].copy()\r\n neg_words = sorted_neg[:].copy()\r\n\r\n pos_bi_bow, neg_bi_bow = generate_bigram_BOW(train_set, train_labels)\r\n sorted_bi_pos = sorted(pos_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n sorted_bi_neg = sorted(neg_bi_bow.items(), key=lambda x: x[1], reverse = True)\r\n bi_pos_words = sorted_bi_pos[:].copy()\r\n bi_neg_words = sorted_bi_neg[:].copy()\r\n\r\n # Calculate the log probabilities each word given type\r\n pos_count = sum(pair[1] for pair in pos_words)\r\n neg_count = sum(pair[1] for pair in neg_words)\r\n bi_pos_count = sum(pair[1] for pair in bi_pos_words)\r\n bi_neg_count = sum(pair[1] for pair in bi_neg_words)\r\n\r\n log_probability_pos = {} #(word)->P(word|positive)\r\n log_probability_neg = {} #(word)->P(word|negative)\r\n log_prob_bi_pos = {}\r\n log_prob_bi_neg = {}\r\n\r\n for pair in pos_words:\r\n pos_prob = np.log((pair[1]+smoothing_parameter)/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n log_probability_pos[pair[0]] = pos_prob\r\n\r\n for pair in neg_words:\r\n neg_prob = np.log((pair[1]+smoothing_parameter)/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n log_probability_neg[pair[0]] = neg_prob\r\n\r\n for pair in bi_pos_words:\r\n bi_pos_prob = np.log((pair[1]+smoothing_parameter)/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n log_prob_bi_pos[pair[0]] = bi_pos_prob\r\n\r\n for pair in bi_neg_words:\r\n 
bi_neg_prob = np.log((pair[1]+smoothing_parameter)/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n log_prob_bi_neg[pair[0]] = bi_neg_prob\r\n # Finished training\r\n\r\n # For each of the new reviews from development data\r\n for review in dev_set:\r\n uni_pos = np.log(pos_prior)\r\n uni_neg = np.log(1 - pos_prior)\r\n for word in review:\r\n if word in log_probability_pos:\r\n uni_pos += log_probability_pos[word]\r\n elif word not in log_probability_pos:\r\n uni_pos += np.log(smoothing_parameter/(pos_count+smoothing_parameter*(len(pos_words) + 1)))\r\n\r\n if word in log_probability_neg:\r\n uni_neg += log_probability_neg[word]\r\n elif word not in log_probability_neg:\r\n uni_neg += np.log(smoothing_parameter/(neg_count+smoothing_parameter*(len(neg_words) + 1)))\r\n\r\n bi_pos = np.log(pos_prior)\r\n bi_neg = np.log(1 - pos_prior)\r\n for i in range(len(review)-1):\r\n currTuple = (review[i], review[i+1])\r\n if currTuple in log_prob_bi_pos:\r\n bi_pos += log_prob_bi_pos[currTuple]\r\n elif currTuple not in log_prob_bi_pos:\r\n bi_pos += np.log(smoothing_parameter/(bi_pos_count+smoothing_parameter*(len(bi_pos_words) + 1)))\r\n\r\n if currTuple in log_prob_bi_neg:\r\n bi_neg += log_prob_bi_neg[currTuple]\r\n elif currTuple not in log_prob_bi_neg:\r\n bi_neg += np.log(smoothing_parameter/(bi_neg_count+smoothing_parameter*(len(bi_neg_words) + 1)))\r\n\r\n MAP_pos = (1-0.4)*uni_pos + 0.4*bi_pos\r\n MAP_neg = (1-0.4)*uni_neg + 0.4*bi_neg\r\n\r\n if MAP_pos >= MAP_neg:\r\n retval.append(1)\r\n else:\r\n retval.append(0)\r\n\r\n return retval", "def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))", "def calc_feature_probs(image_type, image_data, smoothing):\n counts = np.array([np.sum(image_data.features[image_data.labels == value], axis=0) + smoothing for value in range(image_type.categories)])\n denoms = np.array([np.count_nonzero(image_data.labels == value) + (smoothing * image_type.feature_kinds) for value in range(image_type.categories)])\n return counts / denoms[:, np.newaxis, np.newaxis]", "def classify(sent, classifier=None):\n if classifier == None:\n try:\n classifier=pickle.load(open('nb_classifier', 'rb'))\n except IOError as e:\n print(\"Error: nb_classifier file not found\")\n return\n except:\n print(\"Unexpected Error\")\n return\n cat = classifier.classify(bag_of_words(word_tokenize(sent)))\n weight = classifier.prob_classify(bag_of_words(word_tokenize(sent))).prob(cat)\n return cat, weight", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def sentiment_score(text, loaded_model = loaded_model, vectorizer = tokenizer):\n # tweet_tf_idf = vect_char.transform(text)\n 
tweet_token = tokenizer.texts_to_sequences(text)\n tweet_token = pad_sequences(tweet_token, maxlen = 40)\n sentiment = loaded_model.predict_proba(tweet_token)\n neg_prob = sentiment[0][0]\n pos_prob = sentiment[0][1]\n return neg_prob, pos_prob", "def adjusted_classes(pred_prob, threshold):\n return [1 if y >= threshold else 0 for y in pred_prob]", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + 
p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def estimate_pxy(x,y,label,smoothing,vocab):\n log_probabilities = defaultdict(float)\n corpus_counts = get_corpus_counts(x, y, label)\n total = sum(corpus_counts.values())\n for word in vocab:\n log_probabilities[word] = np.log(((corpus_counts[word] if word in corpus_counts else 0) + smoothing) / (total + len(vocab) * smoothing))\n return log_probabilities", "def prob(self, word, context=None):\n if not context:\n context = ()\n else:\n context = tuple(context)\n prob = 0\n for i in range(len(context) + 1):\n prob += self.weights[i] * self.ngram_cpd[context[i:]][word]\n return prob", "def estimate_nb(x,y,smoothing):\n labels = set(y)\n doc_counts = defaultdict(float)\n weights = defaultdict(float)\n\n vocab = set()\n for base_features in x:\n for word in base_features.keys():\n vocab.add(word)\n\n for label in y:\n doc_counts[label] += 1\n\n\n for label in labels:\n weights[(label, OFFSET)] = np.log(doc_counts[label] / sum(doc_counts.values()))\n log_probabilities = estimate_pxy(x, y, label, smoothing, vocab)\n for word in log_probabilities:\n weights[(label, word)] = log_probabilities[word]\n\n return weights", "def get_label(prob_label, target):\n return target if random.random() <= prob_label else 1 - target", "def Fit(text):\n article_tfidf = TransformData([text])\n global CLASSIFIER\n predicted_probs = CLASSIFIER.predict_proba(article_tfidf)\n # the output shoud be an array with two elements, one corresponding to\n # probability it's a positive sentiment and the other corresponding to\n # probability it's a negative sentiment.\n return list(zip(CLASSIFIER.classes_, predicted_probs[0]))", "def PredictiveDist(self, label='pred'):\n # TODO: fill this in\n lam = 1\n pred = thinkbayes2.MakePoissonPmf(lam, 15)\n return pred", "def class_conditional_word_dist(self, Mprint=20):\n self.class_word_dist = np.array(np.vstack([self.data[self.labels == ci, :].sum(0)/self.data[self.labels == ci, :].sum() for ci in np.unique(self.labels)])) # num of classes x num of words\n self.labels_word = self.class_word_dist.argmax(0)\n for i in range(self.class_word_dist.shape[0]):\n print('top {} frequent words in class {}'.format(Mprint, i))\n idx = np.argsort(self.class_word_dist[i, :])[::-1][:Mprint]\n for j in range(Mprint):\n print(' {:3d}: {:10s} {:.4f}'.format(j, self.vocab[idx[j]], self.class_word_dist[i, idx[j]]))", "def compute_class_freqs(labels):\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # total number of patients (rows)\n N = len(labels)\n \n positive_frequencies = np.sum(labels,axis=0)/N\n negative_frequencies = 1-positive_frequencies\n\n ### END CODE HERE ###\n return positive_frequencies, negative_frequencies", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in 
sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 1 / N + V\n score += math.log(prob)\n\n return score", "def propagate_labels_majority(image,labels):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels)\n outputs = zeros(amax(rlabels)+1,'i')\n counts = zeros(amax(rlabels)+1,'i')\n for rlabel, label_, count in cors.T:\n if not rlabel or not label_:\n # ignore background correspondences\n continue\n if counts[rlabel] < count:\n outputs[rlabel] = label_\n counts[rlabel] = count\n outputs[0] = 0\n return outputs[rlabels]", "def featureLikelihood():\r\n\r\n\t# Lists\r\n\twords = []\r\n\tfinalWords = []\r\n\tposWords = []\r\n\tnegWords = []\r\n\tfeatureListPos = []\r\n\tfeatureListNeg = []\r\n\r\n\t# Counters\r\n\tposCount = 0.0\r\n\tnegCount = 0.0\r\n\r\n\t# Temporary Lists for formating\r\n\tfeatureListPosFormat = []\r\n\tfeatureListNegFormat = []\r\n\r\n\t# Strings\r\n\ts = \" \"\r\n\tposString = \"\"\r\n\tnegString = \"\"\r\n\r\n\tseen = set()\r\n\r\n\t# Add all words to words list and count positive & negative occurences\r\n\tfor item in trainingData:\r\n\t\tfor word in item[2]:\r\n\t\t\twords.append(word)\r\n\t\tif item[1] == '0':\r\n\t\t\tfor word in item[2]:\r\n\t\t\t\tposWords.append(word)\r\n\t\t\t\tposCount += 1\r\n\t\tif item[1] == '1':\r\n\t\t\tfor word in item[2]:\r\n\t\t\t\tnegWords.append(word)\r\n\t\t\t\tnegCount +=1\r\n\r\n\t# Adds all values into finalWords, skipping duplicates\r\n\tfor values in words:\r\n\t\tif values not in seen:\r\n\t\t\tfinalWords.append(values)\r\n\t\t\tseen.add(values)\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t# Add positive and negative counts to feature list and dictionaries\r\n\tfor word in finalWords:\r\n\t\ts += '{:12s}'.format(word)\r\n\t\t\r\n\t\tpCount = 0\r\n\t\tnCount = 0\r\n\t\t\r\n\t\tfor row in trainingData:\r\n\t\t\tif row[1] == '0':\r\n\t\t\t\tif word in row[2]: pCount += 1\r\n\t\t\tif row[1] == '1':\r\n\t\t\t\tif word in row[2]: nCount += 1\r\n\t\t\t\t\r\n\t\tfeatureListPos.append((pCount + 1) / (posCount + 9))\r\n\t\tclass0Dict[word] = ((pCount + 1) / (posCount + 9))\r\n\t\t\r\n\t\tfeatureListNeg.append((nCount + 1) / (negCount + 9))\r\n\t\tclass1Dict[word] = ((nCount + 1) / (negCount + 9))\r\n\r\n\t\t\r\n\t\t\r\n\t# Formatting for the positive feature list\r\n\tfor item in featureListPos:\r\n\t\tfeatureListPosFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListPosFormat:\r\n\t\tposString += '{:12s}'.format(item)\r\n\r\n\t# Formatting for the negative feature list\r\n\tfor item in featureListNeg:\r\n\t\tfeatureListNegFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListNegFormat:\r\n\t\tnegString += '{:12s}'.format(item)\r\n\r\n\r\n\t\t\r\n\treturn(s, posString, negString)", "def classify(strings: List[str], params: Any) -> List[str]:\n \n # ############################ REPLACE THIS WITH YOUR CODE #############################\n def predict_one_sample(sample, train_dict, ngram_lvl=1):\n ngrams = [sample[i:i+ngram_lvl] for i in", "def label_predict(self, sentence):\n index_words = FileUtils.index_sentence(sentence, self.word_to_index)\n chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)\n result = np.zeros(Settings.class_num)\n if Settings.cuda:\n self.model.cuda()\n \n for chunk in chunks:\n with torch.no_grad():\n chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)\n if Settings.cuda:\n chunk = chunk.cuda()\n \n predict = self.model(chunk)\n if Settings.cuda:\n predict = predict.cpu()\n predict = predict.numpy()[0]\n result += 
predict\n result /= len(chunks)\n\n target_index = np.argmax(result) + 1\n label = self.index_to_label.get(str(target_index))\n score = np.max(result)\n return label, score", "def thresholding(pred,label,thres):\n \n conf =[]\n \n for i in thres:\n \n pr_th,lab_th = (pred>i),(label>i)\n conf += confusion(pr_th,lab_th)\n \n return np.array(conf).reshape(-1,4)", "def predict_proba_confidence(clf, X, y_true):\n class_labels = clf.classes_\n y_pred_proba = clf.predict_proba(X)[:,1]\n ent = [entropy(i) for i in y_pred_proba]\n\n return sum(ent)/len(ent)", "def _predict_doc(self, x, flag):\n\n if flag == 1:\n denom = self.X.num_positive()\n else:\n denom = self.X.num_negative()\n denom += self.X.vocab_size()\n\n # multiply word probabilities for all words in x\n words = tokenize(x)\n # prob = 1.0\n # for word in words:\n # wi = self._doc_count_for_word(word, flag=flag)\n # # utilize the Laplace Smooth\n # prob *= ((float(wi)+1.0) / (float(denom)+2.0))\n\n prob = math.log(self.X.priors[str(flag)])\n for word in words:\n wi = self._doc_count_for_word(word, flag=flag)\n # utilize the Laplace Smooth\n prob += math.log((float(wi)+1.0) / (float(denom)+2.0))\n\n # prob *= math.log(self.X.priors[str(flag)])\n\n return prob", "def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product", "def label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels,[-1,1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)", "def sample_labels(self, y, num_of_sents = 5, num_of_samples = 10,\n num_of_classes = 3, start_index = 5, get_prob = True):\n classes = self.classes_()\n ret = []\n am = lambda myList: [i[0] for i in sorted(enumerate(myList), key=lambda x:x[1], reverse= True)]\n\n for sent in y[:num_of_sents]:\n cur = []\n for word in sent[start_index: start_index + num_of_samples]:\n sorted_prob = am(word)\n cur.append([(classes[ind], word[ind]) if get_prob else classes[ind]\n for ind in sorted_prob[:num_of_classes]])\n ret.append(cur)\n return ret", "def calculate_class_weights(labels):\n class_counts = sorted(Counter(labels).items())\n num_items = [x[1] for x in class_counts]\n weights = [min(num_items)/x for x in num_items]\n\n return torch.tensor(weights)", "def classifier(text):\n return random.choice([True, False])", "def probability_categorical(feature, label):\n assert feature.nunique()>2, 'feature category nums must be greater than 2.'\n t = pd.DataFrame({'feature':feature, 'label':label})\n cat = label.unique()\n cat = [(cat[i], cat[i+1]) for i in range(len(cat)-1)]\n prob = label.value_counts(1).to_dict()\n slope = [prob.get(i[0], 0)-prob.get(i[1], 0) for i in cat]\n \n slope_dict = t.feature.value_counts(1).to_dict()\n prob = t.groupby([ 'feature']).label.value_counts(1).to_dict()\n slope_dict = {i:{'category_rate':slope_dict[i], 'slope':[prob.get((i,j[0]), 0)-prob.get((i,j[1]), 0) for j in cat]} for i in slope_dict}\n for i in slope_dict:\n slope_dict[i]['slope_diff'] = sum([abs(slope[j]-slope_dict[i]['slope'][j]) for j in range(len(slope))])\n value1 = sorted([[[i], slope_dict[i]['slope_diff'], slope_dict[i]['category_rate']] for i in 
slope_dict], key=lambda x:x[1], reverse=1)\n distance = sorted([value1[i][1]-value1[i+1][1] for i in range(len(value1)-1)])\n std = pd.Series([i[1] for i in value1]).std()\n coupe = value1\n dis = distance[0]\n for k in distance:\n value = value1\n while 1:\n for i in range(len(value)-1):\n if value[i][1]-k<value[i+1][1]:\n value[i+1][0] = value[i][0]+value[i+1][0]\n value[i+1][1] = value[i][1]*value[i][2]/(value[i][2]+value[i+1][2])+value[i+1][1]*value[i+1][2]/(value[i][2]+value[i+1][2])\n value[i+1][2] = value[i][2]+value[i+1][2]\n value.remove(value[i])\n break\n if i==len(value)-2:\n break\n if pd.Series([i[1] for i in value]).std()>std:\n coupe = value\n std = pd.Series([i[1] for i in value]).std()\n dis = k\n return {'group':{k:i for i,j in enumerate(coupe) for k in j[0]}, 'data':coupe, \n 'distance':dis, 'distance_index':f'{distance.index(dis)+1}/{len(distance)}', 'std':std}", "def cond_prob(self, token, prev_tokens=None):\n\n if not prev_tokens:\n assert self.n == 1\n prev_tokens = tuple()\n # ngram condicional probs are based on relative counts\n hits = self.count((tuple(prev_tokens)+(token,)))\n sub_count = self.count(tuple(prev_tokens))\n\n return hits / float(sub_count)", "def compute_pos_weights(self, label_tensor):\n label = label_tensor.numpy()\n batch_size = label.shape[0]\n frequencies = np.sum(label, axis=0)\n \n pos_weights = np.ones((1, NUM_CLASSES))\n indices = frequencies != 0.\n pos_weights[indices] = np.divide(batch_size - frequencies[indices], frequencies[indices])\n print(pos_weights)\n return pos_weights", "def transform_output_probs(self, y, get_prob = False):\n return np.array(self.sample_labels(y,\n num_of_sents = len(y), # all sentences\n num_of_samples = max(map(len, y)), # all words\n num_of_classes = 1, # Only top probability\n start_index = 0, # all sentences\n get_prob = get_prob, # Indicate whether to get only labels\n ))", "def transform_output_probs(self, y, get_prob = False):\n return np.array(self.sample_labels(y,\n num_of_sents = len(y), # all sentences\n num_of_samples = max(map(len, y)), # all words\n num_of_classes = 1, # Only top probability\n start_index = 0, # all sentences\n get_prob = get_prob, # Indicate whether to get only labels\n ))", "def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n #print vec2Classify\n # [0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]\n \n #print p0Vec\n \n #print p1Vec\n \"\"\"[-3.04452244 -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244 -3.04452244\n -3.04452244 -2.35137526 -2.35137526 -2.35137526 -2.35137526 -2.35137526\n -3.04452244 -1.94591015 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n -1.94591015 -3.04452244 -1.65822808 -3.04452244 -2.35137526 -3.04452244\n -3.04452244 -3.04452244]\"\"\" \n \n #print vec2Classify * p1Vec\n \"\"\"\n [-0. -3.04452244 -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -3.04452244\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. 
-3.04452244]\n \"\"\"\n \n #print sum(vec2Classify * p1Vec)\n # -9.13356731317\n \n p1 = sum(vec2Classify * p1Vec) + log(pClass1) #element-wise mult\n p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)\n \n if p1 > p0:\n return 1\n else: \n return 0", "def calc_class_weights(self):\n y = self.train_eval_data[\"sentiment\"]\n self.class_weights = {}\n classes = np.unique(y)\n for cls in classes:\n self.class_weights[cls] = len(y) / (len(classes) * (y == cls).sum())", "def prior(self, c, labeled):\n return log(len(labeled[c])/self.N_features)", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def condentropy(truelabels, labels):\n labels=array(labels)\n truelabels=array(truelabels)\n \n condent=0.\n for l in xrange(min(labels),max(labels)+1):\n sublabels = truelabels[ labels==l ]\n condent += len(sublabels)*chl_entropy( sublabels )\n return condent/float(len(labels))", "def next_word_probability(self, observation, partial_out):\n if not hasattr(self, 'prev_enc'):\n self.prev_enc = None\n self.last_text = None\n if observation['text'] != self.last_text:\n self.prev_enc = None\n self.last_text = observation.get('text')\n self.observe(observation)\n\n obs = self.observation\n obs['eval_labels'] = [' '.join(partial_out)]\n batch = self.vectorize([obs])\n self.model.eval()\n self.model.longest_label = 1 # no need to predict farther ahead\n out = self.model(\n batch[0], # xs\n ys=(batch[1] if len(partial_out) > 0 else None),\n prev_enc=self.prev_enc)\n scores, self.prev_enc = out[1], out[3]\n # scores is bsz x seqlen x num_words, so select probs of current index\n assert len(partial_out) == scores.size(1) - 1\n probs = F.softmax(scores.select(1, len(partial_out)), dim=1).squeeze().cpu()\n dist = self.probs\n for i in range(len(probs)):\n try:\n val = probs[i].item()\n except AttributeError:\n val = probs[i][0]\n dist[self.dict[i]] = val\n self.batch = batch\n return dist", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def get_ngramlogprobs(freqdict):\n return", "def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "def posterior(self, model, sentence, label):\r\n\r\n if model == \"Simple\":\r\n cost = sum(\r\n [\r\n (\r\n (math.log(self.emission_probability[label[i]][sentence[i]]))\r\n + (math.log(self.posterior_probability[label[i]]))\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (math.log(1 / float(10 ** 10)))\r\n + (math.log(self.posterior_probability[label[i]]))\r\n )\r\n for i in range(len(sentence))\r\n ]\r\n )\r\n 
return cost\r\n elif model == \"Complex\":\r\n post_array = []\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * self.initial_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10)) * self.initial_probability[label[i]]\r\n )\r\n elif i == 1:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n )\r\n else:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 2]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * (\r\n self.transition_probability[label[i - 2]][label[i]]\r\n * self.posterior_probability[label[i - 2]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n )\r\n post_array = [math.log(p) for p in post_array]\r\n cost = sum(post_array)\r\n return cost\r\n\r\n elif model == \"HMM\":\r\n post_array = []\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n post_array.append(\r\n (\r\n self.initial_probability[label[i]]\r\n * self.emission_probability[label[i]][sentence[i]]\r\n )\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (self.initial_probability[label[i]] * (1 / float(10 ** 8)))\r\n )\r\n else:\r\n emi = (\r\n (self.emission_probability[label[i]][sentence[i]])\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n )\r\n\r\n min_val = post_array[i - 1] * (\r\n (self.transition_probability[label[i - 1]][label[i]])\r\n )\r\n\r\n post_array.append(emi * min_val)\r\n\r\n post_array = [math.log(p) for p in post_array]\r\n\r\n cost = sum(post_array)\r\n\r\n return cost\r\n else:\r\n print(\"Unknown algorithm!\")", "def predict(self, threshold=0.5):\n probabilities = self.probability_array()\n classes = np.zeros(self.N)\n classes[probabilities > threshold] = 1\n return classes", "def classify(texts: List[str], params: Any) -> List[str]:\n\n alpha = 1\n token_probs_pos = params[\"token_probs_pos\"]\n token_probs_neg = params[\"token_probs_neg\"]\n all_words = params[\"all_words\"]\n M = len(all_words)\n cnt_pos_docs = params[\"cnt_pos_docs\"]\n cnt_neg_docs = params[\"cnt_neg_docs\"]\n\n sum_len_neg = params[\"sum_len_neg\"]\n sum_len_pos = params[\"sum_len_pos\"]\n pos_dict = params[\"pos_dict\"]\n neg_dict = params[\"neg_dict\"]\n\n\n test_texts = preprocessing(texts)\n test_tokenized_texts = text_to_tokens(test_texts)\n \n res = []\n log_pos_probablity = 0\n 
log_neg_probablity = 0\n i = 0\n for text in test_tokenized_texts:\n if (i % 5000 == 0):\n print(\"Classified\", i, \"texts\")\n i += 1\n log_pos_probablity = log(cnt_pos_docs)\n log_neg_probablity = log(cnt_neg_docs)\n for token in text:\n if (token_probs_pos[token] == 0):\n token_probs_pos[token] = alpha / (alpha * M + sum_len_pos)\n else:\n log_pos_probablity += log(token_probs_pos[token])\n if (token_probs_neg[token] == 0):\n token_probs_neg[token] = alpha / (alpha * M + sum_len_neg)\n else:\n log_neg_probablity += log(token_probs_neg[token])\n if (log_neg_probablity > log_pos_probablity):\n res.append(\"neg\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # neg_dict[token] += text[token]\n # sum_len_neg += text[token]\n # token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n\n else:\n res.append(\"pos\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # pos_dict[token] += text[token]\n # sum_len_pos += text[token]\n # token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n\n\n \n print('Predicted labels counts:')\n print(count_labels(res))\n return res", "def pred(self, w):\n pr = 0;\n res = ''\n for item in self.counts:\n if w in item[:-1] and self.prob(item) > pr:\n# print(\"HIT\")\n# print(item)\n i = item.index(w) + len(w)\n res = item[i]\n pr = self.prob(item)\n if res == '':\n res = '*'\n return res", "def naive_bayes_classify(df: pd.DataFrame, vect, names):\n features = vect\n target = df.success_lvl\n\n X_train, X_test, y_train, y_test = \\\n train_test_split(features, target, test_size=0.2, random_state=42)\n\n nb_clf = MultinomialNB()\n nb_clf.fit(X_train, y_train)\n nb_predictions = nb_clf.predict(X_test)\n print('Accuracy score for Naive Bayes:', accuracy_score(y_test, nb_predictions))\n\n\n # Find Top/Bottom num of terms used to describe the classes.\n num = 10\n low_class_prob_sorted = nb_clf.feature_log_prob_[0, :].argsort()[::-1]\n hi_class_prob_sorted = nb_clf.feature_log_prob_[1, :].argsort()[::-1]\n print('\\n', f'Low score Top{num} phrases:', np.take(names, low_class_prob_sorted[:num]))\n print('\\n', f'Low score Bot{num} phrases:', np.take(names, low_class_prob_sorted[-num:]))\n print('\\n', f'High score Top{num} phrases:', np.take(names, hi_class_prob_sorted[:num]))\n print('\\n', f'High score Bot{num} phrases:', np.take(names, hi_class_prob_sorted[-num:]))", "def log_prob(sentence, LM, smoothing=False, delta=0, vocabSize=0):\n word_list = sentence.split()\n log_prob = 0\n for i in range(len(word_list)-1):\n print(word_list[i], word_list[i+1])\n bi_count = LM['bi'][word_list[i]][word_list[i+1]]\n uni_count = LM['uni'][word_list[i]]\n if uni_count == 0 and smoothing:\n return float('-inf')\n log_prob += log(((bi_count + delta)/(uni_count + delta * vocabSize)))\n return log_prob", "def calc_class_weights(label_freq):\n\n most_common_label_freq = label_freq[0]\n weighted_slots = sorted([(index, most_common_label_freq[1] / freq) for (index, freq) in label_freq])\n return [weight for (_, weight) in weighted_slots]", "def classify(some_string, trained_pos, trained_neg):\n pos_probability = get_probability(trained_pos, some_string)\n neg_probability = get_probability(trained_neg, some_string)\n if (pos_probability >= neg_probability):\n return \"positive\"\n elif pos_probability < neg_probability: \n return \"negative\"", "def get_class_weights(img_paths: List[str], class_to_idx: Dict[str, int], label_names: List[str]):\n labels = list()\n for img_path in img_paths:\n 
label = os.path.basename(os.path.dirname(img_path))\n labels.append(class_to_idx[label]) \n\n counts = Counter(labels) + Counter([class_to_idx[name] for name in label_names])\n counts = np.array(sorted(counts.items()))[:,1]\n \n return counts.max()/counts", "def word_probability(self, word: str) -> int:\n try:\n return self.fdist[word.lower()] / len(self.fdist.keys())\n except KeyError:\n return 0.0", "def probability(self, words):\n if len(words) == 0:\n return 0\n \n prob = 1\n model = self.mdl\n \n words_ngram = NGramLM(self.N, []).create_ngrams(words) # Create NGram model for words\n for ngram in words_ngram:\n # Never seen before ngram or n-1gram\n if (ngram not in list(model['ngram'])) or (ngram[:-1] not in list(model['n1gram'])):\n return 0\n if isinstance(self, NGramLM):\n prob *= model[model['ngram'] == ngram]['prob'].values[0]\n \n def recur_prob(model, w):\n prob = 1\n prev_mod = model.prev_mdl\n if isinstance(prev_mod, UnigramLM): # Unigram base case\n prob *= prev_mod.mdl[w[0]]\n else:\n words_n1gram = NGramLM(prev_mod.N, []).create_ngrams(w) # Create NGram model for words\n prob *= prev_mod.mdl[prev_mod.mdl['ngram'] == words_n1gram[0]]['prob'].values[0]\n prob *= recur_prob(prev_mod, words_n1gram[0]) # Recursive call\n return prob\n\n prob *= recur_prob(self, words_ngram[0])\n \n return prob", "def fit(self, X: List[str], y: List[str]) -> None:\n # 1. Add all unique vectors from messages (X)\n for i in range(len(X)):\n for word in X[i].split():\n if self.vectors.get(word):\n if self.vectors[word]['n'].get(y[i]):\n self.vectors[word]['n'][y[i]] += 1\n else:\n self.vectors[word]['n'][y[i]] = 1\n else:\n self.vectors[word] = {'n': {y[i]: 1}}\n self.d += 1\n\n if self.labels_d.get(y[i]):\n self.labels_d[y[i]] += 1\n else:\n self.labels_d[y[i]] = 1\n\n self.labels_p[y[i]] = 1 if not self.labels_p.get(y[i]) else self.labels_p[y[i]] + 1\n\n # 2. Count probabilities in each added vector of each class (label)\n for vector in self.vectors:\n for label in self.labels_d:\n n = 0 if not self.vectors[vector]['n'].get(label) else self.vectors[vector]['n'][label]\n p = (n + self.alpha) / (self.labels_d[label] + (self.d * self.alpha))\n\n if self.vectors[vector].get('p'):\n self.vectors[vector]['p'][label] = p\n else:\n self.vectors[vector]['p'] = {label: p}\n\n # 3. Count probability of each class\n sum = 0\n for label in self.labels_p:\n sum += self.labels_p[label]\n\n for label in self.labels_p:\n self.labels_p[label] = self.labels_p[label] / sum" ]
[ "0.7158563", "0.68635947", "0.6840666", "0.66153693", "0.6609253", "0.6576637", "0.6564863", "0.653211", "0.6521681", "0.6498006", "0.6471072", "0.640781", "0.63957083", "0.6352886", "0.6348521", "0.6348173", "0.634401", "0.634401", "0.6340316", "0.63331866", "0.6293823", "0.62921864", "0.6284099", "0.62624526", "0.6261897", "0.6255573", "0.62498116", "0.62477356", "0.62358195", "0.6231648", "0.62258786", "0.6223893", "0.62139016", "0.6209874", "0.6199859", "0.61998343", "0.6198512", "0.619429", "0.61863524", "0.6176778", "0.6176208", "0.6175101", "0.6166364", "0.6163395", "0.6145888", "0.6141486", "0.6128763", "0.6125941", "0.61257195", "0.6115996", "0.6113607", "0.6110395", "0.60875934", "0.6068707", "0.6059873", "0.6057305", "0.6036051", "0.6011489", "0.59985536", "0.59766704", "0.59764963", "0.59747994", "0.5970934", "0.5966488", "0.59554046", "0.5955147", "0.5952732", "0.5952276", "0.5950563", "0.5937298", "0.5936225", "0.5934529", "0.5927987", "0.5925533", "0.59198356", "0.59198356", "0.59115255", "0.5911218", "0.5908025", "0.59061205", "0.5901722", "0.58999616", "0.58995944", "0.58823997", "0.58823997", "0.58747345", "0.58613276", "0.5859348", "0.58592594", "0.58552545", "0.5842158", "0.5839053", "0.58385897", "0.58348745", "0.5830144", "0.5829052", "0.58263886", "0.58253133", "0.5824438", "0.5823572" ]
0.7195733
0
Check if the string is a palindrome.
def isPalindrome(string):
    for i in range(len(string)//2):
        if string[i] != string[(i*-1)-1]:
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_palindrome(string):\n return", "def check_palindrome(s):\n return s[::-1] == s", "def is_palindrome(string):\n return string[::-1] == string", "def is_palindrome_v1(s):\n return reverse(s) == s", "def is_palindrome(string):\n if string == string[::-1]:\n return True\n return False", "def is_palindrome2(some_string):\n return reverse_string(some_string) == some_string", "def is_palindrome(s):\n return s == s[::-1]", "def checkPalindrome(self, s: str) -> str:\n # return s[:len(s) // 2] == s[(len(s) + 1) // 2::-1]\n return s == s[::-1]", "def isPalindrome(s):\r\n return isPal(toChars(s))", "def check_palindrome():", "def palindrome(string):\r\n if len(string) <= 1:\r\n return True\r\n else:", "def is_palindrome(s):\n rev_str = \"\"\n for let in range(1, len(s) + 1):\n rev_str += s[-let]\n return rev_str == s", "def check_palindrome(inp_string):\n if len(inp_string) <= 2:\n return False\n elif inp_string == inp_string[::-1]:\n return True\n else:\n return False", "def palindrome(string: str) -> Bool:\n string_lower = string.lower()\n string_clean =", "def isPalindrome(self, s: str) -> bool:\n s = s.translate(str.maketrans('','',string.punctuation)) # drop punctuation\n s = \"\".join(s.split()) # drop spaces\n s = s.lower() # casefold more agressive (ASCII vs UNICODE)\n\n return s == s[::-1]", "def string_palidrome(word):\n if word == string_reverse(word):\n return True\n else:\n return False", "def is_palindrome(string):\n string = string.upper()\n if reverse(string) == string:\n #use function reverse() to check if the reverse of the string is same as the original string\n return True\n else:\n return False", "def palindrome(word):\n reverse = word[::-1]\n return word == reverse", "def is_palindrome(string):\n k, mid = len(string), len(string) // 2\n # checking even palindromes\n if k % 2 == 0:\n return string[:mid] == get_reverse_complement(string[mid:])\n # checking odd palindromes\n else:\n return string[:mid] == get_reverse_complement(string[mid + 1:])", "def check_palindrome(string):\n palindrome = ''.join(string).lower()\n if palindrome == palindrome[::-1]:\n return True\n else:\n return False", "def is_palindrome(word: str) -> bool:\n\n # Todo\n return False", "def is_palindrome(word):\n if word == word[::-1]:\n return True\n else:\n return False", "def is_palindrome_v2(s):\n n = len(s)\n\n return s[:n/2] == reverse(s[n-n/2:])", "def is_palindrome(s):\n i, end = 0, len(s) // 2\n while i < end:\n if s[i] != s[len(s) - i - 1]:\n return False\n i += 1\n return True", "def is_palindrome(string):\r\n r_string = string[::-1]\r\n cnt = 0\r\n while cnt < len(string):\r\n if string[cnt] == r_string[cnt]:\r\n cnt += 1\r\n continue\r\n else:\r\n return False\r\n #cnt += 1\r\n return True", "def validPalindrome(self, s: str) -> bool:\n n = len(s)\n i = 0\n j = n - 1\n while i < j:\n if s[i] == s[j]:\n i += 1\n j -= 1\n else:\n # error, for -1, start > end. 
Indexing is like range\n # return s[i:j] == s[i:j:-1] or s[i+1:j+1] == s[i+1:j+1:-1]\n return self.is_palindrome(s[i:j]) or self.is_palindrome(s[i+1:j+1])\n\n return True", "def is_palindrome(str):\n n = len(str)\n for i in range(n//2): # loop to middle of string str\n if str[i] != str[-1-i]:\n return False # find a character doesn't match with its mirror-positioned character\n return True # reach this, then str is palindrome", "def isPalindrome(self, s: str) -> bool:\n if not s:\n return True\n # process the string\n s_processed = \"\"\n for i in s:\n if i.isalnum():\n s_processed += i.lower()\n # Check if palindrome\n return s_processed == s_processed[::-1] # String[::-1], O(n)", "def isPalindrome(self, s: str) -> bool:\n left, right = 0, len(s) - 1\n while left < right:\n while left < right and not s[left].isalnum(): # 注意while loop的嵌套\n left += 1\n while left < right and not s[right].isalnum():\n right -= 1\n if s[left].lower() != s[right].lower():\n return False\n left += 1\n right -= 1\n return True", "def is_palindrome(some_string):\n\n if some_string == \"\":\n return True\n elif len(some_string) == 1:\n return True\n elif some_string[0] != some_string[-1]:\n return False\n else:\n return is_palindrome(some_string[1:-1])", "def is_palindrome(number):\r\n str_input = str(number)\r\n return str_input == reversed(str_input)", "def is_palindrome(string):\n\n # Base case: If the string is one character long or fewer, it's a palindrome.\n if len(string) <= 1:\n return True\n \n # Base case: If the first and last characters in the string don't match, it isn't a palindrome.\n if string[0] != string[-1]:\n return False\n \n # At this point, the first and last character in the string are identical.\n # Recursive case: Check if the string remaining after removing the first and last character\n # is a palindrome. 
If so, this string is a palindrome too.\n return is_palindrome(string[1:-1])", "def ispalindrome(string):\n if isinstance(string, (str, int, float)):\n string = str(string).replace(\" \", \"\").lower()\n if len(string) in [0,1]:\n return True\n half_index = len(string) // 2\n first = string[:half_index]\n second = string[-half_index:]\n if first == second[::-1]:\n return True\n else:\n return False\n else:\n print(\"ERROR: ispalindrome received invalid input.\\nREASON: input\",\n \"must be convertible to string format.\")", "def is_palindrome(s):\n if isinstance(s, str):\n if s == s[::-1]:\n return True\n else:\n return False\n if isinstance(s, int):\n if str(s) == str(s)[::-1]:\n return True\n else:\n return False\n else:\n raise AssertionError(\"Please enter str\")", "def is_palindrome(n):\n ns = str(n)\n for i in range(0, len(ns) // 2):\n if ns[i] != ns[len(ns) - 1 - i]: return False\n return True", "def is_palindrome(word):\n word = word.replace(\" \", \"\")\n new_word = word.lower()\n\n return new_word == new_word[::-1]", "def is_palindrome(n):\n return(n == reverse(n))", "def is_palendrome(my_string):\n characters = []\n for char in my_string:\n characters.append(char)\n characters.reverse()\n reversed_string = \"\".join(characters)\n if my_string == reversed_string:\n return True\n else:\n return False", "def _is_palindrome(input_str, start, end):\n if start >= end:\n return True\n if input_str[start] != input_str[end]:\n return False\n else:\n return _is_palindrome(input_str, start + 1, end - 1)", "def is_palindrome(s):\n\n def to_chars(s):\n s = s.lower()\n letters = ''\n for char in s:\n if char in 'abcdefghijklmnopqrstuvwxyz':\n letters += char\n return letters\n\n def is_pal(s):\n if len(s) <= 1:\n return True\n else:\n return s[0] == s[-1] and is_pal(s[1:-1])\n\n return is_pal(to_chars(s))", "def isPalindrome(word):\n\n input_str = IGNORE_NON_ALPHA_CHARACTER.sub(\"\", str(word)).casefold()\n return input_str == input_str[::-1]", "def isPalindrome(n):\n str_n = str(n)\n return list(str_n) == list(reversed(str_n))", "def is_palindrome(word):\n word = validator(word)\n\n if word == \"\":\n return True\n\n elif word[0] == word[-1]:\n return is_palindrome(word[1:-1])\n\n return False", "def is_palindrome(num):\n\treversed_num = str(num)[::-1]\n\tif reversed_num == str(num): return True\n\telse: return False", "def is_palindrome_v3(s):\n i = 0\n j = len(s)-1\n\n while i < j and s[i] == s[j]:\n i = i + 1\n j = j -1\n\n return j <= i", "def is_palindrome(the_string: str) -> bool:\n s = the_string.lower()\n # left pointer\n i = 0\n # right pointer\n j = len(s)-1\n # before the left pointer and right pointer meets\n while i < j:\n # if meet white space, the left pointer will move forward without doing anything.\n while i < j and s[i] == \" \":\n i += 1\n # if meet white space, the right pointer will move forward without doing anything.\n while i < j and s[j] == \" \":\n j -= 1\n # test if the char at left pointer equals with char at the right pointer , if yes,\n # left and right pointer move forward to test next pair of chars\n if s[i] == s[j]:\n i += 1\n j -= 1\n # if not, the string is not a palindrome, return False\n else:\n return False\n else:\n return True", "def is_palindrome(phrase):\n # remove spaces\n phrase = phrase.replace(' ', '').lower()\n\n # reverse phrase\n ans = ''\n for index in range(len(phrase)-1, -1, -1):\n ans += phrase[index]\n\n return True if ans == phrase else False", "def is_palindrome(word):\n\t# The degenerate case is 1 letter or none. 
We consider these palindromes\n\tif len(word) <= 1:\n\t\treturn True\n\n\t# For something to be a palindrome, the first and last letters much match\n\t# and the string between them must also be a palindrome\n\tif first(word) == last(word) and is_palindrome(middle(word)):\n\t\treturn True\n\telse:\n\t\treturn False", "def is_palindrome(word):\n if len(word)>2 and not is_palindrome(middle(word)):\n return False\n else:\n return first(word) == last(word)", "def is_palindrome(num_in):\n if str(num_in) == str(num_in)[::-1]:\n return True\n\n return False", "def is_palindrome(x):\n strx = str(x)\n return strx == strx[::-1]\n # chars = [c for c in x] if not is_number(x) else digits(x)\n # for i in range(len(chars) // 2):\n # if chars[i] != chars[len(chars) - i - 1]:\n # return False\n # return True", "def isPalindromes(s):\n\n def toChar(s):\n s= s.lower()\n letters=''\n for c in s:\n if c in \"abcdefgh\":\n letters= letters+c\n return letters\n\n def isPal(s):\n if len(s) <=1:\n return True\n else:\n return s[0]==s[-1] and isPal(s[1:-1])\n return isPal(toChar(s))", "def __isPalindrome(self, string: str) -> bool:\r\n logger.debug('Checking substring {} for palindrome...'.format(string))\r\n palindrome = False\r\n if string:\r\n palindrome = True\r\n midPoint = int(len(string)/2)\r\n logger.debug('midpoint: {}'.format(midPoint))\r\n for index in range(midPoint+1):\r\n respectiveRightIndex = (len(string) - index - 1)\r\n logger.debug('index: {}, respectiveRightIndex: {}'.format(index, respectiveRightIndex))\r\n if index < respectiveRightIndex:\r\n if respectiveRightIndex < 0:\r\n respectiveRightIndex = 0\r\n if string[index] != string[respectiveRightIndex]:\r\n palindrome = False\r\n\r\n logger.debug('{} is {}'.format(string, 'palindrome' if palindrome else 'not palindrome'))\r\n return palindrome", "def is_palindromic(phrase):\n\n val = str(phrase).lower().replace(\" \", \"\")\n if val == val[::-1]: # Reverse order\n return True\n else:\n return False", "def is_palindrome(sentence: str) -> bool:\n\n words = extract_words(sentence)\n middle = floor(len(words)/2)\n\n for index in range(middle):\n if words[index].lower() != words[len(words)-index-1].lower():\n return False\n\n return True", "def part3(string):\n palindrome = True\n for i in range(0, int(len(string)/2) + 1):\n if(string[i] != string[int(len(string))-i-1]):\n palindrome = False\n print(palindrome)", "def is_palindrome(s):\n s_copy = s.replace(\" \", \"\")\n n = len(s_copy)\n for i in range(n // 2):\n left = s_copy[i]\n right = s_copy[n - 1 - i]\n if left.upper() != right.upper():\n return False\n return True", "def palindrome_check(num):\n num= str(num)\n len_num= len(num)\n for i in range(len_num/2):\n if num[i] == num[len_num-i-1]:\n ans= True\n else:\n ans= False\n break\n return ans", "def isPalindrome(S):\n if len(S) == 0:\n return True\n \n if S[0] != S[-1]:\n return False\n \n return isPalindrome(S[1:len(S) - 1])", "def is_palindrome(text):\n # implement is_palindrome_iterative and is_palindrome_recursive below, then\n # change this to call your implementation to verify it passes all tests\n assert isinstance(text, str), 'input is not a string: {}'.format(text)\n # return is_palindrome_iterative(text)\n return is_palindrome_recursive(text)", "def is_palindrome(input_string):\n\n def _is_palindrome(input_str, start, end):\n \"\"\"Returns whether a string is a palindrome or not\n using recursion. 
Assumes string is not empty.\n \"\"\"\n if start >= end:\n return True\n if input_str[start] != input_str[end]:\n return False\n else:\n return _is_palindrome(input_str, start + 1, end - 1)\n\n # remove spaces and special characters and convert to lower case\n input_string = ''.join(ch for ch in input_string if ch.isalnum()).lower()\n\n return _is_palindrome(input_string, 0, len(input_string) - 1)", "def isPalindrome(x):\n # Write the functionality:\n\n if x == str(x)[::-1]:\n return True\n elif x==121:\n return True\n else:\n return False", "def is_palindrome(text):\n # implement is_palindrome_iterative and is_palindrome_recursive below, then\n # change this to call your implementation to verify it passes all tests\n assert isinstance(text, str)\n # return is_palindrome_iterative(text)\n return is_palindrome_recursive(text)", "def is_palindrome(text):\n # implement is_palindrome_iterative and is_palindrome_recursive below, then\n # change this to call your implementation to verify it passes all tests\n assert isinstance(text, str), 'input is not a string: {}'.format(text)\n #return is_palindrome_iterative(text)\n return is_palindrome_recursive(text)", "def is_palindrome(text):\n\n assert isinstance(text, str), 'input is not a string: {}'.format(text)\n # return is_palindrome_iterative(text)\n return is_palindrome_recursive(text)", "def is_palindrome(word):\n if first(word) != last(word):\n print 'not a palindrome'\n return False\n else:\n return is_palindrome(middle(word))", "def is_palindrome(sub):\n for i in range(len(sub)):\n if sub[i] != sub[len(sub) - i - 1]:\n return False\n return True", "def part3():\n Input = raw_input('please enter a string:')\n for i in range(len(Input)):\n if Input[i] != Input[len(Input)-i-1]:\n print('It is not a palidrome')\n break\n else:\n i = i + 1", "def is_palindrome(n):\n num = list(str(n))\n h1 = num[:int(len(num)/2)] # first half of palindrome\n if len(num) % 2 == 0:\n h2 = num[int(len(num)/2):] # second half of palindrome\n else:\n h2 = num[int(len(num)/2) + 1:]\n return h1 == list(reversed(h2))", "def palindromeString(str, first = 0, end = -1):\n assert len(str) > 0, \"String Must be hava one character at least.\"\n\n # first bigger or equal then length's of string then yes\n if first >= len(str):\n return True\n # Compare first with end character recursively.\n if str[first] == str[end]:\n return palindromeString(str, first + 1, end - 1)\n else :\n return False", "def is_palindrome(num):\n str_num = str(num)\n\n if len(str_num) == 1:\n return True\n elif len(str_num) == 2:\n return str_num[0] == str_num[1]\n\n if str_num[0] == str_num[len(str_num)-1]:\n return is_palindrome(str_num[1:len(str_num)-1])\n else:\n return False", "def is_palindromic(n: int) -> bool:\n str_n = str(n)\n if str_n == str_n[::-1]:\n return True\n return False", "def check_palindrome():\n # Your code goes here...", "def has_palindrome(i, start, length):\r\n s = str(i)[start:start+length]\r\n return s[::-1] == s", "def check_is_palindrome(mer1, mer2):\n return mer1.find(mer2[::-1]) == 0", "def palCheck(input_string):\n\n # ADD NECESSARY LINES OF CODE SO THAT ALL UNITTESTS PASS\n\n d = Deque()\n for char in input_string:\n d.addFront(char)\n\n while d.size() > 1:\n firstChar = d.removeRear()\n lastChar = d.removeFront()\n if firstChar != lastChar:\n print(\"No, '\" + input_string + \"', is not a palindrom\")\n return False\n\n print(\"Yes, '\" + input_string + \"', is a palindrom!!\")\n return True", "def check_palindrome(string):\n queue = Deque() # creating object of Deque 
class\n\n for ch in string:\n queue.insert_rear(ch) # adds all the characters from back\n\n is_equal = True\n\n while queue.size() > 1 and is_equal:\n first = queue.remove_front() # removes the characters from the front of the queue\n last = queue.remove_rear() # removes the characters from the rear of the queue\n if first != last:\n is_equal = False\n\n return is_equal", "def isPalindromic(n: int):\n return str(n) == str(n)[::-1]", "def palindrome(x):\n pass", "def is_palindrome2(word):\n\n word = word.lower()\n\n if not word.isalnum():\n word = ''.join(character for character in word if character.isalnum())\n\n if word == \"\":\n return True\n\n while len(word) > 1:\n\n if word[0] == word[-1]:\n word = word[1:-1]\n\n else:\n return False\n\n return True", "def palindrome_integer(string):\n integers = string.split(\" \")\n for integer in integers:\n reverse = str(int)[::-1]\n '''Check if palindrome not then return False'''\n if integer != reverse:\n return False\n return True", "def is_palindrome(element):\n if int(element) < 0:\n return False\n if element == element[::-1]:\n return True\n else:\n return False", "def is_palindrome(text):\n\n # Property of a palindrome:\n # There be a maximum of only one letter that sums to an odd number\n \n char_count = {}\n # edge cases\n # Consider empty text as palindrome\n \n for char in text:\n if char in char_count:\n char_count[char] += 1\n else:\n char_count[char] = 1\n \n odd_count = 0\n for count in char_count.values():\n if count % 2 == 1:\n odd_count += 1\n if odd_count > 1:\n return False\n \n return True", "def is_permutation_palindrome(str):\n for s in permutations(str): # loop through all permutations of str\n if is_palindrome(s):\n return True # successfully find a palindrome permutation\n return False # reach this, then no possible permutation is palindrome", "def is_palindrome(n):\n d = digits(n)\n r = int(\"\".join([str(i) for i in d]))\n return n == r", "def is_palindrome(n):\n # store locally\n temp = n\n rev = 0\n while n > 0:\n # get digit one by one\n digit = n % 10\n # find reverse number\n rev = rev * 10 + digit\n # divide the number\n n = n // 10\n return temp == rev", "def is_palindrome3(word):\n\n i = 0\n j = -1\n\n word = word.lower()\n\n if not word.isalnum():\n word = ''.join(character for character in word if character.isalnum())\n\n if word == \"\":\n return True\n\n while len(word) > 1:\n\n if word[i] == word[j]:\n\n i += 1\n j += 1\n\n else:\n return False\n\n return True", "def is_palindrome(x):\n \n # Assume negative number is not a palindromic number.\n if x < 0:\n return False\n\n ten_base = 1\n \n # Determine num size with a base of 10\n while x / ten_base >= 10:\n ten_base *= 10\n\n while x > 0:\n left_num, right_num = x / ten_base, x % 10\n if left_num != right_num:\n return False\n \n # Update and prep for next iteration.\n x = (x % ten_base) / 10\n ten_base /= 100\n\n return True", "def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1", "def is_palindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] != v[len(v)-i-1]:\n return False\n return True", "def is_pal(string):\n a = reversed(list(string))\n if list(a) == list(string):\n return True\n else:\n return False", "def is_palindrome(a):\n\tmax = a\n\tmin = 0\n\twhile max > 0:\n\t\tmin = (min * 10 + max % 
10)\n\t\tmax /= 10\n\treturn min == a", "def is_palindrome(number_):\n temp = number_\n reverse = 0\n while number_ > 0:\n digit = number_ % 10\n reverse = reverse * 10 + digit\n number_ = number_ // 10\n if temp == reverse:\n return True\n else:\n return False", "def is_palindrome_ingoring_case_and_non_letter_chars(text):", "def is_palindrome_permutation(string):\n\n letter_to_count = dict()\n\n for letter in string:\n letter_to_count[letter] = letter_to_count.get(letter, 0) + 1\n\n residual = 0\n for count in letter_to_count.values():\n residual += count % 2\n\n # there are can be a single letter with an odd character count when the palindrome is of odd length\n return residual <= 1", "def check_pal(s):\n counts = df(int)\n len_without_spaces = 0\n # Count all nonspaces\n for c in s:\n if c != ' ':\n counts[c.lower()] += 1\n len_without_spaces += 1\n # Now find out how many chars occur an odd number of times\n odd_chars = 0\n for c in counts:\n if counts[c] % 2 != 0:\n odd_chars += 1\n # If string length is even there must be no odd counts\n if len_without_spaces % 2 == 0 and odd_chars == 0:\n return True\n # If string is odd there must be exactly one odd count\n if len_without_spaces % 2 != 0 and odd_chars == 1:\n return True\n # Else, it's not a palindrome\n return False", "def check_palindrome(word):\r\n char_count = {} #char count hash\r\n for char in word:\r\n if char in char_count:\r\n char_count[char] += 1\r\n else:\r\n char_count[char] = 1\r\n odd_count = 0 #counting number of odd nos encountered\r\n for count in char_count.values():\r\n if count % 2 != 0:\r\n odd_count += 1\r\n len_word = len(word)\r\n if len_word % 2 == 0:\r\n if odd_count >= 1:\r\n return False\r\n else:\r\n if odd_count > 1:\r\n return False\r\n return True", "def isPalendrome(number):\n\t\n\tnum = str(number)\n\ti \t= 0\n\tj \t= len(num) - 1\n\tmid = len(num) // 2\n\n\t#print(mid)\n\t\n\t# While i and j are not in the middle\n\twhile( i != mid):\n\t\t#print(i,j,sep=\"\\t\")\n\t\t#print(num[i],num[j], sep=\"\\t\")\n\t\tif(num[i] != num[j]):\n\t\t\treturn(False)\n\t\telse:\n\t\t\ti = i + 1\n\t\t\tj = j - 1\n\n\treturn(True)", "def check_pal(num):\r\n \r\n num = str(num) #Convert number to string.\r\n \r\n #If a number is a palindrome, rreturn True \r\n if num[0] == num[len(num)-1] and len(num) <= 3:\r\n return True\r\n \r\n #If the first and last digits of a number are equal when its length is > 3,\r\n #strip the end digits away analyse the resulting number.\r\n elif num[0] == num[len(num)-1]:\r\n return check_pal(num[1:len(num)-1])\r\n \r\n #If a number is not a palindrome, return False\r\n else:\r\n return False", "def check_palindrome(number):\r\n \r\n number = str(number) #Converting a number to a string.\r\n \r\n #If the number is a palindrome then it will return True \r\n if number[0] ==number[len(number)-1] and len(number) <= 3:\r\n return True\r\n #If the first and last digits of a number are equal when its length is > 3,\r\n #strip the end digits away analyse the resulting number.\r\n elif number[0] == number[len(number)-1]:\r\n return check_palindrome(number[1:len(number)-1])\r\n \r\n #If a number is not a palindrome, return False\r\n else:\r\n return False" ]
[ "0.89631575", "0.88805836", "0.8844723", "0.87995744", "0.8799044", "0.8768949", "0.8720113", "0.87144065", "0.86752564", "0.8644568", "0.8638425", "0.8585656", "0.85773665", "0.8561977", "0.8518276", "0.8494618", "0.84669435", "0.84236455", "0.8422641", "0.8409156", "0.84011394", "0.8390576", "0.8371396", "0.8355742", "0.82834005", "0.8274095", "0.8273739", "0.8272204", "0.8081084", "0.80704457", "0.8064851", "0.8063844", "0.8058595", "0.8043144", "0.79827374", "0.7980964", "0.7979319", "0.7975098", "0.7960228", "0.7940002", "0.7938119", "0.79214334", "0.7909743", "0.7906723", "0.7901615", "0.78931856", "0.78438175", "0.7835123", "0.783355", "0.7828202", "0.7804346", "0.77995026", "0.7791356", "0.77872926", "0.77756643", "0.7766099", "0.7754807", "0.7745631", "0.77369183", "0.77149737", "0.7706344", "0.7692275", "0.7688277", "0.7673719", "0.76619434", "0.7647283", "0.76294285", "0.7622036", "0.76143515", "0.76095855", "0.7601722", "0.7593796", "0.7592166", "0.7572514", "0.75442046", "0.75347054", "0.75129443", "0.75058156", "0.74975246", "0.7494968", "0.7470981", "0.7449977", "0.7440611", "0.73518264", "0.73384535", "0.7237789", "0.7214523", "0.7205062", "0.71334994", "0.7130315", "0.7111807", "0.7109981", "0.70883447", "0.7075817", "0.7073039", "0.70699066", "0.70362085", "0.6992321", "0.69679034", "0.6965996" ]
0.84398353
17
Find all pairs of distinct indices whose concatenated strings form a palindrome.
def palindromePairs(lst):
    results = []
    for i, e1 in enumerate(lst):
        for j, e2 in enumerate(lst):
            if i != j:
                if isPalindrome(e1+e2):
                    results.append((i, j))
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def palindromePairs(self, words: List[str]) -> List[List[int]]:\n d = {w : i for i, w in enumerate(words)}\n \n res = []\n for idx, word in enumerate(words):\n for i in range(len(word)+1):\n str1 = word[:i]\n str2 = word[i:]\n # first part should be palindrome, second part (reverse) should be in w\n if str1 == str1[::-1]:\n back = str2[::-1]\n if back in d and back != word:\n res.append([d[str2[::-1]], idx])\n # second part should be palindrome, first part (reverse) should be in w\n if str2 and str2 == str2[::-1]: # if the last part is empty, it is calculated before \n back = str1[::-1]\n if back in d and back != word: \n res.append([idx, d[str1[::-1]]])\n # print(res)\n return res", "def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n", "def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal", "def find_palindromes(self):\n\t\tself.square_palindromes = [x for x in self.squares if self.is_palindrome(x)]", "def is_palindrome_v3(s):\n i = 0\n j = len(s)-1\n\n while i < j and s[i] == s[j]:\n i = i + 1\n j = j -1\n\n return j <= i", "def palindrome_itertive(a):\n # TODO make this less crappy\n start = 0 \n end = len(a) - 1\n while start != end:\n # print(end)\n # print('start: ', start, ' a: ', a[start])\n # print('end: ', end, ' a: ', a[end])\n if not a[start] == a[end]:\n return False\n else:\n start += 1\n end -= 1\n return True", "def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True", "def palindromePermutation(s):\n char_count = {}\n for character in s:\n if character == ' ': continue # skip the spaces.\n char_count[character] = char_count.get(character, 0) + 1\n\n odd = False\n for key in char_count:\n if char_count[key] % 2 != 0:\n if odd:\n return False\n odd = True\n\n return True \n\n # Time Complexity: O(n)\n # Space Complexity: O(m), where m is the number of unique characters", "def palindromes(n: int) -> int:\n # 1 -> 2 -> 3 ... 9 -> 11 -> 22 -> 33 -> 44 .. 99 -> 101\n # 101 -> 111 -> 121 -> 131 -> ... 
-> 191 -> 202 -> 212\n # 989 -> 999 -> 1001 -> 1111 -> 1221\n # 9889 -> 9999 -> 10001 -> 10101 -> 10201\n prev = n\n s = str(n)\n even = len(s) % 2 == 0\n s = s[:ceil(len(s) / 2)]\n n = int(s)\n while True:\n if even:\n pal = int(''.join([s, s[-1::-1]])) # join '12' with '21'\n else:\n pal = int(''.join([s, s[-2::-1]])) # join '12' with '1'\n if prev <= pal:\n yield pal\n \n n += 1\n if all(digit == '9' for digit in s):\n even = not even\n if even: n //= 10\n s = str(n)", "def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1", "def palindrome():\n c = 0\n d = ''\n e = 0\n f = 0\n g = 0\n for a in range(100, 1000):\n for b in range(100, 1000):\n c = a * b\n d = str(c)\n if d == d[::-1] and c > e:\n e = c\n f = a\n g = b\n return e", "def num_palindrome():\n nums = map(str, range(1000000))\n odo = []\n for i in range(len(nums)):\n if len(nums[i]) < 6:\n odo.append('0'*(6-len(nums[i])) + nums[i])\n elif len(nums[i]) == 6:\n odo.append(nums[i])\n \n for i in range(len(odo)-3): \n first = odo[i][2:] == odo[i][:1:-1]\n second = odo[i+1][1:] == odo[i+1][:0:-1]\n third = odo[i+2][1:5] == odo[i+2][4:0:-1]\n fourth = odo[i+3][:] == odo[i+3][::-1]\n if first & second & third & fourth:\n print 'A possible odometer reading is '+odo[i]", "def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]", "def repeated_palindrome(palindromes_list):\n # the list is ordered in the reversed form (long to short)\n ordered_palindrome = sorted(palindromes_list)\n longest_first = ordered_palindrome[::-1]\n # initialize a new list to receive unique plaindromes data\n pal_list = [longest_first[0]]\n # the longest palindrome cannot fit in any other sequence \n # iterates over the longest_first original palindromes\n # get the start and end positions \n for data in longest_first:\n start = data[1]\n end = start + data[0]\n # iterate through the pal_list and \n # compare the start and end of the potential and palindromes \n # to check if the potential palindrome is unique.\n unique_palindrome = None\n for dat in pal_list:\n start_unique = dat[1]\n end_unique = start_unique + dat[0]\n # statement should test to check if the test palindrome fits\n # inside any of the identified 'real/unique' palindromes.\n if start >= start_unique and end <= end_unique:\n # if the palindrome tested fits inside\n unique_palindrome = False\n break\n else:\n # other wise it is unique\n unique_palindrome = True\n if unique_palindrome:\n # check if if it is not in the list\n if data not in pal_list:\n pal_list += [data]\n return pal_list", "def get_palindromes(kmer_list):\n rev_kmers = [get_reverse_complement(kmer) for kmer in kmer_list]\n palindromes = set()\n for mer1, mer2 in zip(kmer_list, rev_kmers):\n if check_is_palindrome(mer1, mer2):\n palindromes.add(mer1)\n return palindromes", "def check_palindrome():", "def isPalindrome(string):\n for i in range(len(string)//2):\n 
if string[i] != string[(i*-1)-1]:\n return False\n return True", "def countPalindromicSubsequences(self, S):\n if not S:\n return 0\n\n ways = [[0] * len(S) for i in range(len(S))]\n\n # base cases: for subarray of length 1 and 2\n for i in range(len(S)):\n ways[i][i] = 1\n if i < len(S) - 1:\n ways[i][i+1] = 2\n\n for ll in range(3, len(S)+1):\n for i in range(len(S) - ll + 1):\n j = ll + i - 1\n if S[i] != S[j]:\n ways[i][j] = ways[i+1][j] + ways[i][j-1] - ways[i+1][j-1]\n else:\n l = i + 1\n while l < j and S[l] != S[i]:\n l += 1\n r = j - 1\n while r > i and S[r] != S[j]:\n r -= 1\n\n if l < r:\n ways[i][j] = 2 * ways[i+1][j-1] - ways[l+1][r-1]\n elif l == r :\n ways[i][j] = 2 * ways[i+1][j-1] + 1\n else:\n ways[i][j] = 2 * ways[i+1][j-1] + 2\n return ways[0][len(S)-1] % (10**9 + 7)", "def is_palindrome_v2(s):\n n = len(s)\n\n return s[:n/2] == reverse(s[n-n/2:])", "def palindrome_permutation(w):\n w = w.strip().replace(' ', '')\n chars = {}\n for c in w:\n try:\n chars[c] += 1\n except KeyError:\n chars[c] = 1\n\n if len(w) % 2 == 0:\n #Check if there is an even number\n #of every character in w.\n return all(x % 2 == 0 for x in chars.values()) \n else:\n #Check if there is an even number\n #of every character in w,\n #except for exactly one character.\n found_odd = False\n for c in chars:\n if chars[c] % 1 == 0:\n if not found_odd:\n found_odd = True\n else:\n return False\n \n if found_odd:\n return True\n else:\n return False", "def find_mismatching_pair(s):\n i = 0\n j = len(s) - 1\n while i < j and s[i] == s[j]:\n i += 1\n j -= 1\n return i, j", "def is_palindrome(s):\n i, end = 0, len(s) // 2\n while i < end:\n if s[i] != s[len(s) - i - 1]:\n return False\n i += 1\n return True", "def main():\n for l in range(999,890,-1):\n for r in range(999,890,-1):\n num= l*r\n ans= palindrome_check(num)\n if ans:\n print l,r,num\n return\n print l,r,num\n print \"No palindrome found.\"\n return", "def is_palindrome(n):\n ns = str(n)\n for i in range(0, len(ns) // 2):\n if ns[i] != ns[len(ns) - 1 - i]: return False\n return True", "def probl4():\n\n largest_palindrome = 0\n for i in xrange(101, 1000):\n for j in xrange(101, 1000):\n output = i * j\n if str(output) == str(output)[::-1] and \\\n output > largest_palindrome:\n largest_palindrome = output\n return largest_palindrome", "def is_palindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] != v[len(v)-i-1]:\n return False\n return True", "def palindrome_permutation(s):\n char_dict = {}\n for i in s:\n if i in char_dict:\n char_dict[i] += 1\n else:\n char_dict[i] = 1\n numOdd = 0\n for key in char_dict:\n if key != ' ':\n if char_dict[key] % 2 == 1:\n numOdd += 1\n if numOdd < 2:\n print_permutation(char_dict)\n return True\n else:\n return False", "def match(list_string):\n assert type(list_string)==list\n for i in list_string:\n assert type(i)==str\n assert i.isalpha()\n #Loops through all the possible substrings of the list of words to find the word pairs that are palindromes.\n my_match = []\n for i in range(0,len(list_string)):\n for j in range(0,len(list_string)):\n if i!=j:\n a = list_string[i]\n b = list_string[j]\n c = a+b\n d = b+a\n if c==c[::-1]:\n if (i,j) not in my_match:\n my_match.append((i,j))\n elif d==d[::-1]:\n if (j,i) not in my_match:\n my_match.append((j,i))\n return my_match", "def is_palindrome_permutation(string):\n\n letter_to_count = dict()\n\n for letter in string:\n letter_to_count[letter] = letter_to_count.get(letter, 0) + 1\n\n residual = 0\n for count in 
letter_to_count.values():\n residual += count % 2\n\n # there are can be a single letter with an odd character count when the palindrome is of odd length\n return residual <= 1", "def is_palindrome(sub):\n for i in range(len(sub)):\n if sub[i] != sub[len(sub) - i - 1]:\n return False\n return True", "def is_palindromic(lst):\n return all( lst[i] == lst[-(i+1)] for i in range(len(lst)) )", "def get_pairs_to_delete(cycle):\n\n pairs = []\n for i, (_, right) in enumerate(cycle):\n left = cycle[(i - 1) % len(cycle)][0]\n successors = right.prefs[right.prefs.index(left) + 1 :]\n for successor in successors:\n pair = (right, successor)\n if pair not in pairs and pair[::-1] not in pairs:\n pairs.append((right, successor))\n\n return pairs", "def is_list_palindrome(lst):\n n = len(lst)\n for i in range(n//2): # go until middle of list\n if lst[i] != lst[-1 - i]: # a pair of elements doesn't match\n return False\n return True", "def find_reverse_palindromes(dna: str, min_len: int=4, max_len: int=12, zero_based: bool=True):\n def helper_for_non_zero_based(indexes: List[Tuple[int, int]]):\n if not zero_based:\n return [(i + 1, l) for i, l in indexes]\n else:\n return indexes\n\n length = len(dna)\n result = []\n for i in range(length):\n for l in range(min(min_len, length - i), min(max_len + 1, length - i + 1)):\n if l > max_len or l < min_len:\n continue\n sub_dna = dna[i: i + l]\n if sub_dna == reverse_complement(sub_dna):\n result.append((i, l))\n return helper_for_non_zero_based(result)", "def palindroame_multimi(lista_1, lista_2):\n lungime = lungime_multimi(lista_1, lista_2)\n lista_palindroame = []\n for i in range(0, lungime):\n element = str(lista_1[i]) + str(lista_2[i])\n if is_palindrome(element):\n lista_palindroame.append(int(element))\n return lista_palindroame", "def palindrome_check(num):\n num= str(num)\n len_num= len(num)\n for i in range(len_num/2):\n if num[i] == num[len_num-i-1]:\n ans= True\n else:\n ans= False\n break\n return ans", "def is_palindrome(n):\n num = list(str(n))\n h1 = num[:int(len(num)/2)] # first half of palindrome\n if len(num) % 2 == 0:\n h2 = num[int(len(num)/2):] # second half of palindrome\n else:\n h2 = num[int(len(num)/2) + 1:]\n return h1 == list(reversed(h2))", "def part3(string):\n palindrome = True\n for i in range(0, int(len(string)/2) + 1):\n if(string[i] != string[int(len(string))-i-1]):\n palindrome = False\n print(palindrome)", "def _get_double_base_palindromes(threshold: int) -> Iterable[int]:\n for number in range(threshold):\n if is_palindromic_number(number) and is_palindromic_number(decimal_to_binary(number)):\n yield number", "def check(i):\r\n return (has_palindrome(i, 2, 4) and\r\n has_palindrome(i+1, 1, 5) and\r\n has_palindrome(i+2, 1, 4) and\r\n has_palindrome(i+3, 0, 6))", "def palindrome(x):\n pass", "def are_reversed(i, j):\r\n return str_fill(i, 2) == str_fill(j, 2)[::-1]", "def is_palindrome(string):\n k, mid = len(string), len(string) // 2\n # checking even palindromes\n if k % 2 == 0:\n return string[:mid] == get_reverse_complement(string[mid:])\n # checking odd palindromes\n else:\n return string[:mid] == get_reverse_complement(string[mid + 1:])", "def find_longest_palindromic_string(text):\n n = len(text)\n start = 0\n max_len = 1\n matrix = [[False for _ in range(n)] for _ in range(n)]\n # all palindrome of length 1\n for i in range(n):\n matrix[i][i] = True\n # check palindrome of length 2\n for i in range(n-1):\n if text[i] == text[i + 1]:\n matrix[i][i + 1] = True\n start = i\n max_len = 2\n # check palindrome of length 3 
or more\n for length in range(3, n):\n for i in range(n-length+1):\n j = i + length - 1\n if text[i] == text[j] and matrix[i+1][j-1]:\n matrix[i][j] = True\n start = i\n max_len = length\n return text[start: start + max_len]", "def is_permutation_palindrome(str):\n for s in permutations(str): # loop through all permutations of str\n if is_palindrome(s):\n return True # successfully find a palindrome permutation\n return False # reach this, then no possible permutation is palindrome", "def find_pairs(words): \n pass", "def part3():\n Input = raw_input('please enter a string:')\n for i in range(len(Input)):\n if Input[i] != Input[len(Input)-i-1]:\n print('It is not a palidrome')\n break\n else:\n i = i + 1", "def is_palindrome_permutation(input_string):\n input_string = input_string.lower()\n input_string = ''.join(input_string.split())\n\n number_chars = {}\n number_even_chars = 0\n\n for char in input_string:\n if char in number_chars:\n number_chars[char] += 1\n else:\n number_chars[char] = 1\n\n for char in number_chars:\n if number_chars[char] % 2 != 0:\n number_even_chars += 1\n if number_even_chars >= 2:\n return False\n\n return True", "def check_palindrome(s):\n return s[::-1] == s", "def permutation(string):\n i = 0\n j = len(string) - 1\n while i < j:\n if string[i] != string[j]:\n return False\n i += 1\n j -= 1\n return True", "def check_palindrome_using_reverse(self):\n slow = self.head\n fast = self.head\n midnode = None\n prev_to_slow = None\n while fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\n if fast:\n midnode = slow\n slow = slow.next\n prev_to_slow.next = None\n second_half = slow\n second_half = LinkedListReverse.iterative_reverse(second_half)\n res = CheckPalindrome.compare_list(self.head, second_half)\n second_half = LinkedListReverse.iterative_reverse(second_half)\n if midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\n else:\n prev_to_slow.next = second_half\n return res", "def palindrome(sll):\n\n node = sll.head\n counter = 1\n half_len = ceil(len(sll) / 2)\n\n while node is not None:\n if counter >= half_len:\n break\n elif node.data != k_to_last(sll, counter):\n return False\n else:\n counter += 1\n node = node.next\n return True", "def get_shortest_palindrome(text):\n strlen = len(text)\n unique_chars = len(set(text))\n print(set(text))\n if unique_chars == strlen:\n return (\"\".join(list(reversed(text[1:])))+text)\n if text==\"\" or strlen==1 or unique_chars==1:\n return text\n if is_palindrome(text):\n return text\n if strlen//unique_chars > 100:\n d = {}\n for char in set(text):\n \n left_pad = []\n #print(strlen)\n i = strlen-1\n while(i!=0):\n left_pad.append(text[i])\n #print(left_pad)\n #print(\"text[:i-1]: \",text[:i],i)\n if is_palindrome(text[:i]):\n # print(\"\".join(left_pad)+text)\n return (\"\".join(left_pad)+text)\n i = i -1", "def is_antipalindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] == v[len(v)-i-1]:\n return False\n return True", "def answer(codes):\n s = set()\n num_distinct_codes = 0\n for code in codes:\n if code in s:\n continue\n elif is_palindrome(code):\n s.add(code)\n else:\n s.add(code)\n s.add(code[::-1])\n num_distinct_codes += 1\n return num_distinct_codes", "def is_palindrome(n):\n x, y = n, 0\n f = lambda: 10 * y + x % 10\n while x > 0:\n x, y = x // 10, f()\n return y == n", "def is_palindrome_v1(s):\n return reverse(s) == s", "def is_palindrome(n):\n return(n == reverse(n))", "def is_palindrome(str):\n n = 
len(str)\n for i in range(n//2): # loop to middle of string str\n if str[i] != str[-1-i]:\n return False # find a character doesn't match with its mirror-positioned character\n return True # reach this, then str is palindrome", "def is_palindrome(num):\n digitList = int2list(num)\n \n i = 0\n while i <= round(len(digitList)/2):\n if digitList[i] != digitList[-(i+1)]:\n return False\n i += 1\n return True", "def double_base_palindrome(limit = 1000000):\n\t(palindrome_sum, i) = (0, 1)\n\tfor i in xrange(1, limit):\n\t\tif is_palindrome(i) and is_palindrome(int(decimal_binary(i))):\n\t\t\tpalindrome_sum += i\n\n\treturn palindrome_sum", "def is_palindrome(s):\n return s == s[::-1]", "def get_longest_palindrome(v,s):\n m,j = max( (x,i) for i,x in enumerate(v) )\n start = j//2 - m//2\n return s[start:start+m]", "def is_palindrome(n):\n x, y = n, 0\n f = lambda: y * 10 + x % 10\n while x > 0:\n x, y = x // 10, f()\n return y == n", "def test_palendrome_long_list_true():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n test_ll.push('c')\n test_ll.push('d')\n test_ll.push('c')\n test_ll.push('b')\n test_ll.push('a')\n assert linked_palendrome(test_ll) is True", "def largest_palindrome(num):\n rangeMax = 10 ** num - 1\n rangeMin = 1\n maxPal = 1\n i = rangeMax\n\n while i >= rangeMin:\n for j in reversed(range(rangeMin,i+1)):\n if is_palindrome(i * j) and i * j > maxPal:\n maxPal = i * j\n rangeMax = i \n rangeMin = j\n i -= 1\n break\n if j == rangeMin:\n i -= 1\n return [maxPal, rangeMin, rangeMax]", "def longestPalindromeSubseq(self, s: str) -> int:\n n = len(s)\n dp = [[1] * n for _ in range(n)]\n for length in range(1, n + 1):\n for i in range(n - length + 1):\n j = i + length - 1\n print(i, j)\n if length == 1:\n dp[i][j] = 1\n elif s[i] == s[j]:\n dp[i][j] = dp[i + 1][j - 1] + 2\n else:\n dp[i][j] = max(dp[i][j - 1], dp[i + 1][j])\n return dp[0][n - 1]", "def find_reversed(word_list):\n reversed_list = []\n word_set = set(word_list)\n for word in word_list:\n if word[::-1] in word_set and not check_palindrome(word):\n reversed_list.append(word)\n return reversed_list", "def is_palindrome(n):\n d = digits(n)\n r = int(\"\".join([str(i) for i in d]))\n return n == r", "def primePalindrome(self, N: int) -> int:\n for p in palindromes(N):\n if isPrime(p):\n return p", "def lab10_q4():\n return \"\"\"\n Use 'for i in range(len(seq)//2):' to give i be the indexing for the first half of the lst.\n Then make sure it is equal to the opposite indexing which is [-i-1] or [len(seq)-1-i]\n\tif it is not equal return false\n\tif the for loop is done without fail it means they are all equal so return True\n \"\"\"", "def isPalindromic(n: int):\n return str(n) == str(n)[::-1]", "def find_palindromes(word_list):\n palindrome_list = []\n\n for word in word_list:\n if check_palindrome(word):\n palindrome_list.append(word)\n\n return palindrome_list", "def palindrome(self):\n vas = []\n file = self.read1()\n print(file[0])\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n s_ii = s_i[::-1]\n if s_ii == s_i and s_i!= \"\":\n vas.append(s_i)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas", "def isPalendrome(number):\n\t\n\tnum = str(number)\n\ti \t= 0\n\tj \t= len(num) - 1\n\tmid = len(num) // 2\n\n\t#print(mid)\n\t\n\t# While i and j are not in the middle\n\twhile( i != 
mid):\n\t\t#print(i,j,sep=\"\\t\")\n\t\t#print(num[i],num[j], sep=\"\\t\")\n\t\tif(num[i] != num[j]):\n\t\t\treturn(False)\n\t\telse:\n\t\t\ti = i + 1\n\t\t\tj = j - 1\n\n\treturn(True)", "def fairAndSquareList(n):\n result = []\n for i in range(n+1):\n li = list(str(i))\n e = int(''.join(li + list(reversed(li))))\n if e <= n:\n if (isSquareOfPalindrome(e)): result.append(e)\n o = int(''.join(li + list(reversed(li))[1:]))\n if o <= n:\n if (o != e and isSquareOfPalindrome(o)): result.append(o)\n else:\n break\n return result", "def pairs_upto(n):\n return ((a, b)\n for a in range(1, n)\n for b in range(1, n)\n if a <= b)", "def pairwise(s):\n return [(s[i - 1], s[i]) for i in range(1, len(s))]", "def calc_syndrome(codeword, n):\r\n sym = 0\r\n for i in range(1, n):\r\n if codeword[i]:\r\n sym ^= i\r\n extra_parity = calc_parity_vector(codeword)\r\n if extra_parity == codeword[0]:\r\n if sym == 0:\r\n return 0, sym\r\n else:\r\n return 2, sym\r\n else:\r\n if sym >= n:\r\n pass\r\n else:\r\n codeword[sym] ^= 1\r\n return 1, sym", "def two_pairs(pword):\n\n last = ''\n count = 1\n counts = []\n for char in pword:\n if char == last:\n char_and_count = counts.pop()\n count = char_and_count.pop()\n updated_count = count + 1\n char_and_count.append(updated_count)\n counts.append(char_and_count)\n elif char != last:\n counts.append([char, count])\n last = char\n count = 1\n\n distinct_pairs = set()\n for char_and_count in counts:\n if char_and_count[1] >= 2:\n distinct_pairs.update(char_and_count[0])\n if len(distinct_pairs) >= 2:\n return True\n return False", "def _entangled_qubit_pairing(self, qubits: Sequence[int], \n *args, **kwargs) -> List[Tuple[int]]:\n pairing_indices = [(qubits[i], qubits[i+1]) for i in range(len(qubits)-1)]\n \n return pairing_indices", "def palindrome(word):\n reverse = word[::-1]\n return word == reverse", "def is_palindrome(text):\n\n # Property of a palindrome:\n # There be a maximum of only one letter that sums to an odd number\n \n char_count = {}\n # edge cases\n # Consider empty text as palindrome\n \n for char in text:\n if char in char_count:\n char_count[char] += 1\n else:\n char_count[char] = 1\n \n odd_count = 0\n for count in char_count.values():\n if count % 2 == 1:\n odd_count += 1\n if odd_count > 1:\n return False\n \n return True", "def checkPalindrome(self, s: str) -> str:\n # return s[:len(s) // 2] == s[(len(s) + 1) // 2::-1]\n return s == s[::-1]", "def is_palindrome(s):\n s_copy = s.replace(\" \", \"\")\n n = len(s_copy)\n for i in range(n // 2):\n left = s_copy[i]\n right = s_copy[n - 1 - i]\n if left.upper() != right.upper():\n return False\n return True", "def closest_palindrome_number(number):\n\n def check_all_9(number):\n\n for n in number:\n if n != 9:\n return False\n return True\n\n num_list = [int(i) for i in str(number)]\n\n num_size = len(num_list)\n\n if check_all_9(num_list):\n return number + 2\n\n mid_point = int(num_size/2)\n\n def list_to_int(nums): return int(''.join(str(i) for i in nums))\n\n def check_palindromes(all_palindromes, number):\n min_found = sys.maxsize\n pal_found = 0\n multiple_pals = []\n\n for i in all_palindromes:\n pal = list_to_int(i)\n distance = abs(number - pal)\n if distance <= min_found and distance != 0:\n if distance == min_found:\n multiple_pals.append(i)\n else:\n multiple_pals = []\n min_found = distance\n pal_found = i\n multiple_pals.append(i)\n\n if len(multiple_pals) == 1:\n return list_to_int(pal_found)\n else:\n numbers = []\n for i in multiple_pals:\n number = list_to_int(i)\n 
numbers.append(number)\n return numbers\n\n if num_size % 2 == 0:\n\n # Even number\n splitted = num_list[0: mid_point]\n mirrored = splitted + splitted[::-1]\n\n all_palindromes = []\n all_palindromes.append(mirrored)\n\n if splitted[-1] != 9:\n split_add_one = list(splitted)\n split_add_one[-1] += 1\n split_add_one = all_palindromes.append(\n split_add_one + split_add_one[::-1])\n\n if splitted[-1] != 0:\n split_sub_one = list(splitted)\n split_sub_one[-1] -= 1\n split_sub_one = all_palindromes.append(\n split_sub_one + split_sub_one[::-1])\n\n else:\n # Odd number\n splitted = num_list[0: mid_point]\n middle_num = num_list[mid_point]\n\n all_palindromes = []\n all_palindromes.append(\n splitted + [middle_num] + splitted[::-1])\n\n if middle_num != 9:\n all_palindromes.append(\n splitted + [middle_num + 1] + splitted[::-1])\n\n if middle_num != 0:\n all_palindromes.append(\n splitted + [middle_num - 1] + splitted[::-1])\n\n return check_palindromes(all_palindromes, number)", "def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst", "def pairwise_equalities(things):\n\n return np.array([things[i] == things[j] for i in range(len(things))\n for j in range(i + 1, len(things))])", "def check_palindrome(word):\r\n char_count = {} #char count hash\r\n for char in word:\r\n if char in char_count:\r\n char_count[char] += 1\r\n else:\r\n char_count[char] = 1\r\n odd_count = 0 #counting number of odd nos encountered\r\n for count in char_count.values():\r\n if count % 2 != 0:\r\n odd_count += 1\r\n len_word = len(word)\r\n if len_word % 2 == 0:\r\n if odd_count >= 1:\r\n return False\r\n else:\r\n if odd_count > 1:\r\n return False\r\n return True", "def is_palindrome(s):\n rev_str = \"\"\n for let in range(1, len(s) + 1):\n rev_str += s[-let]\n return rev_str == s", "def isPalindrome(n):\n str_n = str(n)\n return list(str_n) == list(reversed(str_n))", "def solution(A):\n xor = 0\n for item in A:\n xor ^= item\n return xor", "def is_palindrome(string):\r\n r_string = string[::-1]\r\n cnt = 0\r\n while cnt < len(string):\r\n if string[cnt] == r_string[cnt]:\r\n cnt += 1\r\n continue\r\n else:\r\n return False\r\n #cnt += 1\r\n return True", "def sherlockAndAnagrams(s):\n # A Dict of palindromes and their counts.\n palindrome_counts = {}\n\n # Get all substrings of length len(s)/c\n for substring_length in range(len(s) - 1):\n for substring_starting_index in range(len(s) - substring_length):\n substring_end_index = substring_starting_index + substring_length + 1\n substring = s[substring_starting_index:substring_end_index]\n # TODO: Sorting is an inefficient way to \"hash\" by palindrome.\n # A letter count dict would be more efficient (in the initial grouping).\n substring_arr = list(substring)\n substring_arr.sort()\n sorted_substring = \"\".join(substring_arr)\n\n if palindrome_counts.get(sorted_substring):\n palindrome_counts[sorted_substring] += 1\n else:\n palindrome_counts[sorted_substring] = 1\n\n return sum([_two_of_m(val) for val in palindrome_counts.values()])", "def longestPalindrome(self, s: str) -> int:\n # approach #1 -- using hashset\n # approach 2 -- using hashmap\n hashmap = defaultdict(int)\n odd = 0\n out = 0\n for char in s:\n hashmap[char] += 1\n\n for key, val in hashmap.items():\n if val % 2 == 1:\n odd = 1\n 
out += (val -1)\n else:\n out += val\n return out +odd", "def longest_palindromic_substring(s):\n longest = s[0] if len(s) > 0 else \"\"\n for i in range(len(s)):\n j = len(s)\n while s[i] in s[i+1:j] and j <= len(s):\n j = s[i + 1:j].rfind(s[i]) + i + 2\n print(i, j)\n if is_palindrome(s[i:j]) and len(longest) < len(s[i:j]):\n longest = s[i:j]\n j = len(s) + 1\n else:\n j -= 1\n if len(s) - len(longest) <= i:\n break\n return longest", "def has_palindrome(i, start, length):\r\n s = str(i)[start:start+length]\r\n return s[::-1] == s", "def check_pointing_pair(self):\n\n for index in range(self.board_size):\n squ = self.squares[index]\n nos = self.get_numbers([self.possibles[cell[0]][cell[1]] for cell in squ])\n\n for num in nos:\n s_row, s_col, found = self.same_row_col(num, squ)\n if s_row:\n row = found[0][0]\n for c in range(self.board_size):\n if (row, c) not in squ:\n if num in self.possibles[row][c]:\n self.possibles[row][c].remove(num)\n if s_col:\n col = found[0][1]\n for r in range(self.board_size):\n if (r, col) not in squ:\n if num in self.possibles[r][col]:\n self.possibles[r][col].remove(num)", "def offDiagPairs(self):\n return np.transpose(np.nonzero(np.triu(self.LaplacianMatrix,k=2)))", "def pairwise(a,b):\n return a != b" ]
[ "0.7363634", "0.7076098", "0.70694715", "0.6878793", "0.67712194", "0.67707276", "0.67490494", "0.65257967", "0.6466228", "0.63992494", "0.63456815", "0.633291", "0.6321037", "0.62712246", "0.6258766", "0.6231323", "0.6217285", "0.62059194", "0.6202977", "0.6194959", "0.6181608", "0.61775905", "0.61702603", "0.6162088", "0.61519253", "0.6142727", "0.6088312", "0.607948", "0.6078674", "0.606602", "0.60577965", "0.6045681", "0.60384285", "0.6023728", "0.6006212", "0.5988783", "0.59614277", "0.5956754", "0.5952331", "0.5952144", "0.5937994", "0.59133685", "0.58921516", "0.58869153", "0.5858956", "0.5857083", "0.584714", "0.5830476", "0.58254534", "0.580962", "0.5799738", "0.57892245", "0.5787105", "0.5776523", "0.5766305", "0.57371336", "0.57235557", "0.5720495", "0.57203656", "0.5715349", "0.5690328", "0.5689671", "0.5682353", "0.56775314", "0.56587064", "0.5656122", "0.56427854", "0.5641152", "0.56372654", "0.5636118", "0.5628065", "0.56229794", "0.5619036", "0.5609032", "0.560901", "0.56048614", "0.55975366", "0.5563628", "0.5557867", "0.5556248", "0.5552771", "0.5551921", "0.5540509", "0.55371493", "0.5530871", "0.5526624", "0.5520218", "0.5519088", "0.55179435", "0.5517434", "0.55083096", "0.55045915", "0.54863894", "0.54758126", "0.54743284", "0.5467825", "0.5463784", "0.5459886", "0.54591393", "0.54523885" ]
0.78059846
0
Sets the properties mean, count and sum for an object. If a reduceRegion operation returns no values in Earth Engine, one cannot join the result list with a list of the zones without setting a nodata (9999) value first
def ensure_default_properties(obj):
    obj = ee.Dictionary(obj)
    default_properties = ee.Dictionary({"mean": -9999, "count": -9999, "sum": -9999})
    return default_properties.combine(obj)
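A minimal usage sketch (an illustration, not part of the dataset record): it shows how this helper is typically mapped over the grouped output of reduceRegion before the zone and value lists are joined, mirroring the zonalStatsToRaster negatives below. The constant images, the rectangle geometry, and the scale/maxPixels values are hypothetical placeholders chosen only to make the snippet self-contained.

import ee

ee.Initialize()  # assumes Earth Engine credentials are already configured

def ensure_default_properties(obj):
    # As defined in the document above: backfill -9999 for zones with no pixels.
    obj = ee.Dictionary(obj)
    default_properties = ee.Dictionary({"mean": -9999, "count": -9999, "sum": -9999})
    return default_properties.combine(obj)

# Hypothetical inputs: a single-band value image, an integer zones image, a region geometry.
value_image = ee.Image.constant(42.0)
zones_image = ee.Image.constant(1).toInt()
geometry = ee.Geometry.Rectangle([0, 0, 1, 1])

# Grouped reducer: band 0 is reduced, band 1 ("zones") defines the groups.
reducer = (ee.Reducer.mean()
           .combine(reducer2=ee.Reducer.count(), sharedInputs=True)
           .group(groupField=1, groupName="zones"))

results = ee.List(
    value_image.addBands(zones_image.rename("zones")).reduceRegion(
        reducer=reducer,
        geometry=geometry,
        scale=1000,
        maxPixels=1e9,
    ).get("groups")
)

# Every group dictionary now carries mean/count/sum keys, so the zones list and the
# value list stay the same length and can be joined or remapped without gaps.
results = results.map(ensure_default_properties)

Mapping the helper first is what lets the subsequent zonesImage.remap(zoneList, valueList) step in the zonalStats negatives below succeed even for zones that contained no valid pixels.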
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def region_stats(ds, mask, region_name):\n agg = ds.where(mask == arctic_mask_region[region_name]).mean(dim=['x','y'])\n if 'latitude' in agg:\n agg = agg.drop('latitude')\n if 'longitude' in agg:\n agg = agg.drop('longitude')\n return agg", "def addOverallMeans(results, fieldNames, fields):\n # Work out what the values we already have look like\n meanValues = [\"Overall Mean\"]\n geoMeanValues = [\"Overall Geometric Mean\"]\n for name in fieldNames[1:]:\n if name in fields:\n values = [r.__dict__[name] for r in results]\n geoMeanValues.append(geomean(values))\n meanValues.append(mean(values))\n else:\n geoMeanValues.append(0)\n meanValues.append(0)\n\n results.append(measurement(fieldNames, meanValues))\n results.append(measurement(fieldNames, geoMeanValues))\n return results", "def zonalStatsToRaster(image,zonesImage,geometry,maxPixels,reducerType):\n # reducertype can be mean, max, sum, first. Count is always included for QA\n # the resolution of the zonesimage is used for scale\n\n reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mean\"),ee.Reducer.mean(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"max\"),ee.Reducer.max(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"sum\"),ee.Reducer.sum(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"first\"),ee.Reducer.first(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mode\"),ee.Reducer.mode(),\"error\"))))\n )\n reducer = ee.Reducer(reducer).combine(reducer2= ee.Reducer.count(), sharedInputs= True).group(groupField=1, groupName=\"zones\") \n\n scale = zonesImage.projection().nominalScale().getInfo()\n zonesImage = zonesImage.select(zonesImage.bandNames(),[\"zones\"])\n\n totalImage = ee.Image(image).addBands(zonesImage)\n resultsList = ee.List(totalImage.reduceRegion(\n geometry= geometry, \n reducer= reducer,\n scale= scale,\n maxPixels=maxPixels\n ).get(\"groups\"))\n\n resultsList = resultsList.map(ensure_default_properties); \n zoneList = mapList(resultsList, 'zones');\n countList = mapList(resultsList, 'count');\n valueList = mapList(resultsList, reducerType);\n\n valueImage = zonesImage.remap(zoneList, valueList).select([\"remapped\"],[reducerType])\n countImage = zonesImage.remap(zoneList, countList).select([\"remapped\"],[\"count\"])\n newImage = zonesImage.addBands(countImage).addBands(valueImage)\n return newImage,zoneList,valueList,countList", "def zonal_interchange_total(\n self, start_date_range: str = None, end_date_range: str = None, **_\n ):\n if self.AGG_BY not in [\"zone\", \"zones\", \"Zone\", \"Zones\"]:\n logger.warning(\"This plot only supports aggregation zone\")\n return UnsupportedAggregation()\n\n # List of properties needed by the plot, properties are a set of tuples and\n # contain 3 parts: required True/False, property name and scenarios required,\n # scenarios must be a list.\n properties = [(True, \"line_Flow\", self.Scenarios)]\n\n # Runs get_formatted_data within PlotDataStoreAndProcessor to populate PlotDataStoreAndProcessor dictionary\n # with all required properties, returns a 1 if required data is missing\n check_input_data = self.get_formatted_data(properties)\n\n if 1 in check_input_data:\n return MissingInputData()\n\n outputs: dict = {}\n\n for zone_input in self.Zones:\n\n logger.info(f\"{self.AGG_BY} = {zone_input}\")\n\n mplt = PlotLibrary()\n fig, ax = mplt.get_figure()\n plt.subplots_adjust(wspace=0.05, hspace=0.2)\n\n net_exports_all = []\n # Holds each scenario output table\n data_out_chunk = []\n\n for n, scenario in 
enumerate(self.Scenarios):\n\n exp_lines = self.meta.zone_exporting_lines(scenario)\n imp_lines = self.meta.zone_importing_lines(scenario)\n\n if exp_lines.empty or imp_lines.empty:\n return MissingMetaData()\n\n exp_lines.columns = [\"region\", \"line_name\"]\n imp_lines.columns = [\"region\", \"line_name\"]\n\n # Find list of lines that connect each region.\n exp_oz = exp_lines[exp_lines[\"region\"] == zone_input]\n imp_oz = imp_lines[imp_lines[\"region\"] == zone_input]\n\n other_zones = self.meta.zones(scenario).name.tolist()\n other_zones.remove(zone_input)\n\n net_exports = []\n logger.info(f\"Scenario = {str(scenario)}\")\n flow = self[\"line_Flow\"][scenario]\n flow = flow.reset_index()\n\n for other_zone in other_zones:\n exp_other_oz = exp_lines[exp_lines[\"region\"] == other_zone]\n imp_other_oz = imp_lines[imp_lines[\"region\"] == other_zone]\n\n exp_pair = pd.merge(\n exp_oz, imp_other_oz, left_on=\"line_name\", right_on=\"line_name\"\n )\n imp_pair = pd.merge(\n imp_oz, exp_other_oz, left_on=\"line_name\", right_on=\"line_name\"\n )\n\n # Swap columns for importing lines\n imp_pair = imp_pair.reindex(\n columns=[\"region_from\", \"line_name\", \"region_to\"]\n )\n\n export = flow[flow[\"line_name\"].isin(exp_pair[\"line_name\"])]\n imports = flow[flow[\"line_name\"].isin(imp_pair[\"line_name\"])]\n\n export = export.groupby([\"timestamp\"]).sum()\n imports = imports.groupby([\"timestamp\"]).sum()\n\n # Check for situations where there are only exporting or importing lines for this zonal pair.\n if imports.empty:\n net_export = export\n elif export.empty:\n net_export = -imports\n else:\n net_export = export - imports\n net_export.columns = [other_zone]\n\n if pd.notna(start_date_range):\n net_export = set_timestamp_date_range(\n net_export, start_date_range, end_date_range\n )\n if net_export.empty is True:\n logger.warning(\"No data in selected Date Range\")\n continue\n net_exports.append(net_export)\n\n net_exports = pd.concat(net_exports, axis=1)\n net_exports = net_exports.dropna(axis=\"columns\")\n net_exports.index = pd.to_datetime(net_exports.index)\n net_exports[\"Net Export\"] = net_exports.sum(axis=1)\n\n positive = net_exports.agg(lambda x: x[x > 0].sum())\n negative = net_exports.agg(lambda x: x[x < 0].sum())\n\n both = pd.concat([positive, negative], axis=1)\n both.columns = [\"Total Export\", \"Total Import\"]\n\n # unitconversion based off peak export hour, only checked once\n if scenario == self.Scenarios[0]:\n unitconversion = self.capacity_energy_unitconversion(\n both,\n self.Scenarios,\n )\n\n both = both / unitconversion[\"divisor\"]\n net_exports_all.append(both)\n\n # Add scenario column to output table.\n scenario_names = pd.Series([scenario] * len(both), name=\"Scenario\")\n data_table = both.set_index([scenario_names], append=True)\n data_table = data_table.add_suffix(f\" ({unitconversion['units']})\")\n data_out_chunk.append(data_table)\n\n data_table_out = pd.concat(data_out_chunk)\n\n # Make scenario/color dictionary.\n color_dict = dict(zip(self.Scenarios, self.color_list))\n\n mplt.clustered_stacked_barplot(\n net_exports_all, labels=self.Scenarios, color_dict=color_dict\n )\n ax.axhline(y=0, linestyle=\":\", color=\"gray\")\n ax.set_ylabel(\n f\"Interchange ({unitconversion['units']}h)\",\n color=\"black\",\n rotation=\"vertical\",\n )\n if plot_data_settings[\"plot_title_as_region\"]:\n mplt.add_main_title(zone_input)\n\n outputs[zone_input] = {\"fig\": fig, \"data_table\": data_table_out}\n\n return outputs", "def area_average_obs(cube, 
region, model_units):\n \n # Specify the latitudes and longitudes starting from the smallest number to largest or in latitude and longitude from south to north and east to west\n lon1, lon2, lat1, lat2 = region[0], region[1], region[2], region[3]\n\n print(cube.coord('latitude').var_name)\n print(cube.coord('latitude').units.modulus)\n cube.coord('latitude').units = model_units\n cube.coord('longitude').units = model_units\n print(cube.coord('latitude').units.modulus)\n\n # Then intersect the data at these points\n cube = cube.intersection(longitude=(lon1, lon2),latitude=(lat1, lat2))\n\n # cube.coord('latitude').guess_bounds()\n # cube.coord('longitude').guess_bounds()\n\n # area weighting\n weights = iris.analysis.cartography.area_weights(cube)\n # Average that area by latitude and longitudes by the weighted mean\n cube = cube.collapsed(['latitude','longitude'], iris.analysis.MEAN, weights=weights)\n\n return cube", "def calc_region_sum(cube, coord_names, aux_coord_names, grid_type, area_cube, region):\n\n if grid_type == 'curvilinear':\n assert area_cube, \"Must provide an area cube of curvilinear data\"\n\n cube = cube.copy() \n coord_names = coord_names.copy()\n lat_bounds = region_bounds[region]\n\n # Extract region\n if lat_bounds:\n if grid_type == 'curvilinear':\n cube = extract_region_curvilinear(cube, lat_bounds)\n else:\n cube = extract_region_latlon(cube, lat_bounds)\n\n if 'm-2' in str(cube.units):\n # Get area weights \n if area_cube:\n if grid_type == 'latlon' and lat_bounds:\n area_cube = extract_region_latlon(area_cube, lat_bounds)\n area_data = uconv.broadcast_array(area_cube.data, [1, 2], cube.shape)\n else:\n area_data = spatial_weights.area_array(cube)\n\n # Multiply by area\n cube.data = cube.data * area_data\n units = str(cube.units)\n cube.units = units.replace('m-2', '')\n\n assert cube.units == 'J'\n\n coord_names.remove('time')\n spatial_agg = cube.collapsed(coord_names, iris.analysis.SUM)\n \n spatial_agg.remove_coord('latitude')\n spatial_agg.remove_coord('longitude')\n if grid_type == 'curvilinear':\n spatial_agg.remove_coord(coord_names[0])\n spatial_agg.remove_coord(coord_names[1])\n\n return spatial_agg", "def zonalStatsToFeatureCollection(image,zonesImage,geometry,maxPixels,reducerType):\n # reducertype can be mean, max, sum, first. 
Count is always included for QA\n # the resolution of the zonesimage is used for scale\n\n reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mean\"),ee.Reducer.mean(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"max\"),ee.Reducer.max(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"sum\"),ee.Reducer.sum(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"first\"),ee.Reducer.first(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mode\"),ee.Reducer.mode(),\"error\"))))\n )\n reducer = ee.Reducer(reducer).combine(reducer2= ee.Reducer.count(), sharedInputs= True).group(groupField=1, groupName=\"zones\") \n\n scale = zonesImage.projection().nominalScale().getInfo()\n zonesImage = zonesImage.select(zonesImage.bandNames(),[\"zones\"])\n\n totalImage = ee.Image(image).addBands(zonesImage)\n resultsList = ee.List(totalImage.reduceRegion(\n geometry= geometry, \n reducer= reducer,\n scale= scale,\n maxPixels=maxPixels\n ).get(\"groups\"))\n\n resultsList = resultsList.map(ensure_default_properties); \n fc = ee.FeatureCollection(resultsList.map(dict_to_feature))\n\n return fc", "def area_average(cube, region):\n \n # Specify the latitudes and longitudes starting from the smallest number to largest or in latitude and longitude from south to north and east to west\n lon1, lon2, lat1, lat2 = region[0], region[1], region[2], region[3] \n # Then intersect the data at these points\n cube = cube.intersection(longitude=(lon1, lon2),latitude=(lat1, lat2))\n\n #cube.coord('latitude').guess_bounds()\n #cube.coord('longitude').guess_bounds()\n\n # area weighting\n weights = iris.analysis.cartography.area_weights(cube)\n # Average that area by latitude and longitudes by the weighted mean\n cube = cube.collapsed(['latitude','longitude'], iris.analysis.MEAN, weights=weights)\n\n return cube", "def test_summarize(self):\n measurement = self.measurement(self.metric())\n self.assertEqual(\n {\n \"count\": {\"value\": None, \"status\": None},\n \"start\": measurement[\"start\"],\n \"end\": measurement[\"end\"],\n },\n measurement.summarize(),\n )", "def get_admins_per_region():\n chart_data = {}\n try:\n pipe = [\n {'$match': {VAX_AREA_KEY: {'$not': {'$eq': 'ITA'}}}},\n {\n '$group': {\n '_id': f'${VAX_AREA_KEY}',\n 'first': {'$sum': f'${VAX_FIRST_DOSE_KEY}'},\n 'second': {'$sum': f'${VAX_SECOND_DOSE_KEY}'},\n 'booster': {'$sum': f'${VAX_BOOSTER_DOSE_KEY}'}\n }\n }\n ]\n cursor = vax_admins_summary_coll.aggregate(pipeline=pipe)\n data = list(cursor)\n df = pd.DataFrame(data)\n df['region'] = df['_id'].apply(lambda x: OD_TO_PC_MAP[x])\n pop_dict = get_region_pop_dict()\n df['population'] = df['region'].apply(lambda x: pop_dict[x])\n df['percentage_2nd'] = df['second'].div(df['population'])\n df['percentage_3rd'] = df['booster'].div(df['population'])\n df.sort_values(by=['population'], ascending=False, inplace=True)\n chart_data = {\n \"title\": gettext('Admins per region'),\n \"categories\": df['region'].values.tolist(),\n \"pop_dict\": pop_dict,\n \"first\": {\n 'name': gettext(\"First Dose\"),\n 'data': df['first'].values.tolist()\n },\n \"second\": {\n 'name': gettext(\"Second Dose\"),\n 'data': df['second'].values.tolist()\n },\n \"booster\": {\n 'name': gettext(\"Booster Dose\"),\n 'data': df['booster'].values.tolist()\n },\n \"population\": {\n 'name': gettext(\"Population\"),\n 'data': df['population'].values.tolist()\n }\n }\n app.logger.debug(f\"region df : \\n{df}\")\n except Exception as e:\n app.logger.error(f\"While getting region chart data: {e}\")\n return 
chart_data", "def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = self.cube.coord('latitude').bounds\n all_lon_bounds = self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean", "def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # compute it with lat_bnds, lon_bnds etc.\n if vid==None:\n vid = 'reduced_'+mv.id\n time,lat,lon = tllAxes(mv)\n if hasattr(mv.parent,'variables'):\n fil = mv.parent # mv is a fileVariable and fil is a file.\n lat_bnds = fil[lat.bounds]\n else:\n lataxis = latAxis(mv) # mv is a TransientVariable\n lat_bnds = lataxis._bounds_\n\n mvta = timeave_old( mv )\n\n # In computing the average, we use area weighting.\n # Sometimes the area is available in cell_measures, but for now I'll just use the backup method:\n # The area of a lonlat cell is R^2*delta(lon)*delta(sin(lat)).\n # With equally spaced lon, we don't need delta(lon) for weights.\n # I'll assume that lat,lon are in degrees, which is the only way I've ever seen them.\n wgtsum = 0\n zm = 0\n for i,lati in enumerate(lat):\n # The following test could be sped up a lot, because lat[i] is ordered...\n # >>> to do: partial overlaps\n if latmin<=lati and lati<latmax:\n latlo = lat_bnds[i,0]\n lathi = lat_bnds[i,1]\n wgti = sin(radians(lathi))-sin(radians(latlo))\n zi = 0.0\n for j in range(len(lon)):\n zi += mvta[i,j]\n zi *= wgti\n wgtsum += wgti*len(lon)\n zm += zi\n zm /= wgtsum\n # zm is a scalar, so createVariable gets no axes argument:\n zmv = cdms2.createVariable( zm, id=vid )\n return zmv", "def calculate(self):\n self.results['max'] = numpy.max(self.data)\n self.results['min'] = numpy.min(self.data)\n if self.type == 0:\n self.group_discrete_data()\n if self.type == 1:\n self.group_continuous_data()\n\n self.results['arithAvg'] = self.average([self.data[i] * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences)\n self.results['quadAvg'] = math.sqrt(\n self.average([(self.data[i] * self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n 
self.totalOccurrences))\n if self.results['min'] > 0:\n self.results['geoAvg'] = math.exp(\n self.average([numpy.log(self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n self.results['harmAvg'] = 1 / self.average(\n [(self.occurrences[i] / self.data[i]) for i in range(len(self.data))],\n self.totalOccurrences)\n else:\n self.results['geoAvg'] = self.results['harmAvg'] = \"N/A\"\n self.results['momentsR'] = self.moments(self.data, self.occurrences, 4)\n self.results['centralMomentsR'] = self.moments([(i - self.results['arithAvg']) for i in self.data],\n self.occurrences, 4)\n self.results['std'] = self.average(\n [self.occurrences[i] * abs(self.data[i] - self.results['arithAvg']) for i in range(len(self.data))],\n self.totalOccurrences)", "def add_region_feature(data):\n\n data.loc[:, 'region'] = data.loc[:, 'district'].apply(\n lambda x: mapping.SOFIA_NEIGHBOURHOOD_TO_REGION_MAPPING[x]\n )\n\n return data", "def setZmAreas(self):\n self.zmareas = []\n for sample in self.samples:\n self.zmareas.append(sample.ZmArea)", "def zonal_statistics(self, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n process_id = 'zonal_statistics'\n args = {\n 'imagery': self.graph,\n 'regions': regions_geojson,\n 'func': func,\n 'scale': scale,\n 'interval': interval\n }\n\n return self.graph_add_process(process_id, args)", "def getCounts():\n for area in AREAS:\n print(area['name'])\n lat = area['lat']\n lng = area['lng']\n count = utils.getExtracted(countCrimes, lat, lng)\n print('count: %s' % count)\n if type(count) is list:\n area['count'] = count[0]\n return AREAS", "def _aggregate(group_df, sampling_percentage=5 * 2.5):\n out = {}\n dist = []\n total_count = 0\n for i, col in enumerate(columns):\n\n n = group_df[col].sum()\n total_count += n\n dist.append(dict(min=bins[i][0], max=bins[i][1], n=n))\n\n # only aggregate if we have data!\n if total_count:\n aggval, moe = cda.approximate_median(\n dist, sampling_percentage=sampling_percentage\n )\n else:\n aggval = np.nan\n moe = np.nan\n\n result = {}\n result[\"median\"] = aggval\n result[\"median_moe\"] = moe\n result[\"geometry\"] = group_df.geometry.unary_union\n\n return pd.Series(result)", "def calculate_world_statistics(countries_df, group_col):\n world_df = countries_df.drop(drop_cols, axis=1).groupby(group_col).agg('mean').reset_index()\n world_df['Country/Region'] = 'World'\n world_df['WHO Region'] = 'World'\n world_df['Population'] = population_data['Population'].sum()\n\n return world_df", "def geo_data_analysis(search_term):\n map_pol = dict()\n\n #A list of tweet texts from each region\n NE_text = geo_collect_tweets(search_term,42.781158,-71.398729,'250mi')\n S_text = geo_collect_tweets(search_term,33.000000,-84.000000,'500mi')\n MW_text = geo_collect_tweets(search_term,40.000000,-100.000000,'1000mi')\n W_text = geo_collect_tweets(search_term,35.000000,-120.000000,'250mi')\n \n #A list of sentiment values for the tweets from each region \n NE_sentiment_values = sentiment(NE_text)\n S_sentiment_values = sentiment(S_text)\n MW_sentiment_values = sentiment(MW_text)\n W_sentiment_values = sentiment(W_text)\n\n #find the average sentiment value for each region\n NE_avg = sum(NE_sentiment_values)/len(NE_sentiment_values)\n S_avg = sum(S_sentiment_values)/len(S_sentiment_values)\n MW_avg = sum(MW_sentiment_values)/len(MW_sentiment_values)\n W_avg = 
sum(W_sentiment_values)/len(W_sentiment_values)\n\n return [W_avg,S_avg,NE_avg,MW_avg]", "def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will be fast enough:\n lataxis = latAxis( mv )\n lmin = -1\n lmax = len(lataxis)\n if lataxis[0]>=latmin: lmin = 0\n if lataxis[-1]<=latmax: lmax = len(lataxis)-1\n if lmin==-1 or lmax==len(lataxis):\n for l,ax in enumerate(lataxis):\n if lmin==-1 and ax>=latmin: lmin = max( 0, l )\n if lmax==len(lataxis) and ax>=latmax: lmax = min( l, len(lataxis) )\n lataxis_shrunk = lataxis.subaxis(lmin,lmax)\n mv2shape = list(mv.shape)\n mv2shape[ilat] = lmax-lmin+1\n axes[ilat] = lataxis_shrunk\n mvd1 = numpy.delete( mv, slice(0,lmin), ilat )\n mvdata = numpy.delete( mvd1, slice(lmax-lmin,len(lataxis)-lmin), ilat )\n mv2 = cdms2.createVariable( mvdata, axes=axes )\n\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n avmv = averager( mv2, axis=axes_string )\n avmv.id = vid # Note that the averager function returns a variable with meaningless id.\n ammv.units = mv.units\n\n return avmv", "def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()", "def addproperties_json(source, mortspd):\n with open(source, encoding=\"utf-8\",mode=\"r\") as f: # load boundaries\n boundaries = json.load(f)\n \n\n for regionBoundary in boundaries['features']: # get nb murdered by region\n del regionBoundary['properties']['admin1Pcod']\n del regionBoundary['properties']['admin1RefN']\n \n regionBoundary['properties']['Departement'] = regionBoundary['properties']['admin1Name']\n \n currentRegion = regionBoundary['properties']['Departement']\n if currentRegion in mortspd:\n regionBoundary['properties']['Morts'] = mortspd[currentRegion]\n \n else: \n regionBoundary['properties']['Morts'] = 0 \n continue\n return boundaries", "def zonal_statistics(self, imagery, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n\n graph = {\n 'process_id': 'zonal_statistics',\n 'imagery': imagery.graph,\n 'regions': regions_geojson,\n 'func': func,\n 'scale': scale,\n 'interval': 
interval\n }\n\n imagery.graph = graph\n\n return imagery", "def residual_measure(measure, obj,Qfilter=None, **filters):\n if not Qfilter is None:\n objs_found=obj.objects.filter(Qfilter,**filters)\n else:\n objs_found=obj.objects.filter(**filters)\n if objs_found.count()<=0:\n return np.nan\n stus_found=[]\n residuals=pd.Series()\n for o in objs_found:\n stu=o.upn\n temp_filters=dict(filters)\n for t in filters.keys():\n if \"classgroup\" in t or \"subject\" in t:\n temp_filters.pop(t)\n stu_set=obj.objects.filter(upn=stu, **temp_filters)\n if not Qfilter is None:\n stu_set=found_objs.filter(Qfilter)\n stu_avg=stu_set.aggregate(models.Avg(measure))[measure+'__avg']\n try:\n if stu not in stus_found:\n stus_found.append(stu)\n stu_objs=objs_found.filter(upn=stu)\n if stu_objs.count()>1:\n stu_val=stu_objs.aggregate(models.Avg(measure))[measure+'__avg']\n objs_found=objs_found.exclude(upn=stu)\n elif \"__\" in measure:\n obj_fks=measure.split(\"__\")\n stu_val=o\n for fk in obj_fks:\n stu_val=getattr(stu_val,fk)\n else:\n stu_val=getattr(o,measure)\n stu_val=float(stu_val)\n residual=stu_val-stu_avg\n except:\n print(\"Error calculating \" + str(stu_val) + \" - \" + str(stu_avg))\n residual=np.nan\n residuals[stu.upn]=residual\n residual_avg=residuals.mean()\n if residual_avg is None:\n return np.nan\n else:\n return round(residual_avg,2)", "def _aggregate(group_df):\n out = {}\n for col in data_columns:\n # The name of the error column (if it exists)\n error_col = f\"{col}_moe\"\n\n # remove any NaN rows\n subset = group_df.dropna(subset=[col], how=\"any\")\n\n # aggregat if we had any rows left\n if len(subset):\n\n # column values, margin of error (if it exists)\n args = np.column_stack(\n [subset[col], subset.get(error_col, np.zeros(len(subset)))]\n )\n\n # do the aggregation\n aggval, moe = cda.approximate_sum(*args)\n else:\n aggval = moe = np.nan\n\n # store\n out[col] = aggval\n if error_col in subset.columns:\n out[f\"{col}_moe\"] = moe\n\n out[\"geometry\"] = group_df.geometry.unary_union\n return pd.Series(out)", "def rendered_regions(self, obj):\n return obj.render_json(self.context.get('request'))", "def __init__(self):\n super().__init__()\n self.metric = 'AREA'", "def summarize(self, locuslen):\n # First, calculate the mean of the parameter estimates from each\n # of the replicates\n hot_means = []\n for r_t in zip(*self.hot_params):\n v = [x for x in r_t if not math.isnan(x)]\n hot_means.append(sum(v)/len(v))\n cold_means = []\n for r_t in zip(*self.cold_params):\n v = [x for x in r_t if not math.isnan(x)]\n cold_means.append(sum(v)/len(v))\n bfgs_means = []\n for r_t in zip(*self.opt_params):\n v = [x for x in r_t if not math.isnan(x)]\n bfgs_means.append(sum(v)/len(v))\n theta_mean = sum(self.theta) / len(self.theta)\n # Then, convert the parameters into meaningful values\n # the theta estimate is 4*Na*u*L\n anc_ne = theta_mean / (4 * 3e-9 * locuslen)\n # Then, the parameters are scaled by that. 
Population sizes are scaled\n # by theta (4Na), and times and migration rates are given in units of\n # 2N.\n scaled_params = []\n for name, val in zip(self.params['Names'], bfgs_means):\n if name.startswith('N'):\n scaled_params.append(val * anc_ne)\n elif name.startswith('m'):\n scaled_params.append(val /(anc_ne * 2))\n elif name.startswith('T'):\n scaled_params.append(val * anc_ne * 2)\n else:\n scaled_params.append(val)\n # Write these values into the class data\n self.hot_mean = hot_means\n self.cold_mean = cold_means\n self.bfgs_mean = bfgs_means\n self.theta_mean = theta_mean\n self.Na = anc_ne\n self.scaled_params = scaled_params\n return", "def _get_res_mean(resource, res_gid, output_request):\n\n out_req_nomeans = copy.deepcopy(output_request)\n res_mean = None\n idx = resource.sites.index(res_gid)\n irrad_means = ('dni_mean', 'dhi_mean', 'ghi_mean',\n 'clearsky_dni_mean', 'clearsky_dhi_mean',\n 'clearsky_ghi_mean')\n\n if 'ws_mean' in out_req_nomeans:\n out_req_nomeans.remove('ws_mean')\n res_mean = {}\n res_mean['ws_mean'] = resource['mean_windspeed', idx]\n\n else:\n for var in resource.var_list:\n label_1 = '{}_mean'.format(var)\n label_2 = 'mean_{}'.format(var)\n if label_1 in out_req_nomeans:\n out_req_nomeans.remove(label_1)\n if res_mean is None:\n res_mean = {}\n res_mean[label_1] = resource[label_2, idx]\n\n if label_1 in irrad_means:\n # convert to kWh/m2/day\n res_mean[label_1] /= 1000\n res_mean[label_1] *= 24\n\n return res_mean, out_req_nomeans", "def _reduce_objective_values(self, trajectory, objective_values):\n if self.params.obj_type == 'mean':\n res = tf.reduce_mean(objective_values, axis=1)\n elif self.params.obj_type == 'valid_mean':\n valid_mask_nk = trajectory.valid_mask_nk\n obj_sum = tf.reduce_sum(objective_values * valid_mask_nk, axis=1)\n res = obj_sum / trajectory.valid_horizons_n1[:, 0]\n else:\n assert (False)\n return res", "def get_artif_area(self):\n result = self.cities.all().aggregate(total=Sum(\"surface_artif\"))\n return result[\"total\"] or 0", "def setZmAreas(self):\n self.zmareas = []\n for site in self.sites:\n self.zmareas.append(site.siteZmArea)", "def __init__(self):\n self.regions = []", "def calculate_continent_statistics(countries_df, group_col):\n continents_df = countries_df.drop(drop_cols, axis=1).groupby([group_col, 'WHO Region']).agg('mean').reset_index()\n continents_df['Country/Region'] = continents_df['WHO Region']\n continents_df['Population'] = population_data['Population'].sum()\n\n return continents_df", "def cube_ensemble_mean(self, cube):\n success = False\n try:\n if len(cube.coord(self.realization).points) > 1 or \\\n cube.coord(self.realization) in cube.coords(dim_coords=True):\n cube = cube.collapsed(self.realization, iris.analysis.MEAN)\n success = True\n except iris.exceptions.CoordinateNotFoundError:\n pass\n try:\n if len(cube.coord(self.forecast_ref_time).points) > 1 or \\\n cube.coord(self.forecast_ref_time) in \\\n cube.coords(dim_coords=True):\n cube = cube.collapsed(self.forecast_ref_time, \n iris.analysis.MEAN)\n success = True\n except iris.exceptions.CoordinateNotFoundError:\n pass\n if not success:\n raise iris.exceptions.CoordinateNotFoundError(\n 'No ensemble coordinates found.')\n return cube", "def summary(\n self,\n fex_object,\n mean=False,\n max=False,\n min=False,\n ignore_sessions=False,\n *args,\n **kwargs,\n ):\n self.extracted_features.append(\n fex_object.extract_summary(mean, max, min, ignore_sessions, *args, **kwargs)\n )", "def calc_regional_values(infiles, variable, time_constraint, 
area_cube):\n\n cube, coord_names, aux_coord_names, grid_type = read_data(infiles, variable, time_constraint)\n\n cube_list = iris.cube.CubeList([])\n for region in ['globe', 'nh', 'sh', 'nhext', 'tropics', 'shext']:\n region_sum = calc_region_sum(cube, coord_names, aux_coord_names, grid_type, area_cube, region)\n region_sum = rename_cube(region_sum, region + ' sum')\n cube_list.append(region_sum)\n\n return cube_list", "def preprocess(df, combine_list, single_provinces=[\"Hubei\"]):\n \n \"\"\" Mark single regions that are to remain separate\"\"\"\n for single_province in single_provinces:\n df.loc[df[\"Province/State\"]==single_province, \"Country/Region\"] = single_province\n \n \"\"\" Combine rows for other country provinces\"\"\"\n next_index = max(df.index)\n for singlename in combine_list:\n \n \"\"\" Select country\"\"\"\n singlecountry = df.loc[df[\"Country/Region\"]==singlename,:]\n \n \"\"\" Compute sum of provinces\"\"\"\n singlesum = singlecountry.sum(axis=0)\n \n \"\"\" Set other column variables\"\"\"\n singlesum[\"label\"] = singlename\n singlesum[\"Province/State\"] = np.nan\n singlesum[\"Country/Region\"] = singlename\n \n \"\"\" Drop provinces from DataFrame\"\"\"\n df = df.loc[df[\"Country/Region\"]!=singlename,:]\n \n \"\"\"Merge country sum into DataFrame\"\"\"\n singlesum.name = next_index\n next_index += 1\n df = df.append(singlesum)\n\n \"\"\" Rename rest of Mainland China\"\"\"\n df.loc[df[\"Country/Region\"]==\"Mainland China\", \"Country/Region\"] = \"Mainland China w/o Hubei\"\n df.loc[df[\"Country/Region\"]==\"China\", \"Country/Region\"] = \"China w/o Hubei\"\n \n \"\"\" Reset index to region name\"\"\"\n df[\"label\"] = df[\"Country/Region\"]\n df.loc[pd.notna(df[\"Province/State\"]),\"label\"] = df.loc[pd.notna(df[\"Province/State\"]),:][\"Province/State\"]\n df.index = df[\"label\"]\n \n df = df.sort_index()\n \"\"\" Drop unused columns\"\"\"\n df = df.drop(['Province/State', 'Country/Region', 'Lat', 'Long', \"label\"], axis = 1) \n \n \"\"\" Return\"\"\"\n return df", "def test_aws_service_api_regions_get(self):\n pass", "def region(self, box: list):\n is_box(box, errors=\"raise\") # Validate the box definition\n self.fetcher = self.Fetchers[\"region\"](box=box, **self.fetcher_options)\n self._AccessPoint = \"region\" # Register the requested access point\n self._AccessPoint_data = {'box': box} # Register the requested access point data\n\n if self._mode == \"standard\" and self._dataset_id != \"ref\":\n def postprocessing(xds):\n xds = self.fetcher.filter_data_mode(xds)\n xds = self.fetcher.filter_qc(xds)\n xds = self.fetcher.filter_variables(xds, self._mode)\n return xds\n self.postproccessor = postprocessing\n\n return self", "def update_only_total_statistics(self):\n\n self.average_radius = 0\n self.total_root_length = 0\n\n total_radius = 0\n\n for root in self.root_dict.values():\n\n self.total_root_length += root.total_length\n\n total_radius += root.total_length * root.average_radius\n\n self.average_radius = total_radius / self.total_root_length", "def avg_measure(measure,obj,Qfilter=None,**filters):\n\tif not Qfilter is None:\n\t\tfound_objs=obj.objects.filter(Qfilter,**filters)\n\telse:\n\t\tfound_objs=obj.objects.filter(**filters)\n\tavg=found_objs.aggregate(models.Avg(measure))[measure+'__avg']\n\tif avg is None:\n\t\treturn np.nan\n\telse:\n\t\treturn round(avg,3)", "def set_region(sender, instance, *args, **kwargs):\n if instance.geocity and not instance.georegion:\n instance.georegion = instance.geocity.region", "def region(self, args):\n 
m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def aggregate_results(self, results):\n result = dict()\n result['MAE'] = self.average_dict_items(results, 'MAE')\n result['MdAE'] = self.average_dict_items(results, 'MdAE')\n result['RMSE'] = self.average_dict_items(results, 'RMSE')\n result['SMAPE'] = self.average_dict_items(results, 'SMAPE')\n result['num_values'] = self.average_dict_items(results, 'num_values')\n return result", "def compute_statistics(self, region):\n x = 0.0\n y = 0.0\n n = 1\n for pixel in region:\n n = n + 1\n x = x + pixel[0]\n y = y + pixel[1]\n\n x = x / n\n y = y / n\n k = 1\n print(\"Region: \" + str(k) + \", Centroid: (\" + str(x) + \",\" + str(y) + \"), Area: \" + str(n))\n\n # Please print your region statistics to stdout\n # <region number>: <location or center>, <area>\n # print(stats)\n\n return n", "def finalize_aggregated_data(aggregated_data):\n\n if aggregated_data['sum'] < aggregated_data['min']:\n aggregated_data['min'] = aggregated_data['sum']\n\n if aggregated_data['sum'] > aggregated_data['max']:\n aggregated_data['max'] = aggregated_data['sum']\n\n return aggregated_data", "def reaggregate(self, region_masks=None):\n if region_masks is None:\n region_masks = self.region_masks\n\n da = self.outputs.where(region_masks, 0).sum(dim=('lat', 'lon'))\n df = da.to_dataframe().drop(columns=['spatial_ref'])\n\n return df", "def get_vaccine_stats(self):\n final_response = {}\n dataframe = self.query_api()\n filtered_dataframe = self.filter_state(dataframe, self.state)\n final_response[self.state] = self.get_stats(filtered_dataframe, self.state)\n filtered_dataframe = self.filter_state(dataframe, \"India\")\n final_response[\"India\"] = self.get_stats(filtered_dataframe, \"India\")\n return json.dumps(final_response, indent=2)", "def mean(self) -> Dict:\n raise NotImplementedError", "def test_avalanche_warning_by_region_obs(self):\n pass", "def add_Longhurst_Province_raster_to_array(ds):\n import geopandas\n from rasterio import features\n from affine import Affine\n # Get the shape files\n provinces = geopandas.read_file('/work/home/ts551/data/longhurst_v4_2010')\n shapes = [(shape, n) for n, shape in enumerate(provinces.geometry)]\n # Now add the existing array\n ds_tmp = ds[list(ds.data_vars)[0]].copy().mean(dim='time')\n # Add raster the provinces onto this\n ds_tmp['LonghurstProvince'] = rasterize(shapes, ds_tmp.coords)\n # Then update the variable\n ds['LonghurstProvince'] = ds_tmp['LonghurstProvince']\n # Add Some attributes\n attrs = {\n 'Long name': 'Longhurst Provinces',\n 'data downloaded from': 
'http://www.marineregions.org/downloads.php#longhurst',\n 'version': 'Version 4 - March 2010',\n 'Citations': \"Longhurst, A.R et al. (1995). An estimate of global primary production in the ocean from satellite radiometer data. J. Plankton Res. 17, 1245-1271 ; Longhurst, A.R. (1995). Seasonal cycles of pelagic production and consumption. Prog. Oceanogr. 36, 77-167 ; Longhurst, A.R. (1998). Ecological Geography of the Sea. Academic Press, San Diego. 397p. (IMIS) ; Longhurst, A.R. (2006). Ecological Geography of the Sea. 2nd Edition. Academic Press, San Diego, 560p.\",\n }\n ds['LonghurstProvince'].attrs = attrs\n return ds", "def extract_object_properties(segmented_image_path, intensity_image_path, image_name, xy_scale, z_scale):\n\n print('Extracting object properties for {image_name}'.format(image_name=image_name))\n\n # import packages needed for object extraction\n from skimage.io import imread\n from scipy.ndimage import label as ndi_label\n from skimage import measure\n\n # read in images\n segmented_image = imread(segmented_image_path)\n intensity_image = imread(intensity_image_path)\n\n # label connected components\n labeled, num_features = ndi_label(segmented_image)\n\n # measure properties\n region_properties = measure.regionprops(labeled, intensity_image = intensity_image)\n\n object_data_list = []\n\n for prop in region_properties:\n\n # apply the z scale and xy scales to the centroid and coordinates lists\n centroid = list(prop.centroid)\n centroid_scaled = [centroid[0] * z_scale, centroid[1]*xy_scale, centroid[2] * xy_scale]\n\n coords = prop.coords.tolist()\n coords_scaled = [[coord[0]*z_scale, coord[1]* xy_scale, coord[2]*xy_scale] for coord in coords ]\n\n # create a dict containing object properties\n object_properties_dict = {\n 'area': int(prop.area),\n 'min_intensity' : int(prop.min_intensity),\n 'max_intensity' : int(prop.max_intensity),\n 'mean_intensity' : int(prop.mean_intensity),\n 'total_intensity': int(prop.intensity_image.sum()),\n 'object_id' : int(prop.label),\n 'name': image_name,\n 'centroid': centroid_scaled,\n 'coordinates': coords_scaled,\n 'intensity_image': prop.intensity_image.tolist()}\n\n object_data_list.append(object_properties_dict)\n\n return object_data_list", "def set_borders_of_operation_area(self):\n for edge_document in self.edge_documents:\n starting_node = edge_document.get('starting_node')\n starting_node_point_document = starting_node.get('point')\n starting_node_longitude = starting_node_point_document.get('longitude')\n starting_node_latitude = starting_node_point_document.get('latitude')\n\n if starting_node_longitude < self.minimum_longitude:\n self.minimum_longitude = starting_node_longitude\n\n if starting_node_longitude > self.maximum_longitude:\n self.maximum_longitude = starting_node_longitude\n\n if starting_node_latitude < self.minimum_latitude:\n self.minimum_latitude = starting_node_latitude\n\n if starting_node_latitude > self.maximum_latitude:\n self.maximum_latitude = starting_node_latitude\n\n ending_node = edge_document.get('ending_node')\n ending_node_point_document = ending_node.get('point')\n ending_node_longitude = ending_node_point_document.get('longitude')\n ending_node_latitude = ending_node_point_document.get('latitude')\n\n if ending_node_longitude < self.minimum_longitude:\n self.minimum_longitude = ending_node_longitude\n\n if ending_node_longitude > self.maximum_longitude:\n self.maximum_longitude = ending_node_longitude\n\n if ending_node_latitude < self.minimum_latitude:\n self.minimum_latitude = 
ending_node_latitude\n\n if ending_node_latitude > self.maximum_latitude:\n self.maximum_latitude = ending_node_latitude", "def _agg_by_mean(self):\n return self._data_grouped_by_manufacturer.agg('mean')[['car_value']]", "def get_geo_data(self):\n # Get all countries and create a dictionary by name\n countries_shp = shpreader.natural_earth(\n resolution='10m',\n category='cultural',\n name='admin_0_countries',\n )\n self.countries = list(shpreader.Reader(countries_shp).records())\n self.countries_by_name = {}\n self.countries_by_iso_a2 = {}\n for country in shpreader.Reader(countries_shp).records():\n self.countries_by_name[country.attributes['NAME_LONG']] = country\n self.countries_by_iso_a2[country.attributes['ISO_A2']] = country\n\n # Get all states and create a dictionary by name\n states_provinces_shp = shpreader.natural_earth(\n resolution='50m',\n category='cultural',\n name='admin_1_states_provinces',\n )\n# full_list = list(shpreader.Reader(states_provinces_shp).records())\n# self.states = [x for x in full_list if x.attributes['type_en'] == 'State']\n self.states = list(shpreader.Reader(states_provinces_shp).records())\n self.states_by_name = {}\n for state in self.states:\n self.states_by_name[state.attributes['name']] = state\n\n # Get all timezones and create a dictionary by name\n timezones_shp = shpreader.natural_earth(\n resolution='10m',\n category='cultural',\n name='time_zones',\n )\n self.timezones = list(shpreader.Reader(timezones_shp).records())\n self.timezones_by_name = {}\n for timezone in shpreader.Reader(timezones_shp).records():\n # Try to get the actual name. Something like `Europe/Berlin`\n timezone_name = timezone.attributes['tz_name1st']\n # If there is no name, we default to the utc offset name `-5` `+4.5`\n if timezone_name == '':\n timezone_name = timezone.attributes['name']\n\n if timezone_name not in self.timezones_by_name.keys():\n self.timezones_by_name[timezone_name] = timezone", "def AddRegions(self, **kwargs):\n # Addregions use pixel coordinates. 
listRegions and SaveRegions use RA and Dec.\n n_objs = 0\n objs = []\n # default shape is circle\n if not 'shape' in kwargs:\n kwargs['shape'] = ['circle']\n for k in kwargs.keys():\n n_objs = max(n_objs, len(kwargs[k]))\n for j in range(n_objs):\n temp = {}\n for k in kwargs.keys():\n try:\n temp[k] = kwargs[k][j]\n except IndexError:\n if k == 'shape': \n temp[k] = 'circle'\n objs.append(temp)\n self.all_objs = json.dumps(objs)\n command = \"JS9.AddRegions({objs}, {{display:'{wid}{suffix}'}})\".format(objs=self.all_objs, wid=self.wid, suffix=self.suffix)\n get_ipython().run_cell_magic('javascript', '', command)", "def reindexed_dataframe(self):\n tmp = self.metric.copy()\n tmp.index = tmp.index.map(self.match_regions())\n #give the same index order as the geojson\n out = tmp.reindex(index = self.regions_names())\n return out\n #index_name = out.index.name\n\n #return out.reset_index().dropna().set_index(index_name)[self.metric.name]", "def polygonal_mean_timeseries(self, polygon: Union[Polygon, MultiPolygon]) -> 'ImageCollection':\n\n geojson = mapping(polygon)\n geojson['crs'] = {\n 'type': 'name',\n 'properties': {\n 'name': 'EPSG:4326'\n }\n }\n\n process_id = 'zonal_statistics'\n\n args = {\n 'imagery': self.graph,\n 'regions': geojson,\n 'func': 'avg'\n }\n\n return self.graph_add_process(process_id, args)", "def polygonal_mean_timeseries(self, polygon: Union[Polygon, MultiPolygon]) -> 'ImageCollection':\n\n geojson = mapping(polygon)\n geojson['crs'] = {\n 'type': 'name',\n 'properties': {\n 'name': 'EPSG:4326'\n }\n }\n\n process_id = 'zonal_statistics'\n\n args = {\n 'imagery': self.graph,\n 'regions': geojson,\n 'func': 'avg'\n }\n\n return self.graph_add_process(process_id, args)", "def get_means_map(self, ds_name, year, region=None,\n region_col='state'):\n lat_lons = self.lat_lon\n gids = slice(None)\n if region is not None:\n gids = self.region_gids(region, region_col=region_col)\n lat_lons = lat_lons[gids]\n\n means_map = self[ds_name, year, gids].mean(axis=0)\n means_map = pd.DataFrame({'longitude': lat_lons[:, 1],\n 'latitude': lat_lons[:, 0],\n ds_name: means_map})\n\n return means_map", "def region_region_checkerboard(self, **_):\n outputs: dict = {}\n\n if self.AGG_BY == \"zone\":\n agg = \"zone\"\n else:\n agg = \"region\"\n\n # List of properties needed by the plot, properties are a set of tuples and\n # contain 3 parts: required True/False, property name and scenarios required,\n # scenarios must be a list.\n properties = [(True, f\"{agg}_{agg}s_Net_Interchange\", self.Scenarios)]\n\n # Runs get_formatted_data within PlotDataStoreAndProcessor to populate PlotDataStoreAndProcessor dictionary\n # with all required properties, returns a 1 if required data is missing\n check_input_data = self.get_formatted_data(properties)\n\n if 1 in check_input_data:\n return MissingInputData()\n\n ncols, nrows = set_x_y_dimension(len(self.Scenarios))\n grid_size = ncols * nrows\n excess_axs = grid_size - len(self.Scenarios)\n\n mplt = PlotLibrary(nrows, ncols, squeeze=False, ravel_axs=True)\n fig, axs = mplt.get_figure()\n plt.subplots_adjust(wspace=0.02, hspace=0.4)\n max_flow_group = []\n Data_Out = []\n n = 0\n for scenario in self.Scenarios:\n rr_int = self[f\"{agg}_{agg}s_Net_Interchange\"].get(scenario)\n if shift_leapday:\n rr_int = adjust_for_leapday(rr_int)\n\n if self.AGG_BY != \"region\" and self.AGG_BY != \"zone\":\n agg_region_mapping = (\n self.region_mapping[[\"region\", self.AGG_BY]]\n .set_index(\"region\")\n .to_dict()[self.AGG_BY]\n )\n # Checks if keys all 
aggregate to a single value, this plot requires multiple values to work\n if len(set(agg_region_mapping.values())) == 1:\n return UnsupportedAggregation()\n rr_int = rr_int.reset_index()\n rr_int[\"parent\"] = rr_int[\"parent\"].map(agg_region_mapping)\n rr_int[\"child\"] = rr_int[\"child\"].map(agg_region_mapping)\n rr_int_agg = rr_int.groupby([\"parent\", \"child\"], as_index=True).sum()\n rr_int_agg.rename(columns={\"values\": \"flow (MW)\"}, inplace=True)\n rr_int_agg = rr_int_agg.loc[\n rr_int_agg[\"flow (MW)\"] > 0.01\n ] # Keep only positive flows\n rr_int_agg.sort_values(ascending=False, by=\"flow (MW)\")\n rr_int_agg = rr_int_agg / 1000 # MWh -> GWh\n\n data_out = rr_int_agg.copy()\n data_out.rename(\n columns={\"flow (MW)\": \"{} flow (GWh)\".format(scenario)}, inplace=True\n )\n\n max_flow = max(rr_int_agg[\"flow (MW)\"])\n rr_int_agg = rr_int_agg.unstack(\"child\")\n rr_int_agg = rr_int_agg.droplevel(level=0, axis=1)\n\n current_cmap = plt.cm.get_cmap()\n current_cmap.set_bad(color=\"grey\")\n\n axs[n].imshow(rr_int_agg)\n axs[n].set_xticks(np.arange(rr_int_agg.shape[1]))\n axs[n].set_yticks(np.arange(rr_int_agg.shape[0]))\n axs[n].set_xticklabels(rr_int_agg.columns)\n axs[n].set_yticklabels(rr_int_agg.index)\n axs[n].set_title(scenario.replace(\"_\", \" \"), fontweight=\"bold\")\n\n # Rotate the tick labels and set their alignment.\n plt.setp(\n axs[n].get_xticklabels(),\n rotation=90,\n ha=\"right\",\n rotation_mode=\"anchor\",\n )\n\n # Delineate the boxes and make room at top and bottom\n axs[n].set_xticks(np.arange(rr_int_agg.shape[1] + 1) - 0.5, minor=True)\n axs[n].set_yticks(np.arange(rr_int_agg.shape[0] + 1) - 0.5, minor=True)\n axs[n].grid(which=\"minor\", color=\"k\", linestyle=\"-\", linewidth=1)\n axs[n].tick_params(which=\"minor\", bottom=False, left=False)\n\n max_flow_group.append(max_flow)\n Data_Out.append(data_out)\n n += 1\n\n # Remove extra axes\n mplt.remove_excess_axs(excess_axs, grid_size)\n\n cmap = cm.inferno\n norm = mcolors.Normalize(vmin=0, vmax=max(max_flow_group))\n cax = plt.axes([0.90, 0.1, 0.035, 0.8])\n fig.colorbar(\n cm.ScalarMappable(norm=norm, cmap=cmap),\n cax=cax,\n label=\"Total Net Interchange [GWh]\",\n )\n plt.xlabel(\"To Region\", color=\"black\", rotation=\"horizontal\", labelpad=40)\n plt.ylabel(\"From Region\", color=\"black\", rotation=\"vertical\", labelpad=40)\n\n data_table_out = pd.concat(Data_Out, axis=1)\n save_figures = self.figure_folder.joinpath(f\"{self.AGG_BY}_transmission\")\n fig.savefig(\n save_figures.joinpath(\"region_region_checkerboard.svg\"),\n dpi=600,\n bbox_inches=\"tight\",\n )\n data_table_out.to_csv(save_figures.joinpath(\"region_region_checkerboard.csv\"))\n\n outputs = DataSavedInModule()\n return outputs", "def all_average(structure, name=None):\n num_replicas = get_num_replicas()\n\n if num_replicas <= 1:\n return structure\n\n if (tf.distribute.has_strategy() and tf.distribute.get_replica_context()\n and not get_tf_replicator()):\n return tf.distribute.get_replica_context().all_reduce(\n tf.distribute.ReduceOp.MEAN, structure)\n\n return nest.map_structure(lambda x: x / num_replicas, all_sum(structure,\n name=name))", "def test_roi_averaging(self):\n filename = get_test_data_path() + 'sgacc_mask.nii.gz'\n regions = self.dataset.masker.mask(filename, in_global_mask=True)\n avg_vox = reduce.average_within_regions(self.dataset, regions)\n n_studies = self.dataset.image_table.data.shape[1]\n self.assertEqual(n_studies, avg_vox.shape[1])\n self.assertGreater(avg_vox.sum(), 0.05)", "def pick_area(data 
,total_process, interval ,list_of_vars, list_of_areas, init_time=0, pr_height=None, ):\n \n \n \n #trying if the longitude values change from 0 to 360 or -180 to 180?\n \n if data['lon'].values[0] < 0:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [-142,-42,0,60],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [-100,-75,18,31],\n 'carribeans' : [-85,-60,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [-180, 180 ,0,90]}\n \n # -180 to 180 change the values given in the dictionary to relevant\n else:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [218,318,-10,70],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [260,285,14,37],\n 'carribeans' : [275,300,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [0, 360 ,0,90]}\n \n \n \n places_dict = {}\n #looping in the list of areas\n say_pl = 1\n for pl in list_of_areas:\n variables_l = {}\n #looping in the list of variables\n say_var =1\n for var in list_of_vars:\n #check if data contains 'lev' coords.\n try:\n \n #wrap the data\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]), \n lev=pr_height).isel(time=slice(init_time, total_process, interval))\n \n #if no 'lev' coords exist.\n except:\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]),).isel(time=slice(init_time, total_process, interval))\n \n #append a single variable given by the user\n variables_l[var] = single\n \n \n #append all the variables with respect to their area of interest.\n places_dict[pl] = variables_l\n \n #return\n return places_dict", "def mask_all_but(region='All', M_all=False, saizlopez=False,\n res='4x5', trop_limit=True, mask2D=False, mask3D=False,\n mask4D=False,\n use_multiply_method=True, lat=None, lon=None,\n verbose=False, debug=False):\n logging.info('mask_all_but called for region/MaskName {}'.format(region))\n # --- Setup MaskNumbers...\n #\n# MaskName = region # Switch to using instead of region?\n # Get Dictionary of mask details\n MaskDict = GetMaskExtents(None, ReturnDataFrame=True)\n\n # Get List of Rectangluar masks\n RectangluarMasks = MaskDict.loc[ MaskDict['Rectangle?']==True, : ]\n\n # ( except None, unmask_all and global to retrive no mask )\n# MaskDict = {\n# 'Tropics': 0,\n# 'tropics': 0,\n# 'mid_lats': 1,\n# 'Mid Lats': 1,\n# 'Mid lats': 1,\n# 'south_pole': 2,\n# 'south pole': 2,\n# 'north_pole': 3,\n# 'north pole': 3,\n# None: 4,\n# 'unmask_all': 4,\n# 'All': 4,\n# 'global': 4,\n# 'Global': 4,\n# 'Oceanic': 6,\n# 'Ocean': 6,\n# 'Ocean Tropics': 13,\n# 'Oceanic Tropics': 13,\n# 'Ocn. Trop.': 13,\n# # NEED TESTING ...\n# 'Extratropics': 5,\n# 'Ex. Tropics': 5,\n# 'NH': 7,\n# 'SH': 8,\n# 'Ice': 10,\n# 'Land': 11,\n# 'lat40_2_40': 12,\n# 'Land Tropics': 14,\n# 'All Sur.': 15,\n# 'surface': 15,\n# 'Ocean Sur.': 16,\n# 'Land Sur.': 17,\n# 'Ice Sur.': 18,\n# 'lat50_2_50': 19,\n# '50S-50N': 19,\n# # 'Oceanic lat50_2_50': 20,\n# 'Ocn. 50S-50N': 20,\n# # 'South >60': 2,\n# # 'North >60': 3\n# 'North Sea': 21,\n# 'Med. 
Sea': 22,\n# 'Mediterranean Sea': 22,\n# 'Black Sea': 23,\n# 'Irish Sea': 24,\n# 'Europe': 25,\n# 'EU': 25,\n# # 'Surface BL': 26,\n# 'Land Tropics Sur.': 27,\n# 'Boreal Land': 28,\n# 'Alps': 29,\n# 'loc': 30,\n# 'location': 30,\n# 'France': 31,\n# 'CONUS': 32,\n# 'Cape_Verde_Flying': 33,\n# 'local_CVAO_area': 34,\n# }\n MaskNumber = MaskDict.loc[ MaskDict['MaskName']==region,'ID'].values[0]\n\n # TODO: overhaul the above \"MaskNumber\" approach to be more pythonic.\n # for now just ensure that a 'MaskNumber' ID is used.\n # the eventual approach will not need different capitalisation etc.\n\n # --- This is a simple way of using masks ( as multiplers )\n # i.e. all (future) functions should have use_multiply_method=False\n # and not use the code below\n if use_multiply_method: # Kludge\n print(('!'*50, 'WARNING: using mulitply method for masking. '))\n # For MaskNumber, pull mask from MaskNumber list\n if MaskNumber == 0:\n mask = tropics_unmasked(res=res, saizlopez=saizlopez)\n elif MaskNumber == 1:\n mask = mid_lats_unmasked(res=res)\n elif MaskNumber == 2:\n mask = southpole_unmasked(res=res)\n elif MaskNumber == 3:\n mask = northpole_unmasked(res=res)\n elif MaskNumber == 4:\n # mask = np.logical_not( all_unmasked( res=res ) )\n mask = all_unmasked(res=res)\n elif MaskNumber == 5:\n mask = extratropics_unmasked(res=res)\n elif MaskNumber == 6:\n mask = ocean_unmasked(res=res)\n elif MaskNumber == 7:\n mask = NH_unmasked(res=res)\n elif MaskNumber == 8:\n mask = SH_unmasked(res=res)\n elif MaskNumber == 10:\n mask = ice_unmasked(res=res)\n elif MaskNumber == 11:\n mask = land_unmasked(res=res)\n elif MaskNumber == 12:\n mask = mask_lat40_2_40(res=res)\n elif MaskNumber == 13: # 'Oceanic Tropics'\n mask = np.ma.mask_or(ocean_unmasked(res=res),\n tropics_unmasked(res=res,\n saizlopez=saizlopez))\n elif MaskNumber == 14: # 'Land Tropics'\n mask = np.ma.mask_or(land_unmasked(res=res),\n tropics_unmasked(res=res,\n saizlopez=saizlopez))\n elif MaskNumber == 15: # 'All Sur.'\n mask = surface_unmasked(res=res)\n elif MaskNumber == 16: # 'Ocean Sur.'\n mask = np.ma.mask_or(surface_unmasked(res=res),\n ocean_unmasked(res=res))\n elif MaskNumber == 17: # 'Land Sur.':\n mask = np.ma.mask_or(surface_unmasked(res=res),\n land_unmasked(res=res))\n elif MaskNumber == 18: # 'Ice Sur.'\n mask = np.ma.mask_or(surface_unmasked(res=res),\n ice_unmasked(res=res))\n elif MaskNumber == 19: # '50S-50N'\n mask = lat2lat_2D_unmasked(lowerlat=-50, higherlat=50,\n res=res)\n elif MaskNumber == 20: # 'Ocn. 
50S-50N'\n mask = np.ma.mask_or(lat2lat_2D_unmasked(lowerlat=-50,\n higherlat=50, res=res),\n ocean_unmasked(res=res)[..., 0])\n elif MaskNumber == 21:\n mask = get_north_sea_unmasked(res=res)\n elif MaskNumber == 25:\n mask = get_EU_unmasked(res=res)\n# if MaskNumber == 26:\n# mask = get_2D_BL_unmasked( res=res )\n elif MaskNumber == 27: # 'Land Tropics Sur.':\n tmp = np.ma.mask_or(surface_unmasked(res=res),\n land_unmasked(res=res))\n mask = np.ma.mask_or(tmp, tropics_unmasked(res=res))\n else:\n PrtStr = 'WARNING - Mask not setup for MaskNumber={}'\n print( PrtStr.format(MaskNumber) )\n sys.exit()\n # Invert mask to leave exception unmasked if used to multiply\n mask = np.logical_not(mask)\n\n # --- This is a more pythonic way of using masks (Use as preference)\n else:\n # Use a bulk approach for Rectangular masks\n if region in RectangluarMasks['MaskName'].values:\n # Retrieve mask extents from dictionary, then construct mask\n Mask4Region = MaskDict.loc[MaskDict['MaskName']==region]\n lowerlat = Mask4Region['lowerlat'].values[0]\n higherlat = Mask4Region['higherlat'].values[0]\n lowerlon = Mask4Region['lowerlon'].values[0]\n higherlon = Mask4Region['higherlon'].values[0]\n # Get a mask for lat and lon range, then combine\n mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n higherlat=higherlat)\n mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n higherlon=higherlon)\n mask = np.ma.mask_or(mask1, mask2)\n\n # For MaskNumber, pull mask from MaskNumber list\n elif MaskNumber == 0:\n mask = tropics_unmasked(res=res, saizlopez=saizlopez)\n elif MaskNumber == 1:\n mask = mid_lats_unmasked(res=res)\n elif MaskNumber == 2:\n mask = southpole_unmasked(res=res)\n elif MaskNumber == 3:\n mask = northpole_unmasked(res=res)\n elif MaskNumber == 4:\n # mask = np.logical_not( all_unmasked( res=res ) )\n mask = all_unmasked(res=res)\n elif MaskNumber == 5:\n mask = extratropics_unmasked(res=res)\n elif MaskNumber == 6:\n mask = ocean_unmasked(res=res)\n elif MaskNumber == 7:\n mask = NH_unmasked(res=res)\n elif MaskNumber == 8:\n mask = SH_unmasked(res=res)\n elif MaskNumber == 10:\n mask = ice_unmasked(res=res)\n elif MaskNumber == 11:\n mask = land_unmasked(res=res)\n elif MaskNumber == 12:\n mask = mask_lat40_2_40(res=res)\n elif MaskNumber == 13:\n mask = np.ma.mask_or(ocean_unmasked(res=res),\n tropics_unmasked(res=res,\n saizlopez=saizlopez))\n elif MaskNumber == 14:\n mask = np.ma.mask_or(land_unmasked(res=res),\n tropics_unmasked(res=res,\n saizlopez=saizlopez))\n elif MaskNumber == 15: # 'All Sur.'\n mask = surface_unmasked(res=res)\n elif MaskNumber == 16: # 'Ocean Sur.'\n mask = np.ma.mask_or(surface_unmasked(res=res),\n ocean_unmasked(res=res))\n elif MaskNumber == 17: # 'Land Sur.':\n mask = np.ma.mask_or(surface_unmasked(res=res),\n land_unmasked(res=res))\n elif MaskNumber == 18: # 'Ice Sur.'\n mask = np.ma.mask_or(surface_unmasked(res=res),\n ice_unmasked(res=res))\n elif MaskNumber == 19:\n mask = lat2lat_2D_unmasked(lowerlat=-50, higherlat=50,\n res=res)\n elif MaskNumber == 20:\n mask = np.ma.mask_or(lat2lat_2D_unmasked(lowerlat=-50,\n higherlat=50, res=res),\n ocean_unmasked(res=res)[..., 0])\n elif MaskNumber == 21:\n mask = get_north_sea_unmasked(res=res)\n elif MaskNumber == 22:\n mask = get_mediterranean_sea_unmasked(res=res)\n elif MaskNumber == 23:\n mask = get_unmasked_black_sea(res=res)\n elif MaskNumber == 24:\n mask = get_unmasked_irish_sea(res=res)\n elif MaskNumber == 25:\n mask = get_EU_unmasked(res=res)\n# if MaskNumber == 26:\n# mask = 
get_2D_BL_unmasked( res=res )\n elif MaskNumber == 27: # 'Land Tropics Sur.':\n tmp = np.ma.mask_or(surface_unmasked(res=res),\n land_unmasked(res=res))\n mask = np.ma.mask_or(tmp, tropics_unmasked(res=res))\n elif MaskNumber == 28:\n mask = np.ma.mask_or(lat2lat_2D_unmasked(lowerlat=50,\n higherlat=80, res=res),\n land_unmasked(res=res)[..., 0])\n# elif MaskNumber == 29: # Alps\n# # Alps mask\n# lowerlat = 43\n# higherlat = 47\n# lowerlon = 5\n# higherlon = 15\n# # Get a mask for lat and lon range, then combine\n# mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n# higherlat=higherlat)\n# mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n# higherlon=higherlon)\n# mask = np.ma.mask_or(mask1, mask2)\n elif MaskNumber == 30: # Location ('loc' )\n mask = location_unmasked(lat=lat, lon=lon, res=res)\n elif MaskNumber == 31: # Rough(!) France map\n mask = get_France_unmasked(res=res)\n\n# elif MaskNumber == 32: # CONUS\n# lowerlat = 23\n# higherlat = 60\n# lowerlon = -125\n# higherlon = -54\n# # Get a mask for lat and lon range, then combine\n# mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n# higherlat=higherlat)\n# mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n# higherlon=higherlon)\n# mask = np.ma.mask_or(mask1, mask2)\n\n# elif MaskNumber == 33: # Cape_Verde_Flying\n# # Retrieve spatial extents for MaskName\n# d = GetMaskExtents()\n# lowerlat = 11.9\n# higherlat = 21.1\n# lowerlon = -29.1\n# higherlon = -15.9\n# # Get a mask for lat and lon range, then combine\n# mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n# higherlat=higherlat)\n# mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n# higherlon=higherlon)\n# mask = np.ma.mask_or(mask1, mask2)\n#\n# elif MaskNumber == 34: # local_CVAO_area\n# lowerlat = 0\n# higherlat = 25\n# lowerlon = -30\n# higherlon = -10\n# # Get a mask for lat and lon range, then combine\n# mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n# higherlat=higherlat)\n# mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n# higherlon=higherlon)\n# mask = np.ma.mask_or(mask1, mask2)\n\n else:\n PrtStr = 'WARNING - Mask not setup for MaskNumber={}'\n print(PrtStr.format(MaskNumber))\n sys.exit()\n\n logging.debug('prior to setting dimensions: {}'.format(mask.shape))\n # Apply Saiz-Lopez Marine MFT/MUT? 
<= should this be before multiply op.?\n if M_all:\n if use_multiply_method: # Kludge\n mask = mask*land_unmasked(res=res)\n else:\n # check this!!!\n mask = np.ma.mask_or(mask, land_unmasked(res=res))\n\n # Ensure returned arrays are 2D\n if mask2D:\n if len(mask.shape) == 2:\n pass\n elif len(mask.shape) == 3:\n mask = mask[..., 0]\n elif len(mask.shape) == 4:\n mask = mask[..., 0, 0]\n\n # Create 3D array by concatenating through altitude dimension\n if mask3D:\n if any([(mask.shape[-1] == i) for i in (38, 47)]):\n pass\n else: # concatenate dimensions\n if len(mask.shape) == 3:\n mask = np.concatenate([mask]*47, axis=2)\n elif len(mask.shape) == 2:\n mask = np.concatenate([mask[..., None]]*47, axis=2)\n\n # Remove above the \"chemical tropopause\" from GEOS-Chem (v9-2)\n if trop_limit:\n if (len(mask.shape) == 2) or mask2D:\n pass\n else:\n mask = mask[..., :38]\n\n # Create 4D array by concatenating through time dimension\n # ( assuming year long array of 1 months )\n if mask4D:\n if any([(mask.shape[-1] == i) for i in [12]]):\n pass\n else: # concatenate dimensions\n mask = np.concatenate([mask[..., None]]*12, axis=3)\n logging.debug('post to setting dimensions: {}'.format(mask.shape))\n logging.info(\"returning a 'mask' of type:{}\".format(type(mask)))\n return mask", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def zonal_avg(data,Log=False):\n print 'computing zonal average'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n if data.ndim == 3:\n new_data = MA.zeros((data.shape[0],nlat,nlon),dtype=float)\n elif data.ndim == 2:\n new_data = MA.zeros((nlat,nlon),dtype=float)\n else:\n print 'Check field dimensions'\n sys.exit()\n\n # geometric mean?\n if Log:\n work = MA.log(data)\n else:\n work = data\n\n # remap data to new regular grid\n for i in range(nlat):\n #print 'lat = %.2f'%(lat_t[i])\n for j in range(nlon):\n new_data[:,i,j] = extract_loc(lon_t[j],lat_t[i],tlon,tlat,work)\n\n # compute zonal average\n if Log:\n za_data = (MA.exp(MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape))))\n else:\n za_data = (MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape)))\n\n return za_data, lat_t", "def aggregate_results(self):\n\n raise NotImplementedError", "def setZmVars(self):\n self.zmvars = []\n for sample in self.samples:\n self.zmvars.append(sample.ZmAreaVar)", "def test_aggregateby(self):\n result = export.processExport(houseId=1,\n aggregate=\"1D\",\n aggregateby=[\"min\"])\n\n #So this will just show the minimum value\n self.assertEqual(result.shape, (10, 2))\n\n\n\n result = export.processExport(houseId=1,\n aggregate=\"1D\",\n aggregateby=[\"min\",\"mean\",\"max\"])\n\n #print result.head()\n #So this will have 3 readings for each location (6 in 
total()\n self.assertEqual(result.shape, (10, 6))\n #And the second sample should be 10 minutes in\n #self.assertEqual(result.index[1], datetime.datetime(2013, 01, 01, 1, 00, 00))", "def getDayMeans(gdf,\r\n year_min,month_min,day_min,\r\n year_max,month_max,day_max,\r\n Long_min,Long_max,\r\n Lat_min,Lat_max,\r\n ValueName,Error_name = '',UnCorr_name=''):\r\n output_all = gdf[(gdf.Date >= datetime.date(year_min,month_min,day_min))\r\n & (gdf.Date <= datetime.date(year_max,month_max,day_max))\r\n & (gdf.Long >= Long_min)\r\n & (gdf.Long <= Long_max)\r\n & (gdf.Lat >= Lat_min)\r\n & (gdf.Lat <= Lat_max)].groupby(['Year','Month','Day'])[ValueName].mean().reset_index()\r\n\r\n output = output_all.copy(); print('Caution, min number of mean value = 0')\r\n #output = output_all[(output_all.number >= 10)]\r\n print(len(output_all.Year))\r\n print(len(output.Year))\r\n date = output.apply(lambda x: datetime.date(int(x.Year),int(x.Month),int(x.Day)),axis=1)\r\n output.insert(loc=1,column='Date',value=date)\r\n return output", "def _recalculate_centroids(self):\n\n self._prev_centroids = dict(self.centroids)\n for cluster in self.clusters:\n self.centroids[cluster] = np.average(self.clusters[cluster], axis=0)", "def test_viewset_post_passed(self):\n zone = [\n [\n [9.523050482755892,55.71576659960325],\n [9.52433794308304,55.71581494788879],\n [9.525732691770784,55.71585120906369],\n [9.527191813474886,55.715863296114506],\n [9.52785700131058,55.71585120906369],\n [9.530367548948519,55.715561118722064],\n [9.531440432554476,55.71540398555416],\n [9.53208416271805,55.71521059001827],\n [9.532856638914339,55.7149567569243],\n [9.531247313505403,55.713349109025195],\n [9.530989821439974,55.713107351738756],\n [9.529616530424349,55.71246668769403],\n [9.528801138883821,55.712055690133354],\n [9.52860801983475,55.71214030763166],\n [9.528436358457796,55.71206777835862],\n [9.525904353147737,55.711983160703205],\n [9.52410190868973,55.71200733719487],\n [9.52311485577225,55.71206777835862],\n [9.523200686460726,55.71427986060977],\n [9.523050482755892,55.71576659960325]\n ], [\n [9.529723818784944,55.71464248509411],\n [9.529037173277132,55.713880969788974],\n [9.528801138883821,55.713748005276905],\n [9.528221781736605,55.71368756671271],\n [9.528393443113558,55.71315570331575],\n [9.529165919309847,55.71315570331575],\n [9.52957361508011,55.71351833823549],\n [9.53058212566971,55.71435238577594],\n [9.529723818784944,55.71464248509411]\n ]\n ]\n\n data = {\n \"provider\": Provider.objects.first().pk,\n \"name\": \"Test Zone\",\n \"price\": \"5.50\",\n \"zone\": json.dumps(zone)\n }\n\n response = self.client.post(reverse(\"servicearea-list\"), data, format='json')\n self.assertEqual(response.status_code, 201)", "def item_gewest_adapter(obj, request):\n return {\n 'id': obj.id,\n 'namen': obj._namen,\n 'centroid': obj.centroid,\n 'bounding_box': obj.bounding_box\n }", "def build_region(self, \n dataset_metadata_dict,\n min_lod_pixels=100, \n max_lod_pixels=-1, \n min_fade_extent=200, \n max_fade_extent=800\n ):\n\n region = simplekml.Region(latlonaltbox=\"<north>\" + str(dataset_metadata_dict['latitude_max']) + \"</north>\" +\n \"<south>\" + str(dataset_metadata_dict['latitude_min']) + \"</south>\" +\n \"<east>\" + str(dataset_metadata_dict['longitude_max']) + \"</east>\" +\n \"<west>\" + str(dataset_metadata_dict['longitude_min']) + \"</west>\",\n lod=\"<minLodPixels>\" + str(min_lod_pixels) + \"</minLodPixels>\" +\n \"<maxLodPixels>\" + str(max_lod_pixels) + \"</maxLodPixels>\" +\n \"<minFadeExtent>\" 
+ str(min_fade_extent) + \"</minFadeExtent>\" +\n \"<maxFadeExtent>\" + str(max_fade_extent) + \"</maxFadeExtent>\")\n return region", "def get_borders_of_operation_area(self):\n if len(self.edge_documents) == 0:\n self.retrieve_edge_documents()\n\n self.set_borders_of_operation_area()\n\n borders = {\n 'minimum_latitude': self.minimum_latitude,\n 'maximum_latitude': self.maximum_latitude,\n 'minimum_longitude': self.minimum_longitude,\n 'maximum_longitude': self.maximum_longitude\n }\n return borders", "def aggregate_location(request):\n try:\n start_date = request.GET.get('startDate', None)\n end_date = request.GET.get('endDate', None)\n\n items = __getQuerysetGivenInterval('item', start_date, end_date)\n\n items_grouped_by_location = list(\n items.annotate(location=F('donation__donor__city'))\n .values('location')\n .annotate(count=Count('location'))\n )\n result = {'result': items_grouped_by_location}\n\n return JsonResponse(result, status=200)\n except BaseException as e:\n print(e.args)\n return HttpResponseBadRequest()", "def mdAveragePropertiesList(self):\n\t\tpass", "def define_areas(\n pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float\n):\n structure = np.ones((3, 3)).astype(int)\n expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)\n eroded_map = binary_erosion(input=expanded_map, structure=structure)\n labels_array, n_label = measurements.label(\n input=eroded_map,\n structure=structure,\n )\n\n # labels start from 1, therefore the array size is 'num_labels_array + 1'\n areas_potential = np.zeros((n_label + 1)).astype(float)\n if n_label > 0:\n end, start, sorted_array = get_browsing_indexes(\n labels_array=labels_array,\n pixel_filtered_map=pixel_filtered_map,\n n_label=n_label,\n )\n\n for i, (start_index, end_index) in enumerate(zip(start, end)):\n area = sorted_array[start_index:end_index, 3]\n area_potential = np.sum(area)\n if area_potential >= district_heating_zone_threshold:\n # i+1 because labeling starts from 1 and not from 0\n # factor 0.001 for conversion from MWh/ha to GWh/ha\n areas_potential[i + 1] = np.around(np.sum(area_potential) / 1000, 2)\n\n areas = areas_potential[labels_array]\n filtered_map = pixel_filtered_map * (areas > 0).astype(int)\n total_potential = np.sum(areas_potential)\n return areas, filtered_map, total_potential, areas_potential[1:]", "def area_average(mycube, coord1, coord2):\n import iris.analysis.cartography\n #mycube.coord(coord1).guess_bounds()\n #mycube.coord(coord2).guess_bounds()\n grid_areas = iris.analysis.cartography.area_weights(mycube)\n result = mycube.collapsed([coord1, coord2], iris.analysis.MEAN, weights=grid_areas)\n return result", "def __test_region(self, bk):\n for arg in self.args['region']:\n ds = ArgoDataFetcher(backend=bk).region(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def _summ_meanonly(self, wt_index, wt_type, obs, varnames, indexes):\n zero_info = {'N': 0, 'sum_w': 0, 'sum': 0, \n 'key_order': ('N', 'sum_w', 'sum')}\n index = indexes[-1]\n \n if self._isnumvar(index):\n info = self._summ_stats_meanonly(index, wt_index, wt_type, obs)\n else:\n info = zero_info\n \n self._return_values = info if info[\"N\"] != 0 else zero_info", "def getMeanRMS (self,arr):\n # in base class we return redshift and zero varinace\n # repeat that here because mean RMS is meaningless for Template SED PDFs\n N=len(arr)\n return arr[\"z\"],np.zeros(N)", "def GetRegionVertices(self, *float, **kwargs):\n ...", "def test_avalanche_warning_by_region_detail(self):\n pass", 
"def initialize_areas(self, reset_areas=True):\n if reset_areas: self.reset_areas()\n #for dt in list(self.dtypes.values()): dt.initialize_areas()\n for dtk in self.dtypes: self.dtypes[dtk].initialize_areas()", "def calc_shape_statistics(self, stat_names):\n stats = {}\n try:\n all_props = [regionprops(m) for m in self.masks]\n except TypeError:\n raise TypeError(\"masks not the right type\")\n for stat in stat_names:\n stats[stat] = np.mean([p[0][stat] for p in all_props])\n return stats", "def poll_datacenter(self, server, obj, name):\n\n if '.' in name:\n name = name.split('.')[0]\n\n stats = self._poll_group('datacenter', server, obj, name)\n\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n else:\n if 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n\n return stats", "def calcZmAreaVar(self):\n #-- NO EXTRAPOLATION\n if self.extrapolation == \"none\":\n self.ZmAreaVar = sum(self.zmvars)\n #-- AREA EXTRAPOLATION\n if self.extrapolation == \"area\":\n self.ZmAreaVar = 0\n #-- LINEAR EXTRAPOLATION\n if self.extrapolation == \"linear\":\n self.ZmAreaVar = ( (self.stratum.LT / self.stratum.LN) ** 2 ) * (((self.stratum.Ni ** 2) * (1 - self.ni / self.stratum.Ni) * self.variance()) / self.ni) + ((self.stratum.Ni / self.ni) * sum(self.zmvars))\n return self.ZmAreaVar", "def zonal_stats(src_poly, src_raster, operator=['mean'], features=None):\n assert src_raster.geo_transform is not None, \"src_raster.geo_transform should not be None\"\n assert isinstance(operator, list), \"operator should be a list of string. 
ex: ['mean']\"\n features = list(range(src_raster.bands)) if features is None else features\n assert len(features) == src_raster.bands, \"length of features should equals number of bands of the raster\"\n df_shp = src_poly.copy()\n df_shp['poly_idx'] = list(range(len(df_shp)))\n df_shp['poly_idx'] = df_shp['poly_idx'].astype('float')\n poly_rst = tgp.ShapeGrid.rasterize_layer(df_shp, src_raster.rows, src_raster.cols, src_raster.geo_transform, 'poly_idx', all_touched=True, no_data_value=np.nan)\n X_combine = np.concatenate([poly_rst.data, src_raster.data], axis=-1)\n X_combine_df = pd.DataFrame(X_combine.reshape(-1, src_raster.bands))\n X_groupby = X_combine_df.groupby(0, as_index=False)\n for op in operator:\n columns = {0:'poly_idx'}\n for f_idx, f in enumerate(features):\n columns[f_idx+1] = f'zonal_{op}_{f}'\n if op == 'mean':\n df_shp = df_shp.merge(X_groupby.mean().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'max':\n df_shp = df_shp.merge(X_groupby.max().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'min':\n df_shp = df_shp.merge(X_groupby.min().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'median':\n df_shp = df_shp.merge(X_groupby.median().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'sum':\n df_shp = df_shp.merge(X_groupby.sum().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'std':\n df_shp = df_shp.merge(X_groupby.std().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'count':\n df_shp = df_shp.merge(X_groupby.count().rename(columns=columns), on='poly_idx', how='left')\n else:\n assert False, \"no this operator\"\n return df_shp", "def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models", "def geocode(self, geocoder):\n for term in self.terms:\n # No need to geocode regions\n if not term.get('region'):\n geo = geocoder.geocode(term['string'])\n if geo:\n term['geo'] = geo\n if not self.region:\n # TODO: descobrir regiao do 
ponto\n self.region = \"???\"\n else:\n self.region = term['region']", "def collect_rms(self, rms):\n if self._data:\n self._data['min'] = min(rms, self._data['min'])\n self._data['max'] = max(rms, self._data['max'])\n self._data['avg'] = float(rms + self._data['avg']) / 2\n else:\n self._data['min'] = rms\n self._data['max'] = rms\n self._data['avg'] = rms", "def cube_area_analysis(self, cube, method='MEAN'):\n try:\n if not cube.coord(self.xy_coords[0]).has_bounds():\n cube.coord(self.xy_coords[0]).guess_bounds()\n cube.coord(self.xy_coords[-1]).guess_bounds()\n except iris.exceptions.CoordinateNotFoundError:\n # If xycoords have been changed since load.\n self.xy_coords = get_xy_coords(cube)\n if not cube.coord(self.xy_coords[0]).has_bounds():\n cube.coord(self.xy_coords[0]).guess_bounds()\n cube.coord(self.xy_coords[-1]).guess_bounds()\n except ValueError:\n pass\n # Only these methods use weighting.\n if method in ['MEAN', 'SUM', 'RMS']:\n if 'longitude' in self.xy_coords[0] and \\\n 'latitude' in self.xy_coords[-1]:\n grid_areas = iris.analysis.cartography.area_weights(cube)\n else:\n grid_areas = None\n cube = cube.collapsed([self.xy_coords[0], self.xy_coords[-1]], \n getattr(iris.analysis, method), \n weights=grid_areas)\n else:\n cube = cube.collapsed([self.xy_coords[0], self.xy_coords[-1]], \n getattr(iris.analysis, method))\n return cube", "def centroid(self, region_list):\n centroid_list = [] # a list of [(distance from robot, centroid)]\n robot = map_helper.map_to_world(self.start[0], self.start[1], self.resolution, self.x_offset, self.y_offset)\n\t#rospy.loginfo(region_list)\n for region in region_list:\n n = len(region)\n i = math.trunc(n/2)\n centroid = region[i]\n\n x = abs(centroid[0] - robot[0])\n y = abs(centroid[1] - robot[1])\n dist = math.hypot(x, y)\n centroid_list.append((dist, centroid))\n return self.smallest_centroid(centroid_list)", "def _update_centroid_location(self):\n previous_centroid_info = copy.deepcopy(self.centroid_info)\n for centroid, belongings in self.cluster_result.items():\n if not self.centroid_stable_flag.get(centroid):\n temp_list = list()\n temp_list.append(self.centroid_info.get(centroid))\n temp_list.extend(belongings)\n\n self.centroid_info[centroid] = float(sum(temp_list) / len(temp_list))\n\n return previous_centroid_info, self.centroid_info", "def test_mean_value(self):\n dict_with_value = self.info_list.get_value_info()\n print(dict_with_value[\"amount\"])\n self.assertEqual(dict_with_value['mean'], 135.0)", "def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for 
polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]" ]
[ "0.58719265", "0.5710536", "0.5359844", "0.5348019", "0.5338033", "0.52508754", "0.5220636", "0.5190192", "0.5181222", "0.5081943", "0.5053021", "0.5022049", "0.5003427", "0.5002751", "0.4982933", "0.496246", "0.496002", "0.49582618", "0.49564335", "0.4926023", "0.49112692", "0.4895866", "0.48743063", "0.485147", "0.4849576", "0.4838315", "0.4799011", "0.4784904", "0.47792763", "0.4769234", "0.47651792", "0.47467613", "0.47434464", "0.47414768", "0.47357798", "0.47209308", "0.4719216", "0.471683", "0.47015417", "0.4696574", "0.46930677", "0.4672793", "0.4662715", "0.46615", "0.46599582", "0.46555215", "0.4644599", "0.4632314", "0.46214327", "0.46085533", "0.45998478", "0.4595351", "0.45907673", "0.45884615", "0.458647", "0.45836833", "0.4578933", "0.45775014", "0.4573002", "0.45685506", "0.45685506", "0.45618397", "0.4558162", "0.45575917", "0.45568398", "0.45530996", "0.45354617", "0.453419", "0.45327765", "0.45279992", "0.45062116", "0.4505594", "0.4504782", "0.4503095", "0.4499742", "0.4498412", "0.4497405", "0.44940013", "0.4492523", "0.44889942", "0.44876426", "0.44849992", "0.44731668", "0.44728306", "0.44712907", "0.4469165", "0.44688162", "0.4466677", "0.44656068", "0.44638807", "0.4457747", "0.44552514", "0.44552073", "0.44551682", "0.44548535", "0.44506034", "0.4449914", "0.44428492", "0.44371277", "0.44370544" ]
0.5071645
10
filter results by key server side function.
def mapList(results, key):
    newResult = results.map(lambda x: ee.Dictionary(x).get(key))
    return newResult
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_by(self, key: str, *args, **kwargs):\n filter_ = self.filters.get(key)\n if not filter_:\n raise ValueError(key)\n return filter_(*args, **kwargs)", "def filter_keys_c(func):\n return partial(filter_keys, func)", "def filter(self, key):\n with suppress(KeyError):\n yield from self.data[key]", "def filter_keys(self):\n filters = self.args.keyfilter.split('.')\n self.logger.info(u'Filtering with:{f}'.format(f=filters))\n data = self.inputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}'.format(k=key))\n returned_data = dict_key_filter(key, value, filters, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data After filter:{d}'.format(d=newdata))\n self.outputdata = newdata", "def filter(self, **args ):\n query = TXLOG.select('*')\n for key, value in args.items():\n if '__' in key:\n key, op = key.split('__')\n else:\n op = 'eq'\n\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n if not isinstance(value, basestring ):\n value = str(value)\n\n query = query.where({key:value}, self.operators[op])\n\n items = query.list()\n return items", "def filter(self, filter_dict):\n pass", "def filter(self, trans, user, query, dbkey):\n dbkey_user, dbkey = decode_dbkey(dbkey)\n dbkey = dbkey.replace(\"'\", \"\\\\'\")\n return query.filter(or_(\"metadata like '%%\\\"dbkey\\\": [\\\"%s\\\"]%%'\" % dbkey, \"metadata like '%%\\\"dbkey\\\": \\\"%s\\\"%%'\" % dbkey))", "def filter():\n return get_filter_data(db, MyTable)", "def search(self, key, headers=Headers()):", "def filtered(self, keys, lst=None, func=\"all\"):\n lst = self if lst is None else lst\n if len(lst) == 0:\n raise ValueError(\"No rows in list\")\n return [row for row in self.filter(keys, lst, func=func)]", "def test_filter_one_key():\n data = [\n {\n \"name\": \"Bill\",\n \"last_name\": \"Gilbert\",\n \"occupation\": \"was here\",\n \"type\": \"person\",\n },\n {\"is_dead\": True, \"kind\": \"parrot\", \"type\": \"bird\", \"name\": \"polly\"},\n ]\n\n actual_result = make_filter(last_name=\"Gilbert\").apply(data)\n expected_result = [data[0]]\n assert actual_result == expected_result", "def filter(self, func):\n self._sets.filter(key=func)", "def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)", "def __contains__(self, key):\n for f in reversed(self.filters):\n if key in f:\n return True\n return False", "async def filter(self, **kwargs):\n\n pass", "def __getitem__(self, key):\n return self.query(key)", "def filter(self, keys):\n if keys is None or len(keys) == 0:\n return self._metadata\n\n return self._filter_new(self._metadata, keys)", "def filter(self, *args, **kwargs):", "def find(self, key, condition) -> list:\n pass", "def where(self, key, value):\n comparison = key + \" = \" + sanitize_value(value)\n results = self.__run(\n pagination_template.substitute(\n tablename=self.tablename,\n comparison=comparison,\n sortKey=key,\n asc=\"ASC\",\n limit=1\n ),\n )\n return results", "def filter(self, func=bool):\n return _(filter(func, self._))", "def key_filter(template_index, num_keys, row):\n\n template = nori.core.cfg['templates'][template_index]\n\n if (nori.core.cfg['key_mode'] == 'all' and\n template[T_KEY_MODE_KEY] == 'all'):\n return True\n\n if not check_key_list_match(nori.core.cfg['key_mode'],\n nori.core.cfg['key_list'], num_keys, row):\n return False\n\n if not check_key_list_match(template[T_KEY_MODE_KEY],\n 
template[T_KEY_LIST_KEY], num_keys, row):\n return False\n\n return True", "def is_filtered(self, key, filter_values):\n return str(key[-1]) in filter_values", "def filter_by_keys(self, keys):\n return list(filter(lambda item: item.keyword in set(keys), self._metadata))", "def _filter_from_dict(cls, nm, val):\n #Any necessary filtering place here.\n return val", "def filter(self, func: Callable[[Tuple[keyType, valueType]], Tuple[keyType, valueType]]) -> List[Tuple[keyType, valueType]]:\n result = []\n it = self.__iter__()\n while True:\n try:\n key, value = next(it)\n pair = (key, value)\n tmp = func(pair)\n if not (tmp is None):\n result.append(tmp)\n except StopIteration:\n break\n return result", "def filterRansac():\n pass", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError", "def filter_handler_results(self, handler_name):\n return filter(lambda results: handler_name in results.keys(), self.handler_results)", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def filter_record_keys(record_list, whitelist_keys):\n\n filtered = [{k: v for k, v in [y for y in list(x.items()) if y[0] in whitelist_keys]} for x in record_list]\n return filtered", "def filt(rec):\n return True # Show everything", "def eventFilter(self, object, event):\n if event.type() == QtCore.QEvent.KeyPress:\n if event.key() >= 48 and event.key() <= 57: # 0-9\n kbKey = 'KEY_' + chr(event.key())\n elif event.key() >= 65 and event.key() <= 90: # A-Z\n kbKey = 'KEY_' + chr(event.key())\n elif event.key() in special_keys: # special_keys\n kbKey = 'KEY_' + special_keys[event.key()]\n else:\n kbKey = 'None'\n # should not have to search for it... 
dunno how else to access item?\n lstMatch = self.ui.tv_bindings.findItems('[Press a key]', QtCore.Qt.MatchExactly, 1)[0]\n g13Key = lstMatch.text(0)\n lstMatch.setText(1, kbKey)\n if not self.keyNames:\n self.efButton.setText(kbKey[4:])\n self.ui.tv_bindings.removeEventFilter(self)\n self.efButton.removeEventFilter(self)\n # Update config\n self.writeFile()\n self.writePipe('bind ' + g13Key + ' ' + kbKey)\n return False", "def filter(self, func=None, **kwargs):\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)", "def filter(self, observable):", "def filter(self, keys, lst=None, func=\"all\"):\n f = all if func == \"all\" else any\n\n if lst is None:\n lst = self\n if DEP in lst[0] and INDEP in lst[0]:\n filt_dep = True\n else:\n filt_dep = False\n\n def filt_func(d):\n if filt_dep:\n return f([k in d[INDEP] or k in d[DEP] for k in listify(keys)])\n else:\n return f([k in d for k in listify(keys)])\n\n return filter(filt_func, lst)", "def filter_query_result(self, result, varenv):\n if isinstance(result, list):\n filter_result = []\n for elem in result:\n # need this pointer to get index results properly sorted.\n elem.list = result\n filter_result.append(self.filter_query_result(elem, varenv))\n elif isinstance(result, dict):\n filter_result = {}\n for key, asked in result.query.original_query.iteritems():\n if key[0] in '@:':\n basekey = key[1:]\n if basekey == 'id':\n filter_result[key] = asked\n # horrible hack to collect up the guids we care about...\n if asked is None:\n varenv.guid_list.append(result[key[0] + 'guid'])\n elif (basekey in QueryPrimitive.directives or\n basekey in QueryPrimitive.special):\n # should we output these?\n filter_result[key] = asked\n elif key[0] == '@' and result.query.get(\n '@optional') and key not in result:\n # XXX here we actually will give you an empty result\n # we could give you nothing at all\n filter_result[key] = None\n elif basekey == 'guid':\n filter_result[key] = result[key]\n elif basekey == 'value':\n # sanitize results.\n filter_result[key] = self.sanitize_value(\n result[key], result[key[0] + 'datatype'], varenv)\n elif basekey in QueryPrimitive.values:\n # this better be what you said!!!\n filter_result[key] = result[key]\n elif basekey == 'index':\n filter_result[key] = self.generate_index_read_result(result)\n elif basekey in QueryPrimitive.pointers:\n # might be direct sub-query or constraint, or query\n if isinstance(asked, dict):\n # sub-query, return it\n filter_result[key] = self.filter_query_result(result[key], varenv)\n else:\n if asked is None:\n # we'll be asking for the id of this thing, not just the guid.\n varenv.lookup_manager.guid_list.append(result[key])\n\n # just give back the guid\n filter_result[key] = result[key]\n elif valid_relname(key):\n # skip optional results we didn't get a value for.\n if result.query.get('@optional') and key not in result:\n # XXX should we give you None as a result rather than leaving it out completely?\n pass\n else:\n # is this a ResultError or an InternalError?\n if key not in result:\n raise MQLInternalError(\n result.query, \"No return result for '%(key)s'\", key=key)\n else:\n filter_result[key] = self.filter_query_result(result[key], varenv)\n\n elif key[0] == '?':\n # it's possible that we didn't find any order information, so give back null in that case\n filter_result[key] = result.get(key, None)\n else:\n raise 
MQLInternalError(\n result.query,\n \"Didn't expect to see %(key)s in original query while filtering\",\n key=key)\n\n result.filter = filter_result\n elif result is None:\n # there's no result here even though we expected one.\n filter_result = result\n else:\n raise MQLInternalError(\n result.query, \"Didn't understand result\", result=result)\n\n return filter_result", "def filter_by(self, **kwargs):\n from_entity = self._filter_by_zero()\n\n clauses = [\n _entity_namespace_key(from_entity, key) == value\n for key, value in kwargs.items()\n ]\n return self.filter(*clauses)", "def func_filter(self, func):\n return QuerySet(filter(func, self))", "def _filterfunc(self,*args,**kwargs):\n self._filterfunc = self.f\n return self.f(*args,**kwargs)", "def filter_all(_):\n return True", "def test_get_tag_filter_keys(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, OCPTagView)\n handler = OCPTagQueryHandler(query_params)\n tag_keys = handler.get_tag_keys(filters=False)\n\n url = f\"?filter[tag:{tag_keys[0]}]=*\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n results = handler.get_tag_filter_keys()\n self.assertEqual(results, [\"tag:\" + tag_keys[0]])", "def filter(data, query, use_nested_keys=True,\n key_separator='.', case_sensitive=True,\n raise_keyerror=False):\n ast = parser.parse(query)\n dq = DataQueryVisitor(\n ast, use_nested_keys=use_nested_keys,\n key_separator=key_separator, case_sensitive=case_sensitive,\n raise_keyerror=raise_keyerror)\n for item in data:\n if not dq.evaluate(item):\n continue\n yield item", "def filter(self, filters):", "def query_filter(query_params, allow_func=None):\n query_params = query_unflatten(query_params)\n d = {}\n for name, value in query_params.items():\n if allow_func and not allow_func(name, value):\n continue\n else:\n d[name] = value\n return d", "def filter(self, filter_dict):\n self.result = [x for x in self.result if all(str(x[y]) == z or (hasattr(x[y], \"__iter__\") and (z in str(x[y]) or any(z in str(d.values) for d in x[y] if isinstance(d, dict)))) for y,z in filter_dict.items())] \n\n return self", "def __getitem__(self, key):\n responses, resolution_map = self._data_dict.__getitem__(key)\n return (self.FilteredResponses(responses, self._path),\n self.FilteredResolution(resolution_map, self._path))", "def filter_by_prefix(query, key_name_prefix):\n root_kind = query._model_class.__name__\n min_key = db.Key.from_path(root_kind, key_name_prefix)\n max_key = db.Key.from_path(root_kind, key_name_prefix + u'\\uffff')\n return query.filter('__key__ >=', min_key).filter('__key__ <=', max_key)", "def result_filter(self, result, **kwargs):\n return result", "def critere_keys(key):\n critere = (key not in [\"input_observation\", \"y_true\", \"y_action\", \"y\"])\n critere = critere & (key[-3:] != \"_ph\") & (key[-7:] != \"_assign\")\n\n return critere", "def test_filter_one_key_second():\n data = [\n {\n \"name\": \"Bill\",\n \"last_name\": \"Gilbert\",\n \"occupation\": \"was here\",\n \"type\": \"person\",\n },\n {\"is_dead\": True, \"kind\": \"parrot\", \"type\": \"bird\", \"name\": \"polly\"},\n ]\n\n actual_result = make_filter(kind=\"parrot\").apply(data)\n expected_result = [data[1]]\n assert actual_result == expected_result", "def maybe_outfeed(self, key, value):\n if self._filters is not None:\n if any(f in key for f in self._filters):\n self._vals[key] = value\n else:\n self._vals[key] = value", "def get_keys(request):\n\n keys=[]\n reports = 
Report.objects.all().exclude(institute = 'PUBMED')\n for report in reports:\n json_rep = report.report_json\n for el in json_rep.keys():\n if el not in keys:\n keys.append(el)\n json_resp = {'keys':keys}\n return JsonResponse(json_resp)", "def filter_values(function, dictionary):\n return {k: v for k, v in dictionary.items() if function(v)}", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def lookup(self, key):", "def filter_keys(func, a_dict):\n return dict((k, v) for (k, v) in a_dict.items() if func(k))", "def filterListenerResult(result):\n if kwIndex[0] == -1:\n # This was the deterministic search\n if type(result) == dict:\n # An exact template match was found, callback with this\n outerDf.callback(result[mainKey])\n else:\n # The deterministic search did not find anything; start searching subtuples\n findNextSubtuple()\n return\n \n kwKey = subtupleKeys[kwIndex[0]]\n \n if type(result) == dict:\n # Value was found; this should be list of keys for tuples matching this criterion\n index = result[kwKey]\n listenerResults.extend(index)\n listenerSubtupleSetCounter[0] += 1\n \n if listenerSubtupleSetCounter[0] == 3:\n if havePossibleMatches[0] == False:\n havePossibleMatches[0] = True\n filteredResults.extend(listenerResults)\n else:\n # Filter the our list of possible matching tuples with the new results\n delKeys = []\n for tupleKey in filteredResults:\n if tupleKey not in listenerResults:\n delKeys.append(tupleKey)\n for tupleKey in delKeys:\n try:\n filteredResults.remove(tupleKey)\n except ValueError:\n pass\n\n if len(filteredResults) == 0:\n # No matches for this template exist at this point; there is no use in searching further\n outerDf.callback(None)\n else:\n # Reset the cycle\n listenerSubtupleSetCounter[0] = 0\n while len(listenerResults):\n listenerResults.pop()\n findNextSubtuple()\n else:\n findNextSubtuple()", "def sort_by_key(request):\n return request.param", "def sort_by_key(request):\n return request.param", "def filter(self, pkt):\n return pkt", "def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata", "def query(self, key: int, *args, **kwargs) -> Optional[bytes]:\n result = []\n\n with self.get_add_handler() as redis_handler:\n for _key in redis_handler.scan_iter(match=key):\n res = {\n \"key\": _key,\n \"values\": redis_handler.get(_key),\n }\n result.append(res)\n\n return result", "def __getitem__(self, (essid, key)):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PYR2_DBObject).join(ESSID_DBObject)\n result = q.filter(sql.and_(ESSID_DBObject.essid == essid, \\\n PYR2_DBObject.key == key)).first()\n if result is None:\n raise KeyError(\"No result for ESSID:Key-combination \" \\\n \"(%s:%s).\" % (essid, key))\n else:\n return result", "def translate_filter(value, datastorekey=''):\n\n if not datastorekey:\n return value\n correspondences = DataStore.objects.get(key=datastorekey)\n return correspondences.value.get(value, value)", "def 
filter_query(self, request, query, view):\n raise NotImplementedError('.filter_query() must be implemented.') # pragma: no cover", "def _filter(self, metadata, keys):\n if isinstance(metadata, list):\n new_metadata = []\n for m in metadata:\n filtered_list = self._filter(m, keys)\n if filtered_list is not None:\n new_metadata.append(filtered_list)\n if not new_metadata:\n return None\n return new_metadata\n if isinstance(metadata, dict):\n new_metadata = {}\n for k in list(metadata.keys()):\n if k in keys:\n new_metadata[k] = metadata[k]\n elif k.lower() in keys:\n new_metadata[k] = metadata[k]\n else:\n filtered_dict = self._filter(metadata[k], keys)\n if filtered_dict is not None:\n new_metadata[k] = filtered_dict\n if new_metadata == {}:\n return None\n return new_metadata\n if isinstance(metadata, tuple):\n filtered_tuple = [(x, keys) for x in metadata]\n for a in filtered_tuple:\n if a is not None:\n return tuple(filtered_tuple)\n return None\n return None", "def contains(self, key):\n visitor = VisitorContains()\n self.visit(key, visitor)\n return visitor.result", "def filter_data(f):\n @functools.wraps(f, assigned=[])\n def wrapper(*args, **kwds):\n out = f(*args, **kwds)\n\n def _filter(obj):\n if isinstance(obj, list):\n new_list = []\n for o in obj:\n new_list.append(_filter(o))\n obj = new_list\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(k, str):\n obj[k.lower()] = _filter(v)\n return obj\n return _filter(out)\n return wrapper", "def test_get_filtered_mapping_keys(\n query: ClickhouseQuery, expected_result: Sequence[str],\n) -> None:\n assert get_filtered_mapping_keys(query, \"tags\") == expected_result", "def filter(self, function):\n return FunctionalWrapper(filter(function, self.data))", "def filter_by_prefix(query, key_name_prefix, root_kind=None):\n root_kind = root_kind or query._model_class.__name__\n min_key = db.Key.from_path(root_kind, key_name_prefix)\n max_key = db.Key.from_path(root_kind, key_name_prefix + u'\\uffff')\n return query.filter('__key__ >=', min_key).filter('__key__ <=', max_key)", "def _custom_filter(self, query):\r\n return query", "def filter(self, func):\n n = len(self.data['id'])\n new_table = []\n for i in range(n):\n row = dict([(col, self.data[col][i]) for col in self.cols])\n if func(row):\n new_table.append(row)\n for col in self.cols:\n self.data[col] = []\n for row in new_table:\n self.data[col].append(row[col])\n return self", "def _oneProgramFilter(self, entity, params):\n\n desired_keyname = params.get('desired_keyname')\n if not desired_keyname:\n statistic = params.get('statistic')\n desired_keyname = statistic.scope.key().id_or_name()\n params['desired_keyname'] = desired_keyname\n\n program_field = params.get('program_field')\n if not program_field:\n program_field = self.helper.getProgramFieldForModel(params.get('model'))\n params['program_field'] = program_field\n if not program_field:\n raise ProtocolError()\n\n current_keyname = entity.__getattribute__(program_field).key().id_or_name()\n\n if current_keyname != desired_keyname:\n return False\n else:\n return True", "def filter_by_name(pillar_key, nodename=None):\n if nodename is None:\n nodename = __grains__['id']\n\n dictionary = __pillar__.get(pillar_key, {})\n filtered_list = []\n\n for name, items in dictionary.items():\n if name == '*' or name == nodename:\n filtered_list.extend(items)\n\n return filtered_list", "def pk_filter(cls, value=None):\n return {cls.PK_NAME: value}", "def dict_value_filter(key, data, dfilter, logger):\n\n 
logger.info(u'dict_value_filter:{l}'.format(l=locals()))\n newdata = {}\n if isinstance(data, dict):\n for nextkey, nextdata in data.items():\n returned_data = dict_value_filter(nextkey, nextdata, dfilter,\n logger)\n if bool(returned_data):\n newdata[nextkey] = returned_data\n elif isinstance(data, list):\n logger.info('Processing List:{}'.format(data))\n\n for item in data:\n logger.info(u'Process list:{}'.format(data))\n if isinstance(item, dict):\n logger.info('Found a dictionary:{}'.format(item))\n logger.info('Calling dict_value_filter:{k},{d},{f}'\n ''.format(k=key,d=item, f=dfilter))\n returned_data = dict_value_filter(key, item, dfilter, logger)\n if bool(returned_data):\n newdata = returned_data\n elif dfilter in unicode(data):\n newdata = data\n else:\n logger.info(u'Skipping data entry:{d}'.format(d=data))\n\n return newdata", "def filter_query(self, query, request, resource):\n raise NotImplementedError()", "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def filter_list(client, args):\n from ..util import print_query\n print_query(client.context.query)", "def is_filter_at_key(self, key):\n\n if self.has_key(key):\n attribute_status = getattr(self, key)\n if isinstance(attribute_status, self.__class__):\n return True\n\n return False", "def matches_filters(self, entity):\r\n item = dict(entity)\r\n item[self.query.get_meta().pk.column] = entity.key()\r\n return self._matches_filters(item, self.query.where)", "def __getitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).one()", "def extract(self, key, readby=False):\n if key not in self:\n return []\n match = self[key].reads if readby is False else self[key].readby\n found = []\n for k, v in self.items():\n if k in match:\n found.append(v)\n return found", "def press(self, key):\n self.view.filter_short_keys([key], [])\n return self", "def check_key(key, value):\n return lambda event, data: data[key] == value", "def filter(self, request):\n try:\n columns = dict(request.data.iterlists())['columns']\n except AttributeError:\n columns = request.data['columns']\n return self._get_filtered_results(request, columns=columns)", "async def get_all(self, key: datastore.Key) -> RV:\n\t\treturn await (await self.get(key)).collect() # type: ignore[return-value]", "def select(self, key, what, value):\n if not self.loaded:\n print(\"Database is not loaded\")\n return False\n\n if key not in self.columns:\n print(\"Key [{}] does not exist in your DB! \" . 
format(key))\n\n tmp_results = []\n for row in self.rows:\n details = row.get(key, None)\n if details is None:\n pass\n if what == \">\" and details > value:\n tmp_results.append(row)\n elif what == \"<\" and details < value:\n tmp_results.append(row)\n elif what == \"=\" and details == value:\n tmp_results.append(row)\n elif what == \">=\" and details >= value:\n tmp_results.append(row)\n elif what == \"<=\" and details <= value:\n tmp_results.append(row)\n\n return tmp_results", "def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)", "def getSpecific(self, keyword, key):", "def select(self, key, key_column, query_columns):\n if not self.index.has_index(key_column):\n self.index.create_index(key_column)\n\n matching_rids = self.index.locate(key_column,key)\n if not matching_rids:\n print(\"No matching rids\")\n return False\n\n output = []\n for rid in matching_rids:\n selection_result = self.table.select(rid, query_columns, True)\n if selection_result:\n output.extend(selection_result)\n else:\n return False\n return output", "def __contains__(self, key):\n return self.__getitem__(key)", "def filter_query(self, request, query, view):\n\n raise NotImplementedError('.filter_query() must be implemented.') # pragma: no cover", "def _filter_results(self, result):\n out_result = {}\n for change_type in result:\n temp_dict = {}\n for key in result[change_type]:\n log.debug(\"change_type = %s\", change_type)\n if self.ignore_added and (change_type == \"+++\"):\n continue\n log.debug(\"result[change_type] = %s, key = %s\",\n unicode(result[change_type]), key)\n log.debug(\"self._is_incex_key = %s\",\n self._is_incex_key(\n key,\n result[change_type][key]))\n if not self._is_incex_key(key, result[change_type][key]):\n temp_dict[key] = result[change_type][key]\n if len(temp_dict) > 0:\n out_result[change_type] = temp_dict\n\n return out_result", "def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }", "def filter_data(data, filter_dict):\n for key, match_string in filter_dict.items():\n if key not in data:\n logger.warning(\"{0} doesn't match a top level key\".format(key))\n continue\n values = data[key]\n matcher = re.compile(match_string)\n if isinstance(values, list):\n values = [v for v in values if matcher.search(v)]\n elif isinstance(values, dict):\n values = dict((k, v) for k, v in values.items() if matcher.search(k))\n else:\n raise MiuraException(\"cannot filter a {0}\".format(type(values)))\n data[key] = values", "def test_key_predicate(datum):\n return 0 < datum", "def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])" ]
[ "0.7041496", "0.67481804", "0.66853714", "0.6602866", "0.6594346", "0.6538373", "0.62875813", "0.625645", "0.6235937", "0.61752564", "0.617185", "0.6147107", "0.61328727", "0.6030898", "0.59337604", "0.5894746", "0.58756953", "0.58703166", "0.5859899", "0.5856352", "0.5783209", "0.57674354", "0.57643515", "0.5757454", "0.5740265", "0.57261556", "0.5725724", "0.57035244", "0.56365156", "0.5635738", "0.56322074", "0.5618407", "0.5604946", "0.55558556", "0.5555188", "0.555138", "0.55474293", "0.55466515", "0.5544246", "0.5536041", "0.553603", "0.5532374", "0.55274296", "0.55171853", "0.5499906", "0.5499218", "0.5494601", "0.54907805", "0.5488656", "0.54870677", "0.54634804", "0.5454085", "0.54244256", "0.54228765", "0.5417485", "0.5408436", "0.54059005", "0.5395258", "0.5383185", "0.5382346", "0.5382346", "0.53810906", "0.5374413", "0.5373789", "0.5353681", "0.53514385", "0.5344449", "0.53281707", "0.5327858", "0.5317931", "0.5307776", "0.5305682", "0.5299177", "0.52879953", "0.5284817", "0.52638716", "0.52575696", "0.5228521", "0.5227481", "0.5225705", "0.52223045", "0.52161914", "0.52149254", "0.5214734", "0.5213399", "0.5199945", "0.5197158", "0.51939285", "0.519162", "0.5191334", "0.51906675", "0.51827115", "0.5181821", "0.5176142", "0.5175267", "0.5172069", "0.5163013", "0.51515037", "0.51494503", "0.5148799", "0.5142306" ]
0.0
-1
Convert a dictionary to an earth engine feature server side
def dict_to_feature(d):
    f = ee.Feature(None,ee.Dictionary(d))
    return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _preprocess(self, feature_dict):\n return feature_dict", "def from_dict(cls, dikt) -> 'Features':\n return util.deserialize_model(dikt, cls)", "def dict_to_feature(feature_dict, keys, max_value=None):\n feature = []\n for key, val in feature_dict.items(): # First level\n if key not in keys:\n continue\n if val is None or val == \"auto\" or key == \"autotuning\" or val == \"\":\n continue\n if isinstance(val, dict):\n feature.append(dict_to_feature(val, max_value))\n else:\n feature.append(float(val))\n\n # normalization, should not matter in tree models\n if max_value is not None:\n norm_feature = []\n for f, mv in zip(feature, max_value):\n norm_feature.append(f / mv)\n feature = norm_feature\n\n return feature", "def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return", "def map_to_app_features(self, app):\n app['features'] = []\n for form_feature in self.features:\n feature = {}\n if form_feature.feature_name.data:\n feature['name'] = form_feature.feature_name.data\n if form_feature.feature_version.data:\n feature['version'] = form_feature.feature_version.data\n if form_feature.feature_provisioner.data:\n feature['provisioner'] = form_feature.feature_provisioner.data\n if form_feature.feature_parameters.data:\n json_ob = json.loads(form_feature.feature_parameters.data)\n if json_ob:\n feature['parameters'] = json_ob\n feature['version'] = ''\n else:\n feature['parameters'] = {}\n if feature:\n app['features'].append(feature)", "def add_engineered(features):\n features[\"londiff\"] = features[\"dropofflon\"] - features[\"pickuplon\"]\n features[\"latdiff\"] = features[\"dropofflat\"] - features[\"pickuplat\"]\n features[\"euclidean\"] = tf.math.sqrt(\n features[\"londiff\"]**2 + features[\"latdiff\"]**2)\n return features", "def dict_to_example(dictionary):\n features = {}\n for k, v in six.iteritems(dictionary):\n features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))\n return tf.train.Example(features=tf.train.Features(feature=features))", "def sites_geojson():\n\n with Config() as config:\n with db.Connection(config) as con:\n features = con.features()\n features = list(features)\n return flask.jsonify(features)", "def __call__(self, *args, **kwargs):\n self.features = dict((k, v()) for k, v in self.features.items())\n return self.features", "def process_features(\n config,\n raw_features: Union[tf.train.Example, features.FeatureDict],\n random_seed: int) -> features.FeatureDict:\n if isinstance(raw_features, dict):\n return features.np_example_to_features(\n np_example=raw_features,\n config=config,\n random_seed=random_seed)\n else:\n return features.tf_example_to_features(\n tf_example=raw_features,\n config=config,\n random_seed=random_seed)", "def process_feature_file(filename: str) -> Dict[str, Any]:\n feature = json.loads(open(filename).read())\n template = feature['query']\n name = feature['name']\n params = feature['params']\n feature_spec = {\n 'name': name,\n 'template': template,\n 'params': params\n }\n return feature_spec", "def GEOJsonToEWKT(dict): \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n 
return dict['__GEOSGeometry__'][1][0]\n \n return dict", "def save_feature(self):\n feature_dict = {\n 'name': self.name,\n 'preActionDes': self.pre_action_des,\n 'inActionDes': self.in_action_des,\n 'postActionDes': self.post_action_des,\n 'actionable': self.actionable,\n 'usable': self.usable,\n 'state': self.state,\n 'featureId': self.feature_id\n }\n return feature_dict", "def enhance_metadata(metadata, features='all'):\n\n # available options\n ortographic_features = ['w_length','n_vowels','n_consonants']\n lexical_features = ['uni_freq', 'bi_freq', 'func_word','count']\n position_features = ['position','position_end','is_first_word','is_last_word']\n\n # make list of features\n if features == 'all': features = ortographic_features +lexical_features + position_features \n\n # use ws clean to lower case\n words = [word.lower() for word in metadata['word'].values]\n\n # itereate features and fill metadata\n for feature in features:\n # ORTOGRAPHIC ##############################\n if feature == 'w_length': \n metadata[feature] = w_length(words)\n if feature == 'n_consonants':\n metadata[feature] = n_consonants(words)\n if feature == 'n_vowels':\n metadata[feature] = n_vowels(words)\n\n # LEXICAL ###################################\n if feature == 'uni_freq':\n metadata[feature] = unigram(words)\n if feature == 'bi_freq':\n metadata[feature] = bigram(words)\n if feature == 'func_word':\n metadata[feature] = function_word(words)\n if feature == 'count':\n metadata[feature] = count(words)\n\n # POSITION ###################################\n if feature == 'position':\n metadata[feature] = position(words)\n if feature == 'position_end':\n metadata[feature] = position_end(words)\n if feature == 'is_first_word':\n metadata[feature] = first_word(words)\n if feature == 'is_last_word':\n metadata[feature] = last_word(words)\n\n return metadata", "def _interactive_input_tensor_to_features_dict(feature_map, hparams):\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n\n x = inputs\n # Remove the batch dimension.\n num_samples = x[0]\n length = x[2]\n x = tf.slice(x, [3], tf.to_int32([length]))\n x = tf.reshape(x, [1, -1, 1, 1])\n # Transform into a batch of size num_samples to get that many random\n # decodes.\n x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))\n\n p_hparams = hparams.problem_hparams\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n features[\"decode_length\"] = inputs[1]\n features[\"inputs\"] = x\n return features", "def convert_series_to_feature(series: Types.SeriesObj,) -> Dict[str, tf.train.Feature]:\n try:\n image, metadata = series\n dicom_id = f\"{metadata.get('Study Instance UID', 'unknown_study')}/{metadata.get('Series Instance UID', 'unknown_series')}/\"\n\n if metadata.get(\"flags\") and metadata.get(\"time\"):\n name = f\"time{metadata.get('time')[1:]}/{'_'.join(metadata.get('flags'))}/\"\n else:\n name = dicom_id\n return dict(\n [\n (f\"{name}{k}\", v)\n for (k, v) in {\n \"image\": floatList_feature(image.flatten().tolist()),\n \"dx\": float_feature(metadata.get(\"Pixel Spacing\")[0]),\n \"dy\": float_feature(metadata.get(\"Pixel Spacing\")[1]),\n \"dz\": float_feature(metadata.get(\"Spacing Between Slices\")),\n \"is_seg\": int64_feature(int(metadata.get(\"Modality\") == \"SEG\")),\n \"right\": int64_feature(int(metadata.get(\"Laterality\") == \"R\")),\n \"shape\": 
int64List_feature(image.shape),\n \"dicom_id\": bytes_feature(dicom_id.encode()),\n \"Image Position (Patient)\": floatList_feature(metadata.get(\"Image Position (Patient)\")),\n \"Image Orientation (Patient)\": floatList_feature(metadata.get(\"Image Orientation (Patient)\")),\n \"z_bound\": floatList_feature(metadata.get(\"slice_z\")),\n }.items()\n ]\n )\n except Exception as e:\n _logger.error(\n f\"Error making Series Features. Series meta: {metadata}. Error: {str(e)}\"\n )\n return {}", "def convert_study_to_feature(study: List[Types.SeriesObj]) -> List[Dict[str, tf.train.Feature]]:\n return [convert_series_to_feature(s) for s in study]", "def feature_dict(sent, i):\n # WORK HERE!!\n return {}", "def _make_feature(self, val, app, reg):\n return {\"type\": \"Feature\",\n \"properties\": self._get_properties(val.set_app(app).set_region(reg)),\n \"geometry\": self._get_geometry(val.geom)\n }", "def post_feature_set(\n feature_set: Dict[str, Any],\n model_name: str,\n es_host: str\n) -> None:\n host = f'http://{es_host}'\n url = f'_ltr/_featureset/{model_name}'\n url = urljoin(host, url)\n header = {'Content-Type': 'application/json'}\n resp = requests.post(url, data=json.dumps(feature_set), headers=header)\n if not resp.ok:\n raise Exception(resp.content)", "def to_feature_dict(self):\n return {feature:self.get_feature(feature) for feature in self._FEATURES}", "def expand_feature_meta(feat_meta):\n if type(feat_meta) != dict:\n if type(feat_meta).__name__ == 'Feature':\n feat_meta = feat_meta.getInfo()\n else:\n raise RuntimeError('Unsupported EE object')\n\n out_str = ''\n for k, y in feat_meta.items():\n if k == 'geometry':\n for _k, _y in y.items():\n out_str += '{}: {}\\n'.format(str(_k), str(_y))\n\n elif k == 'properties':\n for _k, _y in y.items():\n out_str += 'Property: {} : {}\\n'.format(_k, str(_y))\n else:\n out_str += '{} : {}\\n'.format(str(k), str(y))\n return out_str", "def test_convert_features(convert_features_parameters):\n test_input = convert_features_parameters[0]\n expected_output = convert_features_parameters[1]\n assert geojson2fromto.convert(test_input) == expected_output", "def wrap_feature(self, data):\n feature = {\n 'type': 'Feature',\n 'geometry': data.pop(self.__geometry_field_name__, None)\n }\n feature['properties'] = data\n return feature", "def _parse_tensor_or_dict(features):\n if isinstance(features, dict):\n keys = sorted(features.keys())\n with ops.colocate_with(features[keys[0]]):\n features = array_ops.concat([features[k] for k in keys], 1)\n return features", "def encode_features(item):\n item['is_male'] = int(item['Sex'] == 'male')\n del item['Name']\n del item['Sex']\n # del item['Fare']\n del item['Cabin']\n del item['Ticket']\n\n # One-hot encoding: Embarked\n item['embarked_s'] = int(item['Embarked'] == 'S')\n item['embarked_c'] = int(item['Embarked'] == 'C')\n item['embarked_q'] = int(item['Embarked'] == 'Q')\n del item['Embarked']\n\n # One-hot encoding: Title\n item['title_mr'] = int(item['Title'] == 'Mr')\n item['title_miss'] = int(item['Title'] == 'Miss')\n item['title_mrs'] = int(item['Title'] == 'Mrs')\n item['title_master'] = int(item['Title'] == 'Master')\n item['title_other'] = 1 - (item['title_mr'] +\n item['title_miss'] +\n item['title_mrs'] +\n item['title_master'])\n del item['Title']\n return item", "def convert_patient_to_feature(\n patient_data: Dict[str, object]\n) -> Dict[str, tf.train.Feature]:\n # TODO: Maybe prefix with \"patient/\" for post processing ease.\n return {\n \"patient_id\": 
int64_feature(patient_data.get(\"patient_id\")),\n \"age\": float_feature(patient_data.get(\"demographic_metadata\").get(\"age\")),\n \"race\": int64_feature(patient_data.get(\"demographic_metadata\").get(\"race\")),\n \"ERpos\": int64_feature(patient_data.get(\"clinical\").get(\"ERpos\")),\n \"Pgpos\": int64_feature(patient_data.get(\"clinical\").get(\"Pgpos\")),\n \"HRpos\": int64_feature(patient_data.get(\"clinical\").get(\"HRpos\")),\n \"HER_two_status\": int64_feature(\n patient_data.get(\"clinical\").get(\"HER_two_status\")\n ),\n \"three_level_HER\": int64_feature(\n patient_data.get(\"clinical\").get(\"three_level_HER\")\n ),\n \"Bilateral\": int64_feature(patient_data.get(\"clinical\").get(\"Bilateral\")),\n \"Laterality\": int64_feature(patient_data.get(\"clinical\").get(\"Laterality\")),\n # Outcomes\n \"Sstat\": int64_feature(patient_data.get(\"outcome\").get(\"Sstat\")),\n \"survival_duration\": int64_feature(\n patient_data.get(\"outcome\").get(\"survival_duration\")\n ),\n \"rfs_ind\": int64_feature(patient_data.get(\"outcome\").get(\"rfs_ind\")),\n \"rfs_duration\": int64_feature(patient_data.get(\"outcome\").get(\"rfs_duration\")),\n \"pCR\": int64_feature(patient_data.get(\"outcome\").get(\"pCR\")),\n \"RCB\": int64_feature(patient_data.get(\"outcome\").get(\"RCB\")),\n \"LD\": int64List_feature(patient_data.get(\"LD\")),\n }", "def from_dict(cls, fs_dict):\n\n feature_set_proto = json_format.ParseDict(\n fs_dict, FeatureSetProto(), ignore_unknown_fields=True\n )\n return cls.from_proto(feature_set_proto)", "def create_feature_map(string, features):\n fmap = {}\n vec = create_vector(string)\n\n for ngram in features:\n if ngram in vec:\n fmap[ngram] = vec[ngram]\n\n return fmap", "def BoltMotionObjToFeatureObj(all_bolt_data, electrode_pca_dict):\n\n # Store in feature class object\n all_features_obj_dict = dict();\n\n for motion_name in all_bolt_data:\n trial_list = all_bolt_data.get(motion_name)\n print motion_name\n\n feature_list = list()\n # For all objects\n for trial in trial_list:\n\n bolt_feature_obj = extract_features.extract_features(trial, electrode_pca_dict[motion_name])\n\n feature_list.append(bolt_feature_obj)\n\n # Store all of the objects away\n all_features_obj_dict[motion_name] = feature_list\n\n return all_features_obj_dict", "def create_feature(example):\n input_ids, label_ids = encode_fn(\n example['tokens'], example['labels'])\n\n features = {\n 'input_ids': int64_feature(input_ids),\n 'label_ids': int64_feature(label_ids)\n }\n\n return features", "def _dict_of_nonlist_numerical_to_tf_features(my_dict):\n\n tf_types_dict = {}\n tf_features_dict = {}\n for k, v in my_dict.items():\n if isinstance(v, int) or isinstance(v, bool):\n tf_features_dict[k] = _int64_feature(v)\n tf_types_dict[k] = tf.int64\n elif isinstance(v, float):\n tf_features_dict[k] = _float_feature(v)\n tf_types_dict[k] = tf.float32\n else:\n pass\n\n return tf_features_dict, tf_types_dict", "def _interactive_input_tensor_to_features_dict(feature_map, hparams):\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n input_is_image = False if len(inputs.get_shape()) < 3 else True\n\n x = inputs\n if input_is_image:\n x = tf.image.resize_images(x, [299, 299])\n x = tf.reshape(x, [1, 299, 299, -1])\n x = tf.to_int32(x)\n else:\n # Remove the batch dimension.\n num_samples = x[0]\n length = x[2]\n x = tf.slice(x, [3], tf.to_int32([length]))\n x = tf.reshape(x, [1, -1, 1, 1])\n # Transform into a batch of size num_samples to get that many random\n # decodes.\n x = tf.tile(x, 
tf.to_int32([num_samples, 1, 1, 1]))\n\n p_hparams = hparams.problem_hparams\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n features[\"decode_length\"] = (\n IMAGE_DECODE_LENGTH if input_is_image else inputs[1])\n features[\"inputs\"] = x\n # Save inputs to \"partial_targets\" when prepending inputs to targets. Also\n # keep \"inputs\" as some models crash if they don't exist.\n if getattr(hparams, \"prepend_mode\", \"none\") != \"none\":\n shape = tf.shape(x)\n partial_targets = tf.reshape(x, [shape[0], shape[1]])\n partial_targets = tf.pad(partial_targets, [[0, 0], [0, 1]])\n features[\"partial_targets\"] = partial_targets\n return features", "def parseFeatures(featuresDirectory):\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n queries = db.documents.distinct(\"query_id\")\n docToFeatureVector = {}\n for featureName in featureNames:\n for query in queries:\n if not os.path.exists(featuresDirectory + '/' +featureName + '_' + str(query)):\n continue\n f = open(featuresDirectory + '/' +featureName + '_' + str(query), 'r')\n\n for line in f:\n documentID = line.split()[0]\n docno = documentID\n features = line.split()[1:]\n floatFeatures = [float(val) for val in features]\n documentFeaturesDict = docToFeatureVector.get(docno, {})\n documentFeaturesDict[featureName.replace(\".\",\"_\")] = floatFeatures\n docToFeatureVector[docno] = documentFeaturesDict\n return docToFeatureVector", "def dict_to_entry(dct):\r\n relevant_items = [(k, v) for (k, v) in dct.items() if k in features]\r\n ordered = sorted(relevant_items, key=lambda (k, v): header.index(k))\r\n vals = [v for (_, v) in ordered]\r\n return vals", "def convert_example(example, tokenizer):\n\n feature = tokenizer(\n text=example['question'],\n text_pair=example['answer'],\n max_seq_len=args.max_seq_length)\n feature['labels'] = example['labels']\n feature['id'] = example['id']\n\n return feature", "def build_input_features_from_dict(sample: Dict[str, Union[str, int, np.ndarray]]) -> InputFeatures:\n if random.uniform(0.0, 1.0) < data_params.fraction_using_func_name:\n return InputFeatures(\n language=data_params.lang_ids[cast(str, sample[\"language\"])],\n similarity=cast(int, sample[\"similarity\"]),\n query_tokens=cast(np.ndarray, sample[\"query_tokens_func_name_as_query\"]),\n query_tokens_mask=cast(np.ndarray, sample[\"query_tokens_mask_func_name_as_query\"]),\n query_docstring_tokens=cast(np.ndarray, sample[\"query_tokens_docstring_as_query\"]),\n query_docstring_tokens_mask=cast(np.ndarray, sample[\"query_tokens_mask_docstring_as_query\"]),\n code_tokens=cast(np.ndarray, sample[\"code_tokens_func_name_as_query\"]),\n code_tokens_mask=cast(np.ndarray, sample[\"code_tokens_mask_func_name_as_query\"]),\n )\n else:\n return InputFeatures(\n language=data_params.lang_ids[cast(str, sample[\"language\"])],\n similarity=cast(int, sample[\"similarity\"]),\n query_tokens=cast(np.ndarray, sample[\"query_tokens_docstring_as_query\"]),\n query_tokens_mask=cast(np.ndarray, sample[\"query_tokens_mask_docstring_as_query\"]),\n query_docstring_tokens=cast(np.ndarray, sample[\"query_tokens_docstring_as_query\"]),\n query_docstring_tokens_mask=cast(np.ndarray, sample[\"query_tokens_mask_docstring_as_query\"]),\n code_tokens=cast(np.ndarray, sample[\"code_tokens_docstring_as_query\"]),\n code_tokens_mask=cast(np.ndarray, 
sample[\"code_tokens_mask_docstring_as_query\"]),\n )", "def preprocess(item):\n item = feature_engineering(item)\n item = encode_features(item)\n return item", "def create(self, new_feature):\n all_data = self._load()\n\n # Hijack the feature id and make sure it's unique\n new_feature['id'] = str(uuid.uuid4())\n\n all_data['features'].append(new_feature)\n\n with open(self.path, 'w') as dst:\n dst.write(json.dumps(all_data))", "def get_features_from_feature_server(url, query):\n\n logger.debug('url received: ' + url + ', query received: ' + query)\n\n features = []\n f = FeatureLayer(url = url)\n feature_set = f.query(where = query)\n for feature in feature_set:\n features.append(feature.as_dict)\n return features", "def getFeatureDicts(self):\n pass", "def _save_api_feature_data(self, dataset_id):\n dataset = Dataset.objects.get(id=dataset_id)\n json_data = Facade.prepare_dataset_data(dataset) # normalize data and convert it to json\n dataset.normalized_feature_JSON = json_data\n dataset.save() # save normalized data in models", "def extractFeatures(self, datum):\n abstract", "def getFeatures(gdf):\r\n import json\r\n features = [json.loads(gdf.to_json())['features'][0]['geometry']]\r\n return features", "def create(self, new_feature):\n\n all_data = self._load()\n\n if self.id_field not in new_feature and\\\n self.id_field not in new_feature['properties']:\n new_feature['properties'][self.id_field] = str(uuid.uuid4())\n\n all_data['features'].append(new_feature)\n\n with open(self.data, 'w') as dst:\n dst.write(json.dumps(all_data))", "def get_feature_collection(page):\n #print page['words']\n feature_array = []\n for i,word in enumerate(page['words']):\n # should line_num be required here? It's not supported by -bbox output... \n word_properties = {'text':word['text'], 'line_num':word['line_num']}\n # should we instead rely on the the word number for the id? 
\n feature_array.append(get_geojson_feature(i, word['bbox'], word_properties))\n \n featurecollection = geojson.FeatureCollection(feature_array)\n # todo: add page dimensions\n return geojson.dumps(featurecollection)", "def denseFeature(self, feat):\n return {'feat': feat}", "def observation_features_to_dict(obs_features: ObservationFeatures) -> Dict[str, Any]:\n return {\n \"__type\": obs_features.__class__.__name__,\n \"parameters\": obs_features.parameters,\n \"trial_index\": obs_features.trial_index,\n \"start_time\": obs_features.start_time,\n \"end_time\": obs_features.end_time,\n \"random_split\": obs_features.random_split,\n \"metadata\": obs_features.metadata,\n }", "def unwrap_feature(self, data):\n if data['type'] != 'Feature':\n raise ValidationError('Expecting a Feature object')\n flat = data['properties']\n flat[self.__geometry_field_name__] = data['geometry']\n return flat", "def default(self, o): \n if isinstance(o, GEOSGeometry):\n dictval = json.loads(o.geojson)\n #raise Exception(o.ewkt)\n dictval['__GEOSGeometry__'] = ['__init__', [o.ewkt]] #json class hint; see http://json-rpc.org/wiki/specification\n return dictval\n else:\n super(DjangoGEOJSONEncoder, self).default(o)", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def create_feature_set(es_host: str, model_name: str) -> None:\n features_path = PATH / f'{model_name}' / 'features'\n feature_set = {\n 'featureset': {\n 'name': model_name,\n 'features': [process_feature_file(str(filename)) for filename in\n features_path.glob('*')]\n }\n }\n post_feature_set(feature_set, model_name, es_host)", "def as_featurecollection(features: typing.List[dict]) -> dict:\n\n def _check_all_features(obj):\n for f in obj:\n if not _is_instance_of(f, 'Feature'):\n raise GeoJSONError(f'{f} is not a valid GeoJSON Feature ' +\n 'object.')\n\n _check_all_features(features)\n return {'type': 'FeatureCollection', 'features': features}", "def getFeatures(gdf):\r\n import json\r\n return [json.loads(gdf.to_json())['features'][0]['geometry']]", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def customAritcleESDecoder(articleDict):\n return namedtuple(\"ArticleES\", articleDict.keys())(*articleDict.value())\n # namedtuple是一个函数,相当于执行函数返回函数的返回值", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def get_feature_set_SA(tweet):\n features= {}\n return features", "def feature_sign_dict(three_feature_list):\n\n feature_dict = {}\n\n for i in list(range(1, 11)):\n feature_dict[-i] = three_feature_list[0]\n\n feature_dict[0] = three_feature_list[1]\n\n for i in list(range(1, 11)):\n feature_dict[i] = three_feature_list[2]\n\n return feature_dict", "def transform_features_op(\n self, train_features: Dict[str, tf.Tensor], metadata_features: Dict[str, tf.Tensor]\n ):\n\n # Sorting the train features dictionary so that we control the order\n train_features_list = [train_features[k] for k in sorted(train_features)]\n\n # Concat all train features to get a dense feature vector\n train_features_transformed = tf.concat(train_features_list, axis=-1, name=\"train_features\")\n\n return train_features_transformed, metadata_features", "def tweet_dict_to_nparr( dict ):\n fvec = numpy.empty( len(testFeatures) )\n\n for i in range( 0, len(testFeatures) ):\n fvec[i] = dict[ testFeatures[i][0] ]\n\n return fvec", "def getFeatures(gdf):\n import json\n return [json.loads(gdf.to_json())['features'][0]['geometry']]", "def 
getFeatures(gdf):\n import json\n return [json.loads(gdf.to_json())['features'][0]['geometry']]", "def getFeatures(gdf):\n import json\n return [json.loads(gdf.to_json())['features'][0]['geometry']]", "def region2feature(region , ext):\n server = \"https://rest.ensembl.org/overlap/region/human/\"+region +\"?\"\n for i in range(len(ext)):\n server = server + \"feature=\" + ext[i] + \";\"\n\n server = server[:-1]\n #print(server)\n r = requests.get(server,headers={\"Content-Type\" : \"application/json\"})\n\n if not r.ok:\n r.raise_for_status()\n sys.exit()\n decoded = r.content.decode()\n dict_decoded = json.loads(decoded)\n if len(dict_decoded):\n len_dict = len(dict_decoded)\n for i in range(len_dict):\n if dict_decoded[i]['feature_type'] == \"gene\":\n ID = dict_decoded[i]['gene_id']\n start = dict_decoded[i]['start']\n end = dict_decoded[i]['end']\n name = dict_decoded[i]['external_name']\n biotype = dict_decoded[i]['biotype']\n chr = dict_decoded[i]['seq_region_name']\n des = dict_decoded[i]['description']\n print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\".\\\n format(ID,name,chr,start,end,biotype,des),file = Output)\n return(name)\n\n if dict_decoded[i]['feature_type'] == \"regulatory\":\n ID = dict_decoded[i]['id']\n chr = dict_decoded[i]['seq_region_name']\n start = dict_decoded[i]['start']\n end = dict_decoded[i]['end']\n des = dict_decoded[i]['description']\n feature_type = dict_decoded[i]['feature_type']\n print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\".\\\n format(ID,chr,start,end,feature_type,des),file = Output)\n return(None)\n else:\n print(\"No match\")", "def json_serving_input_fn():\n inputs = {}\n for feat in INPUT_COLUMNS:\n inputs[feat.name] = tf.placeholder(shape=[None], dtype=feat.dtype)\n\n features = {\n key: tf.expand_dims(tensor, -1)\n for key, tensor in inputs.iteritems()\n }\n return tf.contrib.learn.InputFnOps(features, None, inputs)", "def features_to_db(track_uri):\n data = spotify.audio_features(track_uri)[0]\n audio_features = AudioFeatures(**data)\n DB.session.add(audio_features)\n # id, uri, danceability, energy, key, loudness, mode,\n # speechiness, acousticness, instrumentalness,\n # liveness, valence, tempo, type", "def feature_eng2(housing_tr, housing):\n logging.info(\"Adding features.....\")\n housing_tr[\"rooms_per_household\"] = (\n housing_tr[\"total_rooms\"] / housing_tr[\"households\"]\n )\n housing_tr[\"bedrooms_per_room\"] = (\n housing_tr[\"total_bedrooms\"] / housing_tr[\"total_rooms\"]\n )\n housing_tr[\"population_per_household\"] = (\n housing_tr[\"population\"] / housing_tr[\"households\"]\n )\n housing_cat = housing[[\"ocean_proximity\"]]\n housing_prepared = housing_tr.join(\n pd.get_dummies(housing_cat, drop_first=True)\n )\n return housing_prepared", "def feature(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___Feature]:", "def get_geojson_feature(id, raw_bbox_string, properties_dict):\n coords = raw_bbox_string.split()\n \n # Tesseract uses ints, but allow floats\n for i, val in enumerate(coords):\n coords[i] = float(val)\n # bbox order = # x0 y0 x1 y1\n \n bbox_json_obj = geojson.Polygon([[\n (coords[0], coords[1]), \n (coords[0], coords[3]), \n (coords[2], coords[3]), \n (coords[2], coords[1]),\n (coords[0], coords[1])\n ]])\n return geojson.Feature(id, bbox_json_obj, properties=properties_dict)", "def convert_str(feature_vectors):\n for key in feature_vectors:\n feature_vectors[key] = map(lambda el: str(el), feature_vectors[key])", "def parse_mapzen_response(txt):\n geoDict = {}\n receivedJSONDict = 
json.loads(txt)\n if receivedJSONDict['features']:\n geoDict['status'] = \"OK\"\n geoDict['label'] = receivedJSONDict['features'][0]['properties']['label']\n geoDict['confidence'] = receivedJSONDict['features'][0]['properties']['confidence']\n geoDict['latitude'] = receivedJSONDict['features'][0]['geometry']['coordinates'][1]\n geoDict['longitude'] = receivedJSONDict['features'][0]['geometry']['coordinates'][0]\n else:\n \tgeoDict['status'] = None\n return geoDict", "def read_features_dict(path):\n # type_dict specifies the type conversion to be applied. Each key denotes\n # a column name and the value is the conversion. Columns not included are\n # converted to floats.\n type_dict = {'source': str, 'target': str, 'status': int}\n with open(path) as feature_file:\n reader = csv.DictReader(feature_file, delimiter='\\t')\n for row in reader:\n yield {key: type_dict.get(key, float)(value) for key, value in row.items()}", "def feature_magnitude_dict(three_feature_list):\n\n values_list = [[1, 2, 3],\n [4, 5, 6, 7],\n [7, 8, 9, 10]]\n\n feature_dict = {}\n for feature, values in zip(three_feature_list, values_list):\n\n for value in values:\n feature_dict[value] = feature\n feature_dict[-value] = feature\n\n return feature_dict", "def extract_feature(self, article) :\n pass", "def feature_dist_func_dict():\n return {\"tanimoto_dissimilarity\": tanimoto_dissimilarity}", "def tweet2features(tweet):\r\n features = {\r\n 'len(tweet)': len(tweet),\r\n 'avg_word_length': get_avg_word_len(tweet)\r\n }\r\n return features", "def convert_to_dict_then_json(row, sep,feature_list):\n feature_values = row.decode('utf-8').replace('\\n', '').replace('\\r', '').split(sep)\n feature_values_clean = [float(x) if is_number(x) else 0 for x in feature_values]\n feat_dict = dict(zip(feature_list, feature_values_clean))\n feat_json = json.dumps(feat_dict).encode('utf-8')\n return(feat_json)", "def feature_to_open511_element(feature):\n\n # Using a hash of the geometry for an ID. 
For proper production use,\n # there'll probably have to be some code in the importer\n # that compares to existing entries in the DB to determine whether\n # this is new or modified...\n geom_hash = hashlib.md5(feature.geom.wkt).hexdigest()\n id = JURISDICTION + ':' + geom_hash\n while id in ids_seen:\n id += 'x'\n ids_seen.add(id)\n\n elem = E.RoadEvent(id=id)\n\n def set_val(tag, val):\n if val not in (None, ''):\n e = etree.Element(tag)\n e.text = unicode(val)\n elem.append(e)\n\n set_val('Title', feature.get('Name').decode('utf8'))\n\n blob = lxml.html.fragment_fromstring(feature.get('Description').decode('utf8'),\n create_parent='content')\n\n description_label = blob.xpath('//strong[text()=\"Description\"]')\n if description_label:\n description_bits = []\n el = description_label[0].getnext()\n while el.tag == 'p':\n description_bits.append(_get_el_text(el))\n el = el.getnext()\n set_val('Description', '\\n\\n'.join(description_bits))\n\n localisation = blob.cssselect('div#localisation p')\n if localisation:\n set_val('AffectedRoads', '\\n\\n'.join(_get_el_text(el) for el in localisation))\n\n try:\n set_val('ExternalURL', blob.cssselect('#avis_residants a, #en_savoir_plus a')[0].get('href'))\n except IndexError:\n pass\n\n facultatif = blob.cssselect('div#itineraire_facult p')\n if facultatif:\n set_val('Detour', '\\n\\n'.join(_get_el_text(el) for el in facultatif))\n\n if blob.cssselect('div#dates strong'):\n try:\n start_date = blob.xpath(u'div[@id=\"dates\"]/strong[text()=\"Date de d\\xe9but\"]')[0].tail\n end_date = blob.xpath(u'div[@id=\"dates\"]/strong[text()=\"Date de fin\"]')[0].tail\n if start_date and end_date:\n set_val('StartDate', _fr_string_to_date(start_date))\n set_val('EndDate', _fr_string_to_date(end_date))\n except IndexError:\n pass\n\n elem.append(E.Geometry(\n geom_to_xml_element(feature.geom)\n ))\n\n return elem", "def image_to_features(image):\n image = tf.keras.preprocessing.image.img_to_array(image)\n image = tf.keras.applications.mobilenet_v2.preprocess_input(image)\n image = np.expand_dims(image, axis=0)\n return image", "def load_from_geojson(self, filename_or_url):", "def getFeatures(gdf):\n\timport json\n\treturn [json.loads(gdf.to_json())['features'][0]['geometry']]", "def render_single(self, data):\n try:\n data['type'] = \"Feature\"\n data['geometry'] = json.loads(data.get('location').pop('geometry'))\n return data\n except:\n return data", "def display_features():\n\n # Parse the URL, check for implicit resources, extract the primary record\n # http://127.0.0.1:8000/eden/gis/display_features&module=pr&resource=person&instance=1&jresource=presence\n ok = 0\n if \"module\" in request.vars:\n res_module = request.vars.module\n ok +=1\n if \"resource\" in request.vars:\n resource = request.vars.resource\n ok +=1\n if \"instance\" in request.vars:\n instance = int(request.vars.instance)\n ok +=1\n if \"jresource\" in request.vars:\n jresource = request.vars.jresource\n ok +=1\n if ok != 4:\n session.error = T(\"Insufficient vars: Need module, resource, jresource, instance\")\n raise HTTP(400, body=s3xrc.xml.json_message(False, 400, session.error))\n\n component, pkey, fkey = s3xrc.model.get_component(res_module, resource, jresource)\n table = db[\"%s_%s\" % (res_module, resource)]\n jtable = db[str(component.table)]\n query = (jtable[fkey] == table[pkey]) & (table.id == instance)\n # Filter out deleted\n deleted = (table.deleted == False)\n query = query & deleted\n # Filter out inaccessible\n query2 = db.gis_location.id == jtable.location_id\n 
accessible = s3_accessible_query(\"read\", db.gis_location)\n query2 = query2 & accessible\n\n features = db(query).select(db.gis_location.ALL, left = [db.gis_location.on(query2)])\n\n # Calculate an appropriate BBox\n bounds = gis.get_bounds(features=features)\n\n map = gis.show_map(\n feature_queries = [{\"name\" : \"Features\", \"query\" : features, \"active\" : True}],\n bbox = bounds,\n window = True,\n closable = False,\n collapsed = True\n )\n\n return dict(map=map)", "def extract_feature(c, pid, wid, extract_info_dict):\n\n image_feature_filter_names = set(\n extract_info_dict['image_feature_filter_name']\n )\n\n # Extract all images (different sites / fields of view)\n c.execute(\n \"\"\"\n SELECT TableNumber, ImageNumber\n FROM Image\n WHERE Image_Metadata_Plate = {} AND Image_Metadata_Well = '{}'\n \"\"\".format(pid, wid)\n )\n\n tid_iid_pairs = c.fetchall()\n\n # Track the feature names (row index)\n row_index = []\n features = []\n\n # Iterate through all sites and extract features from each site\n for p in tid_iid_pairs:\n tid, iid = p[0], p[1]\n\n # Extract image features\n c.execute(\n \"\"\"\n SELECT *\n FROM Image\n WHERE TableNumber = '{}' AND ImageNumber = {}\n \"\"\".format(tid, iid)\n )\n\n result = c.fetchall()\n result = np.array(result[0])\n\n # Filter out some features\n descriptions = [i[0] for i in c.description]\n droped_c = [i for i in range(len(descriptions)) if descriptions[i] in\n image_feature_filter_names]\n result = np.delete(result, droped_c, axis=0)\n\n # Change the data type of result into floats\n result = result.astype(float)\n\n image_feature = result\n image_name = [i for i in descriptions if i not in\n image_feature_filter_names]\n assert(image_name == extract_info_dict['image_name'])\n\n # Extract cell, cytoplasm, and nuclei features\n cell_feature = extract_cell_level_feature(\n c,\n 'Cells',\n tid,\n iid,\n set(extract_info_dict['cell_feature_filter_name']),\n extract_info_dict['cell_name']\n )\n\n cytoplasm_feature = extract_cell_level_feature(\n c,\n 'Cytoplasm',\n tid,\n iid,\n set(extract_info_dict['cytoplasm_feature_filter_name']),\n extract_info_dict['cytoplasm_name']\n )\n\n nuclei_feature = extract_cell_level_feature(\n c,\n 'Nuclei',\n tid,\n iid,\n set(extract_info_dict['nuclei_feature_filter_name']),\n extract_info_dict['nuclei_name']\n )\n\n # Combine image feature, cell level medians together\n cur_feature = np.hstack((image_feature,\n cell_feature,\n cytoplasm_feature,\n nuclei_feature))\n\n # Add the current feature into the well feature collections\n features.append(cur_feature)\n row_index.append('{}_{}_{}'.format(pid, wid, iid))\n\n features = np.vstack(features)\n return features, row_index", "def get_image_feature(opts, img_path, dictionary):\n img = Image.open(join(opts.data_dir,img_path))\n wordmap = visual_words.get_visual_words(opts, img, dictionary)\n return get_feature_from_wordmap_SPM(opts, wordmap)", "def test_to_geojson(self):\n fc = self.read_feature()\n dest_filename = str(self.datadir.join('test.geojson'))\n fc.to_geojson(dest_filename)\n fc_check = read_feature_collection(dest_filename)\n self.check_feature(fc_check.features[0])", "def test_add_geo_distance_with_dict():\n # When add a Geo Distance field\n t = GeoDistance({\"lat\": 1.0, \"lon\": 2.0}, \"20mi\")\n\n # Then I see the appropriate JSON\n results = {\n \"geo_distance\": {\n \"distance\": \"20mi\",\n \"location\": {\n \"lat\": 1.0,\n \"lon\": 2.0\n }\n }\n }\n\n homogeneous(t, results)", "def parse_feature_config(feature_config, feature_map):\n 
feature_name = feature_config[0]\n feature_args = feature_config[1]\n featurizer = feature_map[feature_name]\n\n return feature_name, feature_args, featurizer", "def expand_feature_coll_meta(feat_coll_meta):\n if type(feat_coll_meta) != dict:\n if type(feat_coll_meta).__name__ == 'FeatureCollection':\n feat_coll_meta = feat_coll_meta.getInfo()\n else:\n raise RuntimeError('Unsupported EE object')\n\n out_str = '---------------------\\n'\n for k, y in feat_coll_meta.items():\n if k == 'features':\n for feat in y:\n out_str += EEHelper.expand_feature_meta(feat) + '---------------------\\n'\n\n elif k == 'properties':\n for _k, _y in y.items():\n out_str += 'Property: {} : {}\\n'.format(_k, str(_y))\n else:\n out_str += '{} : {}\\n'.format(str(k), str(y))\n return out_str", "def add_features(data_dict, features_list):\n\n for name in data_dict:\n # add features for the log values of the financial data\n for feat in features_financial:\n try:\n data_dict[name][feat + '_log'] = math.log(data_dict[name][feat] + 1)\n except:\n data_dict[name][feat + '_log'] = 'NaN'\n\n # Add ratio of POI messages to total.\n try:\n total_messages = data_dict[name]['from_messages'] + data_dict[name]['to_messages']\n poi_related_messages = data_dict[name][\"from_poi_to_this_person\"] +\\\n data_dict[name][\"from_this_person_to_poi\"] +\\\n data_dict[name][\"shared_receipt_with_poi\"]\n poi_ratio = 1.* poi_related_messages / total_messages\n data_dict[name]['poi_ratio_messages'] = poi_ratio\n except:\n data_dict[name]['poi_ratio_messages'] = 'NaN'\n\n return data_dict", "def to_geojson(model, contrib_id):\n feature_collection = []\n for record in model.objects.filter(contributer_id=contrib_id):\n try:\n properies = {\n \"name\": record.name,\n \"address\": record.address,\n \"email\": record.email,\n \"website\": record.website,\n \"phone_number\": record.phone_number,\n }\n my_point = Point((record.longitude, record.latitude))\n my_feature = Feature(geometry=my_point, properties=properies)\n feature_collection.append(my_feature)\n except ValueError:\n pass\n return FeatureCollection(feature_collection)", "def vegref2geojson( vegref, dagensverdi=False): \r\n \r\n \r\n vegstr = vvi2vegrefstring( vegref) \r\n \r\n \r\n fradato = vegref['ValidFrom'][0:10]\r\n tildato = vegref['ValidTo'][0:10]\r\n veglenkeid = vegref['ReflinkOID']\r\n veglenkeposisjon = round( float( vegref['Measure'] ), 8) \r\n \r\n X = float( vegref['RoadNetPosition']['X'] ) \r\n Y = float( vegref['RoadNetPosition']['Y'] ) \r\n coordinates = [X, Y]\r\n if 'Z' in vegref['RoadNetPosition']:\r\n coordinates.append( float( vegref['RoadNetPosition']['Z'] ) )\r\n \r\n geoj = { \"type\": \"Feature\",\r\n \"geometry\": {\r\n \"type\": \"Point\",\r\n \"coordinates\": coordinates\r\n },\r\n \"properties\": {\r\n \"vegref\" : vegstr, \r\n \"fradato\" : fradato, \r\n \"tildato\" : tildato,\r\n \"veglenkeid\" : veglenkeid, \r\n \"veglenkeposisjon\" : veglenkeposisjon\r\n }\r\n }\r\n \r\n if dagensverdi: \r\n params = { 'viewDate' : '2022-10-31', \r\n 'reflinkoid' : veglenkeid, \r\n 'rellen' : veglenkeposisjon } \r\n \r\n url = 'https://visveginfo-static.opentns.org/RoadInfoService/GetRoadReferenceForNVDBReference' \r\n r = requests.get( url, params=params) \r\n if r.ok and 'RoadReference' in r.text: \r\n data = xmltodict.parse( r.text ) \r\n if 'RoadCategory' in data['RoadReference'].keys(): \r\n geoj['properties']['dagensvegref'] = vvi2vegrefstring( data['RoadReference'] ) \r\n else: \r\n geoj['properties']['dagensvegref'] = '' \r\n else: \r\n 
geoj['properties']['dagensvegref'] = '' \r\n \r\n return geoj", "def GetVectorArticleInput(dico_vector_input, features):\n features_left = set(features) - set(dico_vector_input.keys())\n if len(features_left) > 0:\n sentence = \"Some features aren't in the dict:\\n\"\n raise MyException(sentence + \"{}\".format(features_left))\n vector_art = []\n other_features = ['abstract', 'syn', 'exergue', 'title', 'secTitle']\n other_features += ['subTitle', 'supTitle']\n for feature in features:\n if feature == 'nbSign':\n if dico_vector_input['nbSign'] == 0:\n print(\"NbSign == 0 l.176 - GetVectorArticleInput\")\n vector_art.append(dico_vector_input[feature])\n else:\n vector_art.append(dico_vector_input[feature])\n # Conversion des variables en indicatrices\n # Normalement plus la peine, comme déjà fait auparavant\n elif feature in other_features:\n if dico_vector_input[feature] > 0:\n vector_art.append(1)\n else:\n vector_art.append(0)\n else:\n vector_art.append(dico_vector_input[feature])\n return (dico_vector_input['melodyId'], np.array([vector_art]))", "def get_features(feature_list, these_feature):\n features = {}\n def feat_filter(feature, this):\n try:\n mapper = lambda x, feat: filter(lambda y: feat in y, x.split(\" \"))[0]\n val = mapper(this, feature)\n if '+' in val:\n return TRUE\n return FALSE\n except:\n return UNDEF\n for feat in feature_list:\n features[feat] = feat_filter(feat, these_feature)\n return features", "def tweets_features(tweet):\n tweet = remove_stop_words(tweet)\n return {'TWEET': tweet}", "def _extract_feature(element):\n features = tf.parse_single_example(\n element,\n # Defaults are not specified since both keys are required.\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'label/x': tf.FixedLenFeature([], tf.int64),\n 'label/y': tf.FixedLenFeature([], tf.int64)\n })\n return features", "def transformation():\n data = None\n text = None\n\n if flask.request.content_type == \"application/json\":\n print(\"calling json launched\")\n data = flask.request.get_json(silent=True)\n\n text = data[\"text\"]\n try:\n bing_key = data[\"bing_key\"]\n except Exception:\n bing_key = None\n\n else:\n return flask.Response(\n response=\"This predictor only supports JSON data\",\n status=415,\n mimetype=\"text/plain\",\n )\n\n print(\"Invoked with text: {}.\".format(text.encode(\"utf-8\")))\n\n # Do the prediction\n predictions = ScoringService.predict(text, bing_key)\n\n result = json.dumps(predictions[:10])\n\n return flask.Response(response=result, status=200, mimetype=\"application/json\")", "def to_representation(self, instance):\n # prepare OrderedDict geojson structure\n feature = OrderedDict()\n # the list of fields that will be processed by get_properties\n # we will remove fields that have been already processed\n # to increase performance on large numbers\n fields = list(self.fields.values())\n\n # optional id attribute\n if self.Meta.id_field:\n field = self.fields[self.Meta.id_field]\n value = field.get_attribute(instance)\n feature[self.Meta.identifier] = field.to_representation(value)\n fields.remove(field)\n\n # required type attribute\n # must be \"Feature\" according to GeoJSON spec\n feature[\"type\"] = \"Feature\"\n\n # required geometry attribute\n # MUST be present in output according to GeoJSON spec\n field = self.fields[self.Meta.geo_field]\n geo_value = field.get_attribute(instance)\n feature[\"geometry\"] = field.to_representation(geo_value)\n fields.remove(field)\n # Bounding Box\n # if auto_bbox feature is enabled\n # bbox will be 
determined automatically automatically\n if self.Meta.auto_bbox and geo_value:\n feature[\"bbox\"] = geo_value.extent\n # otherwise it can be determined via another field\n elif self.Meta.bbox_geo_field:\n field = self.fields[self.Meta.bbox_geo_field]\n value = field.get_attribute(instance)\n feature[\"bbox\"] = value.extent if hasattr(value, 'extent') else None\n fields.remove(field)\n\n # GeoJSON properties\n feature[\"properties\"] = self.get_properties(instance, fields)\n\n return feature", "def as_geojson_feature(result: ResponseObject, properties: List[str] = None) -> ResponseObject:\n result['geojson']['coordinates'] = [float(i) for i in result['geojson']['coordinates']]\n return {\n 'type': 'Feature',\n 'geometry': result['geojson'],\n 'properties': {k: result.get(k) for k in properties or []},\n }" ]
[ "0.6012967", "0.5905541", "0.58329165", "0.57612747", "0.5738102", "0.5726723", "0.5687374", "0.56813467", "0.5624164", "0.56068397", "0.5599818", "0.5524402", "0.548304", "0.5466543", "0.5398618", "0.53919345", "0.53797644", "0.5355627", "0.53457856", "0.5333529", "0.53182507", "0.53055334", "0.5278664", "0.5269773", "0.5265213", "0.52626985", "0.5256953", "0.52399284", "0.52239853", "0.5206245", "0.5205967", "0.5204375", "0.5183353", "0.5179339", "0.51698864", "0.50987595", "0.50966114", "0.5089717", "0.50894445", "0.5085989", "0.5078441", "0.50662535", "0.50599223", "0.5048499", "0.5048455", "0.5047192", "0.504707", "0.50456566", "0.5042549", "0.503781", "0.5037307", "0.50366527", "0.50295067", "0.50191534", "0.5015623", "0.5012389", "0.5010561", "0.5004227", "0.5003635", "0.50022286", "0.5000124", "0.49989745", "0.49989745", "0.49989745", "0.49942735", "0.4990046", "0.49885768", "0.4985349", "0.49828464", "0.49820352", "0.4981714", "0.497697", "0.49736682", "0.49703854", "0.49674398", "0.4963142", "0.49546677", "0.49334186", "0.49332199", "0.4925866", "0.492285", "0.4920223", "0.49133173", "0.49089321", "0.4892729", "0.48808992", "0.48785385", "0.48775363", "0.4876393", "0.48754555", "0.4874203", "0.4868072", "0.4866175", "0.48659325", "0.48642537", "0.48636305", "0.48588786", "0.4852211", "0.48495632", "0.48495492" ]
0.78169143
0
Convert volume to flux
def volumeToFlux(volume_image):
    image = ee.Image(volume_image)
    flux_image = image.divide(ee.Image(AREA_PFAF6_30MIN)).multiply(1e6).copyProperties(image)
    flux_image = flux_image.set("units","m")
    flux_image = flux_image.set("convertedToFlux", 1)
    return flux_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convertflux(self, *args, **kwargs):\n return _image.image_convertflux(self, *args, **kwargs)", "def flux(source, freq=0.0, deltafreq=0.0, daysback=0.0) :\n x = queryFlux(source,freq,deltafreq,daysback)\n return x.flux", "def flux(self, x):\n return self.cal_spec.get_flux(self(x))", "def flux(self, q):\n q1, q2 = q\n if q1 > 0:\n u = q2/q1\n else:\n u = 0\n return np.array([q1*u, q1 * u*u + 0.5*9.81 * q1*q1])", "def treat_volume(volume):\n labels = measure.label(volume.dataobj, background=0, connectivity=2)\n new_volume = np.asarray(volume.dataobj)\n new_volume[labels > 1] = 0\n new_volume = nib.Nifti1Image(new_volume, volume.affine)\n return new_volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def to_volume(self, verbose=True):\n images = self.load_all_dicom_images(verbose=verbose)\n\n volume = np.stack(\n [\n x.pixel_array * x.RescaleSlope + x.RescaleIntercept\n for x in images\n ],\n axis=-1,\n ).astype(np.int16)\n return volume", "def test_flux_conversion_vega(in_q, out_u, ans):\n result = units.convert_flux(_wave, in_q, out_u, vegaspec=_vspec)\n assert_quantity_allclose(result, ans, rtol=1e-2)\n\n # Scalar\n i = 0\n result = units.convert_flux(_wave[i], in_q[i], out_u, vegaspec=_vspec)\n assert_quantity_allclose(result, ans[i], rtol=1e-2)", "def flux(self, u):\n flu = np.zeros((3,2), dtype=np.float64)\n flu[0,0] = u[1]\n flu[1,0] = u[0] * (u[1]/u[0])**2 + 0.5 * 9.81*u[0]**2\n flu[2,0] = u[1] * u[2]/u[0] #FIXME attenzione che c'è il punto controllare se sono scalari o vettori'\n flu[0,1] = u[2]\n flu[1,1] = u[2] * u[1]/u[0]\n flu[2,1] = u[0] * (u[2]/u[0])**2 + 0.5 * 9.81*u[0]**2\n return flu", "def mag_to_flux(mag, mag_zp):\n return 10 ** (-0.4 * (mag - mag_zp))", "def convert_flux(nu, flux, target_unit):\n\n curr_unit = flux.unit\n\n if curr_unit.is_equivalent(u.erg / u.s):\n flux = flux / sed.distance ** 2\n elif curr_unit.is_equivalent(u.Jy):\n flux = flux * nu\n elif not curr_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):\n raise Exception(\"Don't know how to convert {0} to ergs/cm^2/s\" % (flux.unit))\n\n # Convert to requested unit\n\n if target_unit.is_equivalent(u.erg / u.s):\n flux = flux * sed.distance ** 2\n elif target_unit.is_equivalent(u.Jy):\n flux = flux / nu\n elif not target_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):\n raise Exception(\"Don't know how to convert %s to %s\" % (curr_unit, unit_flux))\n\n return flux.to(target_unit)", "def Vega_zero_flux(self):\n with Vega() as v:\n f_vega = self.get_flux(v.wavelength, v.flux, axis=-1)\n return f_vega", "def normalize(volume):\n max = np.amax(volume)\n if max == 0:#Fixes dividing by 0 error if nothing in the volume\n return volume.astype(np.uint8)\n\n normalized = volume * (255.0 / max)\n normalized = np.round(normalized).astype(np.uint8)\n return normalized", "def normalize(wav, flux):\n return flux / flux.max() # maximum flux = 1\n\n # flux_norm = flux[wav>wav_norm][0]\n # return flux / flux_norm", "def normalize_flux(self):\n fmax = 0\n fmin = 1e99\n for n in self.graph:\n if n.flux > fmax:\n fmax = n.flux\n if n.flux < fmin:\n 
fmin = n.flux\n for n in self.graph:\n n.flux = (n.flux-fmin)/(fmax-fmin)", "def flux():\n delta = 0.01 # film thickness, [dm]\n c = pre * 10 ** 2 / (R * tem) # total concentration calculated by ideal gas equation, in [mol/L]\n D12 = 0.001626528 / pre # HCl diffusion in Air, [dm2/s] @296K\n D13 = 3e-7 # HCl gas diffusion in water, [dm2/s] @296K\n D23 = 1.5e-7 # CH4 gas diffusion in water, [dm2/s] @296K\n N1 = ((x1_bar * x2d * D23) / (x2_bar * delta * D13) - x1_bar / delta) / \\\n (x2_bar / (D12 * c) + x3_bar / (D13 * c) + D23 * x1_bar / (D12 * D13 * c))\n # print 'Flux of HCl into water', abs(N1), [mol/(dm2*sec)]\n return N1", "def flux_hack(self):\r\n return self.planes[1].galaxies[0].light_profiles[0].flux", "def normalize_volume(vol_data):\n h, w, d = np.shape(vol_data)\n mean = np.sum(vol_data)/(h*w*d)\n std = np.std(vol_data)\n return (vol_data - mean) / std", "def flux(u, kappa):\n V = u.function_space()\n mesh = V.mesh()\n degree = V.ufl_element().degree()\n W = VectorFunctionSpace(mesh, 'P', degree)\n flux_u = project(-kappa*grad(u), W)\n flux_u.rename('flux(u)', 'continuous flux field')\n return flux_u", "def toa_incoming_shortwave_flux(srad0, srad0u):\n return srad0 - srad0u", "def magtoflux(_mag, _id):\n return math.pow(10, -0.4*(_mag + VegaToAB[_id] - 8.9))", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 1e-06, \"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def volume(self):\n return self.volume_array", "def getScalarFlux(self):\n totScalarFlux = []\n for cell in self.cells:\n totScalarFlux.append(cell.getTotScalarFlux())\n totScalarFlux = np.array(totScalarFlux)\n #return totScalarFlux / np.sum(totScalarFlux) # norm flux to 1.\n return totScalarFlux", "def vol_uc(x):\r\n return sum([vol(m) for m in metamer(x)])", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = 
pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def cps_to_flux(self, counts):\n return counts * 10**(-(2.406+self.zp) / 2.5 ) / (self.lbda**2)", "def floor_volume(volume):\n return ul(math.floor(volume.to('microliter').magnitude))", "def volume(self, volume: float | None, from_unit: str) -> float:\n if not isinstance(volume, Number):\n raise TypeError(f\"{volume!s} is not a numeric value.\")\n\n # type ignore: https://github.com/python/mypy/issues/7207\n return VolumeConverter.convert( # type: ignore[unreachable]\n volume, from_unit, self.volume_unit\n )", "def getMagFlux(self):\n return self.magflux", "def volume(self):\n self.convert_window(\"Volume\", \"cubic decimeters\", [\"acre foot\", \"barrels\", \"bushels(UK)\", \"bushels(US)\", \"centiliters\", \"cubic centimeters\", \"cubic decameters\", \"cubic decimeters\", \"cubic feet\", \"cubic inches\", \"cubic kilometers\", \"cubic meters\", \"cubic mile\", \"cubic millimeters\", \"cubic yards\", \"cups\", \"deciliters\", \"dram\", \"dram(imperial)\", \"fluid ounces(US)\", \"fluid ounces(imperial)\", \"gallons(US,dry)\", \"gallons(US,liquid)\", \"gallons(imperial)\", \"gill(US)\", \"gill(imperial)\", \"liters\", \"liters(1901-1964)\", \"microliters\", \"milliliters\", \"nanoliters\", \"picoliters\", \"pints(US,dry)\", \"pints(US,liquid)\", \"pints(imperial)\", \"quarts(UK,dry)\", \"quarts(US,liquid)\", \"quarts(imperial)\", \"table spoons\", \"tea spoons\"])", "def value_to_volts(value):\n return ...", "def volume():\n # Get the active object\n obj = bpy.context.active_object\n \n scene = bpy.context.scene\n unit = scene.unit_settings\n \n # Set blender unit in mm\n unit.scale_length = 0.001\n bpy.context.scene.unit_settings.length_unit = 'MILLIMETERS' \n \n # Get the scale\n scale = 1.0 if unit.system == 'NONE' else unit.scale_length\n \n # Switch in object mode \n bpy.ops.object.mode_set(mode='EDIT')\n \n # Load mesh\n me = bpy.context.edit_object.data\n bm_orig = bmesh.from_edit_mesh(me)\n \n # Make a copy of the mesh\n bm = bm_orig.copy()\n\n # Apply modifier to the copy\n bm.transform(obj.matrix_world)\n \n print(scale)\n print(bm.calc_volume())\n \n # Calcul the volume\n bpy.types.Scene.volume = bm.calc_volume() * (scale ** 3.0) / (0.001 ** 3.0)\n print(bpy.types.Scene.volume)\n \n # Delete the copy\n bm.free()\n \n # Switch in object mode \n bpy.ops.object.mode_set(mode='OBJECT')", "def flux(self) -> ErrorValue:\n try:\n return ErrorValue(self._data['Flux'], self._data.setdefault('FluxError',0.0))\n except KeyError:\n return 1 / self.pixelsizex / self.pixelsizey / ErrorValue(self._data['NormFactor'],\n self._data.setdefault('NormFactorError',0.0))", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n # random normal too slow\n #out_random = np.random.normal(0, 1, size = volume.shape)\n out_random = np.zeros(volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def flux(self, photon_energy, distance=1*u.kpc):\n\n spec = self.spectrum(photon_energy)\n\n if distance != 0:\n distance = validate_scalar('distance', distance, physical_type='length')\n spec /= 4 * np.pi * distance.to('cm') ** 2\n out_unit = '1/(s cm2 eV)'\n else:\n out_unit = '1/(s eV)'\n\n return spec.to(out_unit)", "def Luminosity_to_Flux(self, z, lum, dnu=1000):\n ld = self.Luminosity_Distance(z)\n ld2 = ld*ld\n flux = 
lum/4/np.pi/ld2/dnu/self.MHz2Hz/self.Jy2CGS\n return flux", "def normalize_volumes_mixmode(directory, amplitude=0.08, ext='.wav'):\n subdirectories = [x[0] for x in os.walk(directory)]\n for subdirectory in subdirectories:\n os.system(f\"normalize-audio -w 16 -a {amplitude} -b '{subdirectory}/'*{ext}\")", "def convert_flux(in_flux, start_unit, end_unit, wavelength,\n wave_unit):\n # check for same input values\n if start_unit == end_unit:\n log.debug(f'Start and end units are same: {start_unit}')\n return in_flux\n\n if isinstance(wave_unit, str):\n wave_unit = u.Unit(wave_unit)\n if isinstance(start_unit, str):\n start_unit = u.Unit(start_unit, str)\n if isinstance(end_unit, str):\n end_unit = u.Unit(end_unit, str)\n\n # check again in case new unit equalities were found\n if start_unit == end_unit:\n log.debug(f'Start and end units are same: {start_unit}')\n return in_flux\n\n log.debug(f'Converting {len(wavelength)} of {type(wavelength)} '\n f'{type(wavelength[0])}')\n wave = wavelength * wave_unit\n in_flux = in_flux * start_unit\n try:\n out_flux = in_flux.to(end_unit, equivalencies=u.spectral_density(wave))\n except u.core.UnitConversionError:\n log.debug(f'Units not convertible: {start_unit} -> {end_unit}')\n raise ValueError('Inconvertible units') from None\n\n return out_flux.value", "def volume(self):\n return _cantera.reactor_volume(self.__reactor_id)", "def test_str_volume_flux(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"volume_flux\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xC5,\n 0x4,\n 0x2D,\n 0x72,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -2114.84033203125)\n self.assertEqual(sensor.unit_of_measurement(), \"m³/s\")\n self.assertEqual(sensor.ha_device_class(), None)", "def getComponentVolume(self):\n lengthO = self.getDimension(\"lengthOuter\")\n widthO = self.getDimension(\"widthOuter\")\n heightO = self.getDimension(\"heightOuter\")\n lengthI = self.getDimension(\"lengthInner\")\n widthI = self.getDimension(\"widthInner\")\n heightI = self.getDimension(\"heightInner\")\n mult = self.getDimension(\"mult\")\n vol = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI)\n return vol", "def compute_volume(bundle):\n\taff=np.array([[-1.25, 0, 0, 90],[0, 1.25, 0, -126],[0, 0, 1.25, -72],[0, 0, 0, 1]])\n\tvoxel_list = streamline_mapping(bundle, affine=aff).keys()\n\tvol_bundle = len(set(voxel_list))\n\n\treturn vol_bundle", "def tiles_to_volume(self, tiles: list) -> np.ndarray:\n if not self.ascending:\n tiles = tiles[::-1]\n volume = np.stack(tiles, axis=-1).transpose((1, 0, 2))\n return np.flip(volume, axis=1)", "def get_molar_volume(self):\n structure = self.structure\n volume = structure.volume\n _, units = structure.composition.get_reduced_composition_and_factor()\n unit_volume = volume / units\n\n return unit_volume", "def convert_volts(self,data,places):\n volts = (data * self.max) / float(255)\n volts = round(volts,places)\n return volts", "def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)", "def volume(self):\n vol = ((self.I0 * self.V.omega *\n self._mu_0 / (self._mu_0 + self._mu_ex))\n * (1. - np.exp(-(self.V.tau / self._mu_0) -\n (self.V.tau / self._mu_ex)))\n * self.V.p(self.t_0, self.t_ex, self.p_0, self.p_ex,\n param_dict=self.param_dict))\n\n return (1. 
- self.bsf) * vol", "def update_volume(self):\r\n\r\n # for the first cell\r\n self.cells[0].volume = self.cells[0].volume + \\\r\n self.inflow - self.flows[0]\r\n # for the intermediate cells\r\n for i in range(1, self.cells_number-1):\r\n self.cells[i].volume = self.cells[i].volume + \\\r\n self.flows[i-1]-self.flows[i]\r\n # for the last cells\r\n self.cells[-1].volume = self.cells[-1].volume + \\\r\n self.flows[-1] - self.outflow", "def renormalize(flux, ivar):\n\n # axis=1 corresponds to the rebinned spectral axis\n # Finding the weighted mean both for normalization and for the rms\n mean = np.average(flux, axis=1, weights=ivar)[:, None]\n rms = np.sqrt(np.average((flux - mean) ** 2, axis=1, weights=ivar))[:, None]\n\n # Normalize by subtracting the weighted mean and dividing by the rms\n # as prescribed in the original QuasarNet paper.\n return (flux - mean) / rms", "def single_volume_inference(self, volume):\n self.model.eval()\n\n # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis\n slices = []\n\n # Write code that will create mask for each slice across the X (0th) dimension. After \n # that, put all slices into a 3D Numpy array. You can verify if your method is \n # correct by running it on one of the volumes in your training set and comparing \n # with the label in 3D Slicer.\n \n # normalize\n image = (volume.astype(np.single) - np.min(volume))/(np.max(volume) - np.min(volume))\n \n new_image = med_reshape(image, new_shape=(self.patch_size, self.patch_size, image.shape[2]))\n mask3d = np.zeros(new_image.shape)\n \n for slc_ix in range(new_image.shape[2]):\n tsr_test = torch.from_numpy(new_image[:,:,slc_ix].astype(np.single)).unsqueeze(0).unsqueeze(0)\n #image = torch.from_numpy(self.data[slc[0]][\"image\"][:,:,slc[1]]).unsqueeze(0)\n #tsr_test = torch.from_numpy(slc.astype(np.single)).unsqueeze(0).unsqueeze(0)\n pred = self.model(tsr_test.to(self.device))\n pred = np.squeeze(pred.cpu().detach())\n mask3d[:,:,slc_ix] = torch.argmax(pred, dim=0)\n\n return mask3d", "def infer_from_volume(self):\n return self._infer_from_volume", "def generate_volume(self, x_ax):\n\n x_ax = np.asarray(x_ax, dtype=np.float).flatten()\n\n vol = _generate_boxcar_volume(x_ax, self.radius, self.center)\n\n return vol", "def volume(x: torch.Tensor, floor=1e-8):\n return torch.log10(floor + (x**2).mean(-1)) * 10", "def total_volume(self):", "def get_volt(data):\n volt = (data * 5.0) / 255\n #volt = round(volt, 1)\n return volt", "def air_to_vacuum(wavelength):\n # Following the air to vacuum conversion from VALD3 (computed by N. Piskunov) http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion\n s_square = np.power(1.e4 / wavelength, 2)\n n2 = 1. 
+ 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s_square) + 0.0001599740894897 / (38.92568793293 - s_square)\n return wavelength*n2 # Angstroms", "def get_volume(cls) -> float:\n raise NotImplementedError", "def volume(nodes, graph):\n ###TODO\n pass", "def getFluxVector(\n self, energyOrder=0, adjoint=False, extSrc=False, volumeIntegrated=True\n ):\n flux = []\n blocks = list(self.getBlocks())\n groups = range(self.lib.numGroups)\n\n # build in order 0\n for b in blocks:\n if adjoint:\n vals = b.p.adjMgFlux\n elif extSrc:\n vals = b.p.extSrc\n else:\n vals = b.p.mgFlux\n\n if not volumeIntegrated:\n vol = b.getVolume()\n vals = [v / vol for v in vals]\n\n flux.extend(vals)\n\n if energyOrder == 1:\n # swap order.\n newFlux = []\n for g in groups:\n oneGroup = [flux[i] for i in range(g, len(flux), len(groups))]\n newFlux.extend(oneGroup)\n flux = newFlux\n\n return numpy.array(flux)", "def mag2Flux(mag, unit='maggy'):\n flux = 10.0 ** (-0.4 * mag)\n\n if unit.lower().strip() == 'jy':\n return flux * 3631.0\n\n if unit.lower().strip() == 'maggy':\n return flux\n\n if unit.lower().strip() == 'nanomaggy':\n return flux * 1.0E-9\n\n raise Exception(\"# Wrong unit! (jy/maggy/nanomaggy)\")", "def epflux_all(U, V, W, T, longitude, latitude, press, boa=None):\n pass", "def getFluxGeometry(self):\r\n\t\treturn self.getTotalFlux() / self.rho_w0;", "def train_preprocessing(volume, label):\n # Rotate volume\n volume = rotate(volume)\n volume = horizontal_flip(volume)\n# volume = vertical_flip(volume)\n volume = tf.expand_dims(volume, axis=3)\n return volume, label", "def getFlux(self, slamb, sflux, axis=-1):\n return self.get_flux(slamb, sflux, axis=axis)", "def water_evapotranspiration_flux(evap):\n return evap * (-1)", "def create_flux_vector_pf_gr(self):\n t0 = time.time()\n\n verif_local = 1\n lim4 = 1e-4\n soma = 0\n soma2 = 0\n soma3 = 0\n store_flux_pf = {}\n\n for volume in self.all_fine_vols:\n #1\n flux = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #2\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n altura = centroid_adj[2]\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n z = uni[2]\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)\n keq = keq*(np.dot(self.A, uni))/(self.mi)\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n\n q = (grad_p)*keq - grad_z*keq*self.gama\n flux[tuple(unit)] = q\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n store_flux_pf[volume] = flux\n flt = sum(flux.values())\n # print(gid_vol)\n # print(flt)\n # print(store_flux_pf)\n # print('\\n')\n # import pdb; pdb.set_trace()\n self.mb.tag_set_data(self.flux_fine_pf_tag, 
volume, flt)\n soma += flt\n if abs(flt) > lim4 and volume not in self.wells:\n verif_local = 0\n print('nao esta dando conservativo na malha fina')\n print(gid_vol)\n print(flt)\n import pdb; pdb.set_trace()\n soma_prod = []\n soma_inj = []\n with open('fluxo_malha_fina_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]\n values = store_flux_pf[volume].values()\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n\n # print('gid:{0}'.format(gid))\n # print('valor:{0}'.format(sum(values)))\n if volume in self.wells_inj:\n soma_inj.append(sum(values))\n else:\n soma_prod.append(sum(values))\n # print('\\n')\n soma2 += sum(values)\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(sum(soma_inj)))\n arq.write('soma_prod:{0}'.format(sum(soma_prod)))\n\n print('soma_inj:{0}'.format(sum(soma_inj)))\n print('soma_prod:{0}'.format(sum(soma_prod)))\n\n print('soma2 : {0}'.format(soma2))\n if abs(soma2) > lim4:\n print('nao esta dando conservativo globalmente')\n import pdb; pdb.set_trace()\n\n # print('saiu de def create_flux_vector_pf')\n print('\\n')\n\n tf = time.time()\n # import pdb; pdb.set_trace()\n return store_flux_pf", "def volume(self, values):\n self._vart = float(values.get('art', self._vart))\n self._vven = float(values.get('ven', self._vven))", "def volume(self, values):\n self._vart = float(values.get('art', self._vart))\n self._vven = float(values.get('ven', self._vven))", "def volume(self, values):\n self._vart = float(values.get('art', self._vart))\n self._vven = float(values.get('ven', self._vven))", "def normalise_to_magnitude(self, magnitude, band):\n\n from ..photometry import mag2flux\n\n mag_flux = mag2flux(magnitude, band)\n spec_flux = self.calculate_flux(band)\n norm = mag_flux / spec_flux\n self.flux *= norm", "async def volume_(\n client,\n event,\n volume: P('float', 'volume', min_value = 0.0, max_value = 5.0),\n):\n player = get_player_or_abort(client, event)\n \n filter = Volume(volume)\n player.add_filter(filter)\n await player.apply_filters()\n \n return create_filter_added_embed(filter)", "def create_flux_vector_pf_gr_bif_1(self):\n # volumes_in_primal_set = self.mb.tag_get_data(self.volumes_in_primal_tag, 0, flat=True)[0]\n # volumes_in_primal_set = self.mb.get_entities_by_handle(volumes_in_primal_set)\n lim = 1e-4\n self.dfdsmax = 0\n self.fimin = 10\n self.qmax = 0\n self.store_velocity_pf = {}\n store_flux_pf = {}\n for primal in self.primals:\n #1\n primal_id1 = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id1]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n list_keq = []\n list_p = []\n list_gid = []\n list_keq3 = []\n list_gidsadj = []\n list_qw = []\n qw3 = []\n qw = 0\n flux = {}\n velocity = {}\n fi = self.mb.tag_get_data(self.fi_tag, volume, flat=True)[0]\n if fi < self.fimin:\n self.fimin = fi\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume, flat=True)[0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume, flat=True)[0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n fw_vol = self.mb.tag_get_data(self.fw_tag, volume, flat=True)[0]\n sat_vol = self.mb.tag_get_data(self.sat_tag, volume, flat=True)[0]\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 
2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]\n padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj, flat=True)[0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj, flat=True)[0]\n lbt_adj = lamb_w_adj + lamb_o_adj\n fw_adj = self.mb.tag_get_data(self.fw_tag, adj, flat=True)[0]\n\n keq3 = (kvol*lamb_w_vol + kadj*lamb_w_adj)/2.0\n\n # kvol = kvol*(lamb_w_vol + lamb_o_vol)\n # kadj = kadj*(lamb_w_adj + lamb_o_adj)\n\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n q = ((grad_p) - grad_z*self.gama)*(np.dot(self.A, uni))*keq\n\n list_keq.append(keq)\n list_p.append(padj)\n list_gid.append(gid_adj)\n\n keq2 = keq\n\n qw += q*(fw_adj + fw_vol)/2.0\n\n #keq = keq*(np.dot(self.A, uni))\n #pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n #padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n\n #grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n #q = (grad_p)*keq\n #qw3.append(grad_p*keq3*(np.dot(self.A, uni)))\n # if grad_p < 0:\n # #4\n # fw = fw_vol\n # qw += (fw*grad_p*kvol*(np.dot(self.A, uni)))\n # list_qw.append(fw*grad_p*kvol*(np.dot(self.A, uni)))\n #\n # else:\n # fw = fw_adj\n # qw += (fw*grad_p*kadj*(np.dot(self.A, uni)))\n # list_qw.append(fw*grad_p*kadj*(np.dot(self.A, uni)))\n\n\n # if gid_adj > gid_vol:\n # v = -(grad_p)*keq2\n # else:\n # v = (grad_p)*keq2\n\n flux[tuple(unit)] = q\n #velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n if abs(sat_adj - sat_vol) < lim or abs(fw_adj -fw_vol) < lim:\n continue\n dfds = abs((fw_adj - fw_vol)/(sat_adj - sat_vol))\n # print('aqui')\n # print(gid_vol)\n # print(gid_adj)\n # print(fw_adj - fw_vol)\n # print(sat_adj - sat_vol)\n # print(dfds)\n if dfds > self.dfdsmax:\n self.dfdsmax = dfds\n\n #2\n # list_keq.append(-sum(list_keq))\n # list_p.append(pvol)\n # list_gid.append(gid_vol)\n #\n # list_keq = np.array(list_keq)\n # list_p = np.array(list_p)\n # resultado = sum(list_keq*list_p)\n\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n #self.store_velocity_pf[volume] = velocity\n store_flux_pf[volume] = flux\n flt = sum(flux.values())\n print('gid')\n print(gid_vol)\n print('flux')\n print(flt)\n print('\\n')\n import pdb; pdb.set_trace()\n self.mb.tag_set_data(self.flux_fine_pf_tag, volume, flt)\n\n if abs(sum(flux.values())) > lim and volume not in self.wells:\n print('nao esta dando conservativo na malha fina')\n print(gid_vol)\n print(sum(flux.values()))\n import pdb; pdb.set_trace()\n\n qmax = max(list(map(abs, flux.values())))\n if qmax > self.qmax:\n self.qmax = qmax\n if volume in 
self.wells_prod:\n qw_out = sum(flux.values())*fw_vol\n #qw3.append(-qw_out)\n qo_out = sum(flux.values())*(1 - fw_vol)\n self.prod_o.append(qo_out)\n self.prod_w.append(qw_out)\n qw = qw - qw_out\n\n if abs(qw) < lim and qw < 0.0:\n qw = 0.0\n\n elif qw < 0 and volume not in self.wells_inj:\n print('gid')\n print(gid_vol)\n print('qw < 0')\n print(qw)\n import pdb; pdb.set_trace()\n\n else:\n pass\n\n\n # if (qw < 0.0 or sum(qw3) < 0.0) and volume not in self.wells_inj:\n # print('qw3')\n # print(sum(qw3))\n # print('qw')\n # print(qw)\n # import pdb; pdb.set_trace()\n self.mb.tag_set_data(self.flux_w_tag, volume, qw)\n\n # print(self.dfdsmax)\n # print(sum(flux.values()))\n # print(sum(qw))\n # print(sum(qw3))\n # print('\\n')\n\n soma_inj = []\n soma_prod = []\n soma2 = 0\n with open('fluxo_malha_fina_bif_gr{0}.txt'.format(self.loop), 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]\n values = self.store_flux_pf[volume].values()\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n\n # print('gid:{0}'.format(gid))\n # print('valor:{0}'.format(sum(values)))\n if volume in self.wells_inj:\n soma_inj.append(sum(values))\n else:\n soma_prod.append(sum(values))\n # print('\\n')\n soma2 += sum(values)\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(sum(soma_inj)))\n arq.write('soma_prod:{0}\\n'.format(sum(soma_prod)))\n arq.write('tempo:{0}'.format(self.tempo))\n\n return store_flux_pf", "def Vega_zero_flux(self):\n return self._get_mean_and_samples_attribute('Vega_zero_flux')", "def surface_runoff_flux(runoff, drain):\n return runoff - drain", "def LoadFluxData(self, *args):\n return _gmat_py.SolarFluxReader_LoadFluxData(self, *args)", "def volume(self):\n return self.structure.volume", "def __call__(self, asset, lookback):\n return self._vol(asset, lookback)", "def read_volume(volume_file, threshold=-Inf):\n volume = nload(str(volume_file))\n data = volume.get_fdata()\n if threshold is not None:\n i = data >= threshold\n else:\n i = abs(data) > 0.001 # exclude small values\n\n output = zeros(i.sum(), dtype=DTYPE_VOLUME)\n\n output['pos'] = apply_affine(volume.affine, array(where(i)).T)\n if threshold is not None:\n output['value'] = 1\n else:\n output['value'] = data[i]\n\n return output", "def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)", "def liquid_depth_from_volume(self, volume):\n pass", "def set_flux(self, flux, var):\n self._properties[\"flux\"] = np.float(flux)\n self._properties[\"var\"] = np.float(var) if var is not None else np.NaN\n self._reset_derived_prop_()", "def _convert_value(self, value, unit, axis):\n if hasattr(value, 'units'):\n return value.to(unit).magnitude\n else:\n return self._reg.Quantity(value, axis.get_units()).to(unit).magnitude", "def Flux(self, flux, r_lim, z_lim):\n if flux == 'mass':\n set_integrand = lambda x: x\n\n elif flux == 'momentum':\n set_integrand = lambda x: x**2\n\n elif flux == 'buoyancy':\n b = self.read_vars(['b'])['b']\n set_integrand = lambda x: x*b\n\n npx = self.params['npx']\n Lx = self.params['Lx']\n Ly = self.params['Ly']\n Lz = self.params['Lz']\n nz = self.params['nz']\n\n dx = Lx/npx\n t = self.read_vars(['t'])['t']\n n_time = t.shape[0]\n\n r_max = r_lim # as in forced_plume_nudging.py\n z_max = z_lim\n new_nz = int(nz*z_lim)\n\n flux = np.zeros(n_time)\n\n fields = self.read_vars(['w', 'x', 'y', 'z'])\n w = 
velocity_interpolation(fields['w'], axis=1)\n\n XX, YY = np.meshgrid(fields['x']/Lx - 0.5,\n fields['y']/Ly - 0.5)\n\n r = np.sqrt(XX**2 + YY**2)\n mask_1 = ma.masked_outside(r, 0, r_max)\n #mask_2 = ma.masked_outside(ZZ, 0, z_max)\n\n # defining integrand\n integrand = set_integrand(w)\n\n for t in range(n_time):\n aux = np.zeros(new_nz)\n for z_i in range(new_nz):\n field_new = ma.masked_array(integrand[t, z_i], mask_1.mask)\n aux[z_i] = field_new.sum()\n\n flux[t] = aux.sum()\n\n return flux", "def cube_volume(edge : number) -> number:\n volume = edge*edge*edge\n\n return volume", "def range_flux(self):\n return self.max_flux - self.min_flux", "def ensemble_flux(self):\n return transition_matrices.flux_measure(self.ensemble_transition_matrix)", "def train_preprocessing2(volume, label, weight):\n # Rotate volume\n volume = rotate(volume)\n volume = horizontal_flip(volume)\n# volume = vertical_flip(volume)\n volume = tf.expand_dims(volume, axis=3)\n return volume, label, weight", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s-1*cm-2*AA-1')", "def volume(self) -> int:\n return self.audio_mixer.getvolume()[0]", "def volume (self):\n volume = self.sideLength**3\n return volume", "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = 
-((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "def volume_flow_from_area(\n self,\n velocity: Quantity,\n area: Quantity,\n unit: Unit = BaseCalculator.DEFAULT_VOLUME_FLOW_UNIT,\n ):\n check_dimensionality(velocity, self.DEFAULT_VELOCITY_UNIT)\n check_dimensionality(area, self.DEFAULT_AREA_UNIT)\n volume_flow = velocity * area\n return volume_flow.to(unit)", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')", "def calculate_volume(hull):\n origin = hull[0][0]\n volume = 0.0\n for face in hull:\n logvolume = signed_volume(form_face(face, origin))[1]\n volume += numpy.exp(logvolume)\n # n-dimensional simplex = det / n!\n volume /= scipy.special.factorial(len(origin))\n\n return volume", "def flux2mag(flux, threshold=None, mode=\"table\") :\n\n\timport parameters as param\n\timport scipy.interpolate\n\n\tif mode == \"compute\" :\n\t\treturn flux2mag_conversion(mag, threshold)\n\telif not mode == \"table\":\n\t\traise ValueError(\"Mode not recognised.\")\n\n\tif threshold is None:\n\t\tthreshold = param.ppm_threshold\n\tthreshold *= 1e-6\n\n\tif type(flux) == list:\n\t\tflux = np.asarray(flux)\n\telif type(flux) in [np.float, np.float32, np.float64]:\n\t\tflux = np.asarray([flux])\n\n\tx = np.log10(param.flux_in_aperture[:,1])\n\ty = param.flux_in_aperture[:,0]\n\tinterp_out = scipy.interpolate.UnivariateSpline(x, y)\n\tinterp_in = scipy.interpolate.interp1d(x, y)\n\n\tflux = np.asarray(flux)\n\tflux_of_star = flux / threshold\n\tflux_in_aperture = flux_of_star * (np.pi * param.radius_psf * param.radius_psf)\n\n\tflux_in_aperture = flux_in_aperture.clip(min=1e-40)\n\n\tif np.log10(flux_in_aperture) >= np.amin(x) and np.log10(flux_in_aperture) <= np.amax(x):\n\t\tmag = interp_in(np.log10(flux_in_aperture))\t\n\telse:\n\t\tmag = interp_out(np.log10(flux_in_aperture))\n\n\n\n\tmag[np.where(mag>param.magnitude_max)]=param.magnitude_max\n\n\treturn mag", "def compute_volume(self) -> float:\n return (\n (1 if self.clockwise else -1)\n * np.sum(\n np.linalg.det(\n np.dstack(\n (\n self.vertices[self._faces[:, 0]],\n self.vertices[self._faces[:, 1]],\n 
self.vertices[self._faces[:, 2]],\n )\n )\n )\n )\n / 6\n )", "def flux_ratio(self):\n return self._flux_ratio", "def volume(self) -> float:\n return self._volume" ]
[ "0.6863947", "0.65460765", "0.6410462", "0.63754797", "0.6337502", "0.63291806", "0.63291806", "0.62562454", "0.61814946", "0.61583227", "0.60439867", "0.60325944", "0.592126", "0.5912541", "0.5898983", "0.5894703", "0.58924603", "0.58740014", "0.5857131", "0.58522546", "0.58230066", "0.5814723", "0.5810996", "0.5797539", "0.5693262", "0.56831056", "0.5676475", "0.56579185", "0.56464136", "0.56361353", "0.56122017", "0.56101996", "0.5584122", "0.558117", "0.55659014", "0.55658424", "0.550791", "0.5506391", "0.54937035", "0.54917204", "0.54876095", "0.54750615", "0.5474194", "0.5474183", "0.54738253", "0.54590607", "0.54443634", "0.5442308", "0.54378754", "0.54324496", "0.54232556", "0.54126894", "0.5396093", "0.5358552", "0.53583574", "0.5337677", "0.53340125", "0.53286237", "0.53238475", "0.53190637", "0.5312874", "0.52916145", "0.5290539", "0.5286437", "0.5277122", "0.52711344", "0.5263445", "0.52617955", "0.5260626", "0.5260626", "0.5260626", "0.5254512", "0.5254322", "0.5245421", "0.52446985", "0.5244603", "0.52435213", "0.5242832", "0.5237751", "0.5225262", "0.5224471", "0.52139974", "0.52122766", "0.52097917", "0.5207131", "0.5206932", "0.5205465", "0.5203031", "0.5187725", "0.5185355", "0.5173114", "0.5155879", "0.5154673", "0.51510346", "0.5148974", "0.5145438", "0.51453346", "0.51430714", "0.5142136", "0.51421136" ]
0.8054486
0
filters an imagecollection based on year and month
def filter_ic(ic,year,month):
    ic_filtered = (ic.filter(ee.Filter.eq("month",month))
                   .filter(ee.Filter.eq("year",year)))
    image = ee.Image(ic_filtered.first())
    return(image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_temporal(self, start_date: str, end_date: str) -> 'ImageCollection':\n process_id = 'filter_daterange'\n args = {\n 'imagery': self.graph,\n 'extent': [start_date, end_date]\n }\n\n return self.graph_add_process(process_id, args)", "def filter_month(data, month, year):\n input_month = str(month).zfill(2)\n input_year = str(year)\n\n month_data = []\n\n for row in data:\n date_as_string = row['inspection_date'][:10]\n month, day, year = date_as_string.split('/')\n if input_month == month and input_year == year:\n month_data.append(row)\n\n return month_data", "def get_images(self,\n collection,\n bounds=None,\n year=None,\n start_date=None,\n end_date=None,\n start_julian=1,\n end_julian=365,\n index_list=None,\n scale_factor=None,\n **kwargs):\n coll = ee.ImageCollection(collection)\n\n if year is not None:\n start_date = '{}-01-01'.format(str(year))\n end_date = '{}-12-31'.format(str(year))\n\n if bounds is not None:\n coll = coll.filterBounds(bounds)\n if (start_date is not None) and (end_date is not None):\n coll = coll.filterDate(start_date, end_date)\n\n coll = coll.filter(ee.Filter.calendarRange(start_julian, end_julian))\n\n if len(kwargs) > 0:\n for key, value in kwargs.items():\n if key == 'map':\n if value == 'add_indices':\n\n if index_list is not None:\n self.index_list = index_list\n\n if scale_factor is not None:\n self.scale_factor = scale_factor\n\n func = getattr(self, value, None)\n\n if func is not None:\n coll = coll.map(func)\n else:\n warnings.warn('The function {} is not implemented'.format(str(key)))\n return coll", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def test_year_filtering(self):\n # Get a valid date\n 
entry = Entry.objects.get(id=1)\n params = {\"year\": entry.publication_date.year}\n\n self._test_filtering(**params)", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = '{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def date_filter(frame, date_column, year):\n frame[date_column] = pd.to_datetime(frame[date_column])\n frame = frame[frame[date_column] > pd.Timestamp(year, 1, 1)]\n return frame", "def test_collection_author_year_filtering(self):\n # Create a collection\n entries = Entry.objects.filter(id__in=(1, 5, 10, 15))\n collection = CollectionFactory(entries=entries)\n entry = Entry.objects.get(id=1)\n\n # Get a valid collection\n params = {\n \"collection\": collection.id,\n \"author\": 
entry.first_author.id,\n \"year\": entry.publication_date.year,\n }\n self._test_filtering(**params)", "def filter_daterange(self, imagery, extent) -> 'ProcessGraph':\n\n graph = {\n 'process_id': 'filter_daterange',\n 'imagery': imagery.graph,\n 'extent': extent\n }\n\n imagery.graph = graph\n\n return imagery", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def get_immats_fromdwh(dwh_schema, table_name, connection, date_col=\"date_immat\"):\n \n query = f\"SELECT distinct date AS date_immat FROM {dwh_schema}.{table_name}\"\n df = pd.read_sql(query,con=connection) \n df[\"year_immat\"] = df[date_col].str[-4:]\n dc_years = dict()\n for year, nb_mois in df.year_immat.value_counts().iteritems():\n if nb_mois < 12:\n key_name = f\"immats/immats_{year}.csv\"\n dc_years[key_name] = df[df[\"year_immat\"]==year][date_col].tolist()\n return dc_years", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def filter_creation_date(groups, start, end):\n results = []\n for g in groups:\n created = datetime.fromtimestamp(g['creationTime'] / 1000.0)\n if created > end:\n continue\n if created > start:\n g['exportStart'] = created\n else:\n g['exportStart'] = start\n results.append(g)\n return results", "def _get_metadata(self): \n def add_dates(date_list, dates):\n \"\"\"\n Append dates to date_list which are not already within date_list.\n \n \"\"\"\n for date in dates:\n if date.strftime('%d-%b') not in date_list:\n 
date_list.append(date.strftime('%d-%b'))\n return date_list\n \n metadata = {'DATA_TYPE':'Observation Data'} \n \n self.cube_dates = []\n years = []\n \n for cube in self.cubelist:\n cube_metadata = self._get_obs_metadata(cube)\n \n self.cube_dates = add_dates(self.cube_dates, \n cube_metadata['DATES'])\n # Years are based on the earliest date.\n years.append(min(cube_metadata['DATES']).year)\n del cube_metadata['DATES']\n \n for key, val in cube_metadata.items():\n # Find unique metadata which has not already been added by \n # previous cubes. Years are the common one.\n current_vals = metadata.get(key)\n if current_vals is not None:\n for this_val in current_vals:\n if hasattr(this_val, '__iter__'):\n try: \n if numpy.array_equal(this_val, val):\n break\n except AttributeError:\n # If the array type is not comparable for \n # example array of strings.\n equal = True\n for this_item, item in zip(this_val, val):\n if this_item != item:\n equal = False\n break\n if equal:\n break\n else:\n if this_val == val:\n break\n metadata[key].append(val)\n else:\n metadata[key] = [val]\n \n bound_names = []\n # Tidy up lists of length 1.\n for key, val in metadata.items():\n if type(val) == list and len(val) == 1:\n metadata[key] = val[0]\n # Retrieve the exact bound names.\n if key[-7:] == '_BOUNDS':\n bound_names.append(key)\n \n metadata['YEARS'] = sorted(list(set(years)))\n metadata['DATES'] = self.cube_dates\n \n return self.MetaData(metadata, bound_names)", "def album_filter(query_params, query):\n table = Album.__table__\n col_name = table.c.release_date\n if query_params.get('start_year') is not None \\\n and query_params.get('end_year') is not None:\n filt_statement = and_(\n col_name >= date(int(query_params.get('start_year')), 1, 1),\n col_name <= date(int(query_params.get('end_year')), 12, 31))\n query = query.filter(filt_statement)\n elif query_params.get('start_year') is not None:\n query = query.filter(\n col_name >= date(int(query_params.get('start_year')), 1, 1))\n elif query_params.get('end_year') is not None:\n query = query.filter(\n col_name <= date(int(query_params.get('end_year')), 12, 31))\n if query_params.get('num_tracks') is not None:\n query = query.filter(\n table.c.num_tracks == int(query_params.get('num_tracks')))\n if query_params.get('label') is not None:\n query = query.filter(table.c.label == str(query_params.get('label')))\n return query", "def plotOceanParcelsAccumulatedResults(input_data_folder, output_folder, start_year, end_year, dt=1):\n # Only for\n tot_days = (end_year-start_year)*365\n start_date = datetime.strptime(str(start_year),'%Y')\n\n open_files = []\n for c_day in np.arange(0, tot_days, dt):\n print(F\"------- {c_day}---------\")\n # Released months\n c_date = start_date + timedelta(days=int(c_day))\n months = (c_date.year - start_date.year)*12 + c_date.month - start_date.month\n\n # Iterate over all the files that should contribute to the image\n fig = plt.figure(figsize=(20,10))\n ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())\n for c_month in range(0, months + 1):\n c_file_year = (start_date + relativedelta(months=int(c_month))).year\n c_file_month = (start_date + relativedelta(months=int(c_month))).month\n skip_days = c_day - (c_date - datetime.strptime(F\"{c_file_year}-{c_file_month}\",'%Y-%m')).days\n\n if len(open_files) <= c_month:\n file_name = F\"TenYears_YesWinds_YesDiffusion_NoUnbeaching_{c_file_year}_{(c_file_month):02d}.nc\"\n print(F\"Reading new file: {file_name}\")\n open_files.append(Dataset(join(input_data_folder, 
file_name), \"r\", format=\"NETCDF4\"))\n\n c_time_step = c_day - skip_days\n # lats = open_files[c_month].variables['lat'][:,c_time_step]\n # lons = open_files[c_month].variables['lon'][:,c_time_step]\n ax.scatter(open_files[c_month].variables['lon'][:,c_time_step], open_files[c_month].variables['lat'][:,c_time_step], color='c', s=1)\n\n title = F\"{start_date.strftime('%Y-%m-%d')} - {c_date.strftime('%Y-%m-%d')}\"\n ax.coastlines()\n ax.set_title(title, fontsize=30)\n\n # plt.show()\n plt.savefig(F\"{output_folder}/{start_date.strftime('%Y_%m')}_{c_day:04d}.png\")\n plt.close()", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! 
Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def get_filtered(self, collection, xmlFormat):\n\t\tstart = \"2012-05-01T00:00:00Z\"\n\t\tend = \"2012-05-20T00:00:00Z\"\n\t\tquery = '/text//annotationRecord/service/date/@modified:[%s TO %s]' % (start, end)\n\t\t\n\t\treturn {\n\t\t\t'q' : query,\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t\"ky\": collection,\n\t\t\t'sortDescending' : '/text//annotationRecord/service/date/@modified'\n\t\t\t}", "def find_by_year(our_data,year):\n return [album for album in our_data if album['number'] == str(year)]", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def apply_filter(self, image):\n pass", "def year_data(self,year):\n idx = [i for i in range(self.dates.shape[0]) if self.dates[i].year == year]\n year_dates = self.dates[idx]\n year_dc = self.dc[idx]\n return year_dates, year_dc", "def filter_images(data, vgid2idx, meta_vgids):\r\n new_data = []\r\n for vgid in meta_vgids:\r\n new_data.append(data[vgid2idx[vgid]])\r\n return new_data", "def get_filtered(self, collection, xmlFormat):\n\t\tstart = \"2012-05-01T00:00:00Z\"\n\t\tend = \"2012-05-20T00:00:00Z\"\n\t\tquery = '/text//itemRecord/metaMetadata/dateInfo/@lastModified:[%s TO %s]' % (start, end)\n\t\t\n\t\treturn {\n\t\t\t'q' : query,\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t\"ky\": collection,\n\t\t\t'sortDescending' : '/text//itemRecord/metaMetadata/dateInfo/@lastModified'\n\t\t\t}", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def test_author_year_filtering(self):\n # Get a valid date\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id, \"year\": entry.publication_date.year}\n\n self._test_filtering(**params)", "def masked(months=range(1, 13), years=[2009], folder=\"data/\", layer=\"BHR_VIS\"):\n data = []\n file_template = 'NETCDF:\"{:s}\":{:s}' # 
Template for the Netcdf path\n # the actual filename\n fname_template = '{:s}/GlobAlbedo.merge.albedo.05.{:d}{:02d}.nc'\n for year in years:\n for month in months:\n fname = fname_template.format(folder, year, month)\n netcdf_fname = file_template.format(fname, layer)\n g = gdal.Open(netcdf_fname)\n if g is None:\n raise IOError(\"Problem with reading file {}\".format(fname))\n the_data = g.ReadAsArray()\n masked_data = np.ma.array(the_data,mask=np.isnan(the_data))\n data.append(masked_data)\n output_data = np.ma.array(data)\n return output_data", "def select_calendar_month(X, year_month, timename='time'):\n\n def calendar_month(year, month):\n \"\"\"\n For a given year and month return the date of the begining of the month and the date of the beginning of the next month\n \"\"\"\n start = datetime.datetime(year, month, 1)\n if month == 12:\n end = datetime.datetime(year+1, 1, 1)\n else:\n end = datetime.datetime(year, month+1, 1)\n print(start)\n print(end)\n return start, end\n \n year, month = year_month\n \n start, end = calendar_month(year, month)\n \n # Gotta do better than this\n if timename.lower() == 'time':\n X_cm = X.sel(time = slice(start, end))\n elif timename.lower() == 'time_wave':\n X_cm = X.sel(time_wave = slice(start, end))\n # Gotta do better than this\n \n return X_cm, [start, end]", "def GEEterraClimatePtsAvgMonth(ptsFile,metric,startYear,endYear,buf,poly,username,folderOut, scalePix = 4000):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n years = list(range(startYear, endYear + 1))\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n ID_field = \"geeID\"\n\n scale_d = {}\n scale_d['aet'] = 0.1\n scale_d['def'] = 0.1\n scale_d['pdsi'] = 0.01\n scale_d['pet'] = 0.1\n scale_d['soil'] = 0.1\n scale_d['srad'] = 0.1\n scale_d['tmmn'] = 0.1\n scale_d['tmmx'] = 0.1\n scale_d['vap'] = 0.001\n scale_d['vpd'] = 0.01\n scale_d['vs'] = 0.01\n \n for met in metric:\n metL = [met]\n Gridmet_pr = ee.ImageCollection('IDAHO_EPSCOR/TERRACLIMATE').select(met)\n \n img_col0 = Gridmet_pr.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n if any([(met == 'pr'),(met == 'ro'),(met == 'swe')]):\n\n img_col = img_col0\n \n else:\n\n def Scale1(img):\n return (img.float()\n .multiply(scale_d[metL[0]])\n .copyProperties(img,['system:time_start','system:time_end']))\n\n img_col = img_col0.map(Scale1)\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'tcy'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print ('buffered pts by:' + str(buf) + ' for ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n 
return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'tcy'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'tcy'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for ' + met)", "def _year_range(m):\n return (m.group(1), m.group(2))", "def get_result(self, now):\n year = now.split('-')[0]\n month = int(now.split('-')[1])\n if int(month/7) == 0:\n count = '01'\n if int(month/7) == 1:\n count = '07'\n sql = \"select imagenum, source_name from (select count(*) as \\\n imagenum, source_name from image_info_%s%s\\\n where create_time > \\'%s\\' and status =3 group by source_name) as image_num order \\\n by imagenum desc limit 100\"\\\n %(year, count ,now)\n print sql\n #sql = \"select imagenum, source_name from (select count(*) as \\\n # imagenum, source_name from image_info_%s%s\\\n # where create_time between \\'2017-05-13 00:00:00\\' and \\'2017-05-14 00:00:00\\'\\\n # and status =3 group by source_name) as image_num order \\\n # by imagenum desc limit 100\"\\\n # %(year, month)\n #print sql\n try:\n with session_cpu_ic() as session: \n results = session.execute(sql).fetchall()\n if results is not None:\n return results\n except Exception as e:\n self.logger.exception(\"get_results %s\" % str(e))", "def create_calmap(df, yr, s3_resource_bucket):\n calmap_data = pd.Series(df['Miles'].values, index=df['Date'])\n plt.figure(figsize=(15, 5))\n plt.title(f'CALENDAR HEATMAP')\n calmap.yearplot(calmap_data,\n year=int(yr),\n fillcolor='lightgrey')\n plt.savefig(f'yr_calmap.png')\n s3_resource_bucket.upload_file('yr_calmap.png', 'yr_calmap.png',\n ExtraArgs={'ContentType': 'image/png'})\n # remove local file\n os.remove('yr_calmap.png')", "def filter_by_date(sequence, _min, _max):\r\n _max, _min = [_convert_date(x) for x in (_max, _min)]\r\n return {x for x in sequence if _max >= x.date >= _min}", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, 
projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def filter_tracks(df, start_year=1980, end_year=2010, zeta=0, age=36):\n tracks = df.groupby('num')\n filterdf = tracks.filter(lambda x: (x['datetime'].dt.year.min() >= start_year) &\n (x['datetime'].dt.year.max() <= end_year) &\n (x['age'].max() >= age) &\n (np.abs(x['vorticity'].min()) > zeta))\n return filterdf", "def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return", "def apply_photo_style(path, decade):\n flt_path = os.path.dirname(path) + \"/\" + str(uuid.uuid4()) + \".jpg\"\n 
shutil.copyfile(path, flt_path) # make a copy of image because part of the filters change image in place\n f = None\n if decade <= 1930 or decade == 1950 or decade == 1970:\n success = execute_js(js_path, arguments='{} {} {}'.format(path, decade, flt_path)) # execute js rendering with Naked\n if decade == 1930:\n f = Thirties(flt_path)\n if decade == 1940:\n f = Gotham(flt_path)\n \n if decade == 1950 or decade == 1960: # for non-standard photo frames \n padding_x = 80\n if decade == 1950: # kodachrome frame\n padding_top = 80\n padding_bottom = 240\n else: # polaroid frame\n padding_bottom = 80\n padding_x = padding_top = 0\n expand_rect_padding(flt_path, padding_x, padding_top, padding_bottom, flt_path)\n \n if decade == 1950:\n f = Fifties(flt_path)\n if decade == 1960:\n f = Toaster(flt_path)\n if decade == 1970:\n f = Seventies(flt_path)\n if decade == 1980:\n f = Nashville(flt_path)\n if decade == 1990:\n f = Lomo(flt_path)\n if decade == 2000:\n f = Davehill(flt_path)\n \n if f is not None:\n f.apply() # apply photo filter using imagemagick\n\n if decade == 1940:\n # resize fix - gotham filter output image slightly differs in size so resize it to sizes of original image\n origin_img = Image.open(path)\n width, height = origin_img.size \n img = Image.open(flt_path) \n img = img.resize([width,height], Image.ANTIALIAS)\n img.save(flt_path, \"JPEG\")\n\n return flt_path", "def filter_movie():\n name = request.args.get('name', default=\"\", type=str)\n year = request.args.get('year', default=-1, type=int)\n\n filtered_list = []\n\n if name != \"\":\n name = name.replace('_', ' ')\n name = name.replace('\"', '')\n for movie in movies_data:\n if name.lower() in movie.lower():\n filtered_list.append(movies_data[movie])\n\n if year != -1:\n filtered_list = [movie for movie in filtered_list if year == movie['year']]\n\n return make_response(jsonify(filtered_list), 200)", "def addNightLights(self,img,y):\n\t\t\n\t\tstartDate = ee.Date.fromYMD(y, 1, 1)\n\t\tendDate = ee.Date.fromYMD(y, 12, 31)\n\t\t\n\t\tif y < 2012:\n\t\t\n\t\t\tnightLights = ee.Image(ee.ImageCollection(\"NOAA/DMSP-OLS/NIGHTTIME_LIGHTS\").filterDate(startDate,endDate).mean())\t\n\t\t\timg = img.addBands(nightLights.select([\"stable_lights\"]).rename([\"stable_lights\"]))\n\t\t\n\t\tif y >= 2012:\n\t\t\tnightLights = ee.Image(ee.ImageCollection(\"NOAA/VIIRS/DNB/MONTHLY_V1/VCMCFG\").filterDate(startDate,endDate).mean())\t\n\t\t\timg = img.addBands(nightLights.select([\"avg_rad\"]).rename([\"stable_lights\"]))\n\t\t\n\t\treturn img", "def __init__(self, country, months):\n self.country = country\n self.months = months\n self.make_tiff_list(self.months)\n self.make_vrt() #CHOOSE ONE\n self.merge_rasters() # OR THE OTHER METHOD", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def test_grdimage_slice(grid):\n grid_ = grid.sel(lat=slice(-30, 30))\n fig = Figure()\n fig.grdimage(grid_, cmap=\"earth\", projection=\"M6i\")\n return fig", "def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]", "def filter_meteo_data(self, startdate, enddate):\n self.all_meteo_data.columns.values[0]='Datum-tijd'\n 
self.all_meteo_data['datetime']=pd.to_datetime(self.all_meteo_data['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n self.all_meteo_data.drop(['Datum-tijd'],axis=1, inplace=True)\n mask = (self.all_meteo_data['datetime'] > startdate) & (self.all_meteo_data['datetime'] <= enddate)\n meteodata = self.all_meteo_data.loc[mask].copy()\n meteodata.set_index('datetime',inplace=True)\n return meteodata", "def scrape(startyear, startmonth, endyear, endmonth):\n year = startyear\n month = startmonth\n while (not (year == endyear and month == endmonth)):\n ys = \"{}\".format(year)\n ms = \"{:02d}\".format(month)\n gather_all_profiles(ys,ms) \n if month == 12:\n year += 1\n month = 0\n month += 1", "def plot_photos_per_year(woe_id=None, save2docs=False):\n\n series = pd.Series()\n\n for year in range(PLOT_START_YEAR, PLOT_END_YEAR):\n query = FlickrQuery(woe_id=woe_id, year=year)\n n_photos = store.read(store.N_PHOTOS, query)\n series.set_value(year, n_photos)\n\n series.plot(kind='bar', use_index=True)\n\n place_name = flickr_api.get_place_name(woe_id=woe_id)\n plt.title(place_name)\n plt.ylabel('Number of flickr photo uploads')\n plt.xlabel('Year')\n\n file_name = '%s.png' % time.time()\n target_path = path.join(FLICKR_PLOT_DIR, file_name)\n plt.savefig(target_path)\n\n if save2docs:\n place_name_formatted = place_name.lower().replace(' ', '-')\n file_name = 'flickr_%s' % place_name_formatted\n target_path = path.join(DOCS_IMG_DIR, file_name)\n plt.savefig(target_path)", "def get_subset(mlist,year):\n newlist = []\n for entry in mlist:\n if int(entry[0][:4]) > int(year):\n continue\n newvec = entry[:8]\n citations = entry[8]['citations']\n citations = filter(lambda a: int(a[:4]) <= int(year), citations)\n newvec[2] = len(citations)\n newlist.append(newvec)\n return newlist", "def test_contains_month_true(self):\n ary = self.ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')[2009]\n self.assertTrue(11 in ary)", "def main(filename, column, year):\n ee.Initialize()\n image = medoid_image(year)\n first = True\n for feature in shapefile_generator(filename):\n invocation = image.reduceRegion(\n ee.Reducer.mean(), ee.Geometry(feature['geometry']), scale=30)\n res = invocation.getInfo()\n if first:\n header = [column] + [item for item in res]\n print(','.join(header))\n first = False\n line = [\n feature['properties'][column]] + [res[item] for item in res]\n line = [str(item) for item in line]\n print(','.join(line))", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n 
incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def test_magnitude_filters(self):\n credentials = Mock(base_url=\"\")\n\n manager = Manager('invoices', credentials)\n uri, params, method, body, headers, singleobject = manager._filter(**{'Date__gt': datetime.datetime(2007, 12, 6)})\n\n self.assertEqual(\n params,\n {u'where': u'Date>DateTime(2007,12,6)'}\n )\n\n manager = Manager('invoices', credentials)\n uri, params, method, body, headers, singleobject = manager._filter(**{'Date__lte': datetime.datetime(2007, 12, 6)})\n\n self.assertEqual(\n params,\n {u'where': u'Date<=DateTime(2007,12,6)'}\n )", "def year_text_range_filter(self, queryset, name, value):\n if value:\n if value[0] and value[1]:\n queryset = queryset.filter(**{name+'__regex': r'^[0-9]{3,4}$'}) \\\n .annotate(**{name+'_int': Cast(name, IntegerField())})\\\n .filter(**{name+'_int__range': (value[0], value[1])})\n else:\n if value[0]:\n queryset = queryset.filter(**{name+'__regex': r'^[0-9]{3,4}$'}) \\\n .annotate(**{name+'_int': Cast(name, IntegerField())})\\\n .filter(**{name+'_int__gte': value[0]})\n if value[1]:\n queryset = queryset.filter(**{name+'__regex': r'^[0-9]{3,4}$'}) \\\n .annotate(**{name+'_int': Cast(name, IntegerField())})\\\n .filter(**{name+'_int__lte': value[1]})\n\n return queryset", "def filter_img(img, new_img, f):\n\n datas = img.getdata()\n new_data = []\n for item in datas:\n if f(item[0]) and f(item[1]) and f(item[2]):\n new_data.append((0, 0, 0, 0))\n else:\n new_data.append(item)\n new_img.putdata(new_data)", "def stats_data_by_year():\n videos = db_session.query(MediaFiles.year, func.count(MediaFiles.year)) \\\n .filter(MediaFiles.duration > 0) \\\n .group_by(MediaFiles.year) \\\n .all()\n photos = db_session.query(MediaFiles.year, func.count(MediaFiles.year)) \\\n .filter(MediaFiles.duration == 0) \\\n .group_by(MediaFiles.year) \\\n .all()\n years = sorted(list(set([item[0] for item in videos + photos])), reverse=True)\n videos_by_year = {}\n photos_by_year = {}\n data_by_type_year = []\n for year in years:\n for item in videos:\n if year == item[0]:\n videos_by_year.update({year: item[1]})\n elif year not in videos_by_year:\n videos_by_year.update({year: 0})\n for item in photos:\n if year == item[0]:\n photos_by_year.update({year: item[1]})\n elif year not in photos_by_year:\n photos_by_year.update({year: 0})\n data_by_type_year.append({'name': year,\n 'data': [photos_by_year[year], videos_by_year[year]]})\n data_by_year_type = {'years': years,\n 'values': [{'name': 'Photos', 'color': '#76BCEB',\n 'data': [count for year, count in photos_by_year.items()]},\n {'name': 'Videos', 'color': '#397DAA',\n 'data': [count for year, count in videos_by_year.items()]}\n ]}\n return data_by_year_type, data_by_type_year", "def collect_data(self, src_directory=None,src_filename_format=None,\n date_selection=None,units=None,exposure_schedule=None,bin_width=None) :\n\n # TODO: There must be a better way to do this\n if not (src_directory is None) :\n self.src_directory = src_directory\n if not (src_filename_format is None) :\n self.src_filename_format = src_filename_format\n if not (date_selection is None) :\n 
self.date_selection = date_selection\n if not (units is None) :\n self.units = units\n if not (exposure_schedule is None) :\n self.exposure_schedule = exposure_schedule\n if not (bin_width is None) :\n self.bin_width = bin_width\n\n # first we read the src_directory to check the total number of unique years available\n data_dir_contents = os.listdir(self.src_directory)\n # TODO: improve jankiness of this format-matching search for filenames\n char_year = self.src_filename_format.find('yyyy')\n dataset_years = [ x for x in data_dir_contents if re.findall(self.src_filename_format.replace(\"yyyy\",\"[0-9]{4}\"),x)]\n dataset_years = [ int(x[char_year:char_year+4]) for x in dataset_years ]\n\n # Now we can handle default options like \"all\"\n if type(self.date_selection) == str and self.date_selection == \"all\" :\n date_selection = pd.date_range(start=str(dataset_years[0])+\"-01-01\",\n end=str(dataset_years[-1])+\"-12-31\")\n else :\n date_selection = self.date_selection # TODO: much more interpretation options here\n\n #now we find unique years \n list_of_years = sorted(set(date_selection.year))\n\n for i in range(len(list_of_years)) :\n year = list_of_years[i]\n print(\"Processing year \"+str(year)) #should use logging, don't yet know how\n dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year))) \n dataset.set_auto_mask(False) #to get normal arrays (faster than default masked arrays)\n\n if dataset.dimensions['time'].size == 24 :\n # needed if just a single day\n time_subset = [True for i in range(dataset.dimensions['time'].size)]\n else :\n # Next we pull a subset from the netCDF file\n # declare false array with same length of time dimension from netCDF\n time_subset = [False for i in range(dataset.dimensions['time'].size)] \n # reshape false array to have first dimension 24 (hours in day)\n time_subset = assert_data_shape_24(time_subset) \n # set the appropriate days as true\n time_subset[:,date_selection[date_selection.year == year].dayofyear-1] = True \n # flatten time_subset array back to one dimension\n time_subset = time_subset.flatten(order='F')\n\n # load subset of data\n print(\" Slicing netcdf data with time subset\")\n data = dataset['UV_AS'][time_subset,:,:] #work in UVI by default because it's easy to read\n # TODO: check units of dataset files, CF conventions for UVI or W/m2\n\n # now to calculate doses if requested\n if self.units in [\"SED\",\"J m-2\",\"UVIh\"] :\n # if calculating doses\n print(' Calculating doses')\n data = assert_data_shape_24(data)\n data = np.sum(np.reshape(self.exposure_schedule,[24,1,1,1]) * data,axis=0)\n\n elif (self.exposure_schedule != np.ones(24)).any() :\n # assume elsewise calculating intensity (i.e. 
UV-index) then limit data selection according\n # to schedule (remembering that default schedule is just ones)\n print(' Slicing data with exposure schedule')\n # reshape so first dimension is 24 hours\n data = assert_data_shape_24(data)\n # select only those hours with nonzero entry in exposure schedule\n data = data[self.exposure_schedule != 0,:,:,:]\n # select nonzero values from exposure schedule\n exposure_schedule_nonzero = self.exposure_schedule[self.exposure_schedule != 0]\n\n # if any nonzero entries aren't 1, multiply data accordingly\n if (exposure_schedule_nonzero != 1).any() :\n data *= np.reshape(exposure_schedule_nonzero,[len(exposure_schedule_nonzero),1,1,1])\n\n # recombine first two dimensions (hour and day) back into time ready for histogram\n data = assert_data_shape_24(data,reverse=True) \n\n # now multiply data by conversion factor according to desired untis\n # TODO: Should expand upon this in reference files\n data *= {\"SED\":0.9, \"J m-2\":90, \"UVIh\":1, \"UVI\":1, \"W m-2\":0.025, \"mW m-2\":25}[self.units]\n\n # if this is the first iteration, declare a hist\n if i == 0 :\n # seems like useful metadata to know bin n and edges\n # TODO: reconsider where this belongs in the code (__init__?)\n self.num_bins = int(np.nanmax(data) // self.bin_width ) + 2\n self.bin_edges = (np.array(range(self.num_bins+1)) - 0.5) * self.bin_width \n # this form allows for weird custom bin edges, but probably will never use that\n self.bin_centers = self.bin_edges[:-1] + 0.5 * np.diff(self.bin_edges)\n\n # TODO: think about possible cases where dimensions could differ\n self.pix_hist=np.zeros([self.num_bins,\n np.shape(data)[-2],np.shape(data)[-1]], dtype=np.int16)\n\n # TODO: this should also be done by some initial dataset analysis, but that's a drastic\n # design overhaul\n self.lat = dataset['lat'][:]\n self.lon = dataset['lon'][:]\n\n else :\n new_num_bins = int(np.nanmax(data) // self.bin_width) + 2 - self.num_bins\n # check if new data requires extra bins in pix_hist\n if new_num_bins > 0 :\n # append zeros to pix hist to make room for larger values\n self.pix_hist = np.concatenate((self.pix_hist,np.zeros(\n [new_num_bins,np.shape(self.pix_hist)[-2],np.shape(self.pix_hist)[-1]],\n dtype=np.int16)),axis=0)\n # update bin information\n self.num_bins = self.num_bins + new_num_bins\n self.bin_edges = (np.array(range(self.num_bins+1)) - 0.5) * self.bin_width \n self.bin_centers = self.bin_edges[:-1] + 0.5 * np.diff(self.bin_edges)\n\n # TODO: Add check in case bins get \"full\" (i.e. 
approach int16 max value)\n # now put data into hist using apply_along_axis to perform histogram for each pixel\n print(\" Calculating and adding to pixel histograms\")\n self.pix_hist[:,:,:] += np.apply_along_axis(lambda x: \n np.histogram(x,bins=self.bin_edges)[0],0,data)\n\n return self", "def get_monthly_prism_ppt_data(year,month, plotPPTBounds):\n \"\"\" It is in the form of grid \"\"\"\n \n if(month<10):\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+\"0\"+str(month)+\"_bil.bil\"\n else:\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+str(month)+\"_bil.bil\" \n \n ppt_data = read_prism_bil(join(cf.root, cf.prism_dir, prism_file_path))\n \n hdr_dict = read_prism_hdr(join(cf.root, cf.prism_dir, prism_file_path).replace('.bil', '.hdr'))\n \n hdr_dict[\"ULXMAP\"] = float(hdr_dict[\"ULXMAP\"])\n hdr_dict[\"ULYMAP\"] = float(hdr_dict[\"ULYMAP\"])\n hdr_dict['NROWS'] = int(hdr_dict['NROWS'])\n hdr_dict['NCOLS'] = int(hdr_dict['NCOLS'])\n hdr_dict['XDIM'] = float(hdr_dict['XDIM'])\n hdr_dict['YDIM'] = float(hdr_dict['YDIM'])\n \n p1 = (hdr_dict[\"ULXMAP\"] - (hdr_dict['XDIM']/2), \n hdr_dict[\"ULYMAP\"] + (hdr_dict['YDIM']/2))\n\n p2 = (p1[0] + (hdr_dict['NCOLS']*hdr_dict['XDIM']),\n p1[1])\n\n p3 = (p2[0],\n p2[1] - (hdr_dict['NROWS']*hdr_dict['YDIM']))\n\n p4 = (p1[0],\n p3[1])\n \n lon_point_list = (p1[0], p2[0], p3[0], p4[0])\n lat_point_list = (p1[1], p2[1], p3[1], p4[1])\n \n ppt_bounds = Polygon(zip(lon_point_list, lat_point_list))\n \n if(plotPPTBounds):\n crs = {'init': 'epsg:4326'}\n m = folium.Map(zoom_start=10, tiles='cartodbpositron')\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[ppt_bounds]) \n \n folium.GeoJson(polygon).add_to(m)\n folium.LatLngPopup().add_to(m)\n m.save(\"Prism Bounds.html\")\n\n return ppt_bounds, ppt_data, hdr_dict", "def filter_images(history, whitelist):\n docker_client = docker.client.APIClient()\n local_images = common.get_local_images(docker_client)\n approved_images = set(local_images) - set(whitelist)\n return {image: timestamp for image, timestamp in history.items() if image in approved_images}", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] 
* yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def remove_unseasonal_images(data, date_inf=\"15-05\", date_sup=\"15-10\"):\n os.makedirs(os.path.join(data, 'removed'), exist_ok=True)\n for product in os.listdir(data):\n if product.startswith('product') is False:\n continue\n path = os.path.join(data, product)\n if os.path.isdir(path) is False:\n continue\n if seasonal(path, date_inf, date_sup) is False:\n print('\\tRemoving ' + path)\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, 'removed', product + '.tiff'))\n shutil.rmtree(path)", "def time_search_month(year, month):\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM entries\n WHERE CAST(strftime('%Y',entries.published)AS INT) = {year}\n AND CAST(strftime('%m',entries.published)AS INT) = {month}\n ORDER BY entries.published DESC\n \"\"\".format(year=int(year), month=int(month)))\n\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)", "def filter_years(text):\n months = ['january', 'february', 'march', 'april', 'may', 'june',\n 'july', 'august', 'september', 'october', 'november', 'december']\n prepositions = ['around', 'after', 'at', 'as',\n 'approximately', 'before', 'between', 'by',\n 'during', 'from', 'in', 'near', 'past',\n 'since', 'until', 'within'] # removed: about, on\n conjugations = ['and']\n articles = ['the']\n times = ['early', 'mid', 'late']\n patterns = months + prepositions 
+ conjugations + articles + times\n re_string = r'\\b(' + '|'.join(patterns) + r')\\b(\\s|-)\\b([0-9]{3,4})s?\\b(?i)(?!\\sMYA)\\s?(BCE|BC)?'\n years = [int(match.group(3)) * (-2*bool(match.group(4))+1)\n for match in re.finditer(re_string, text, re.IGNORECASE)]\n re_string = r'([0-9]{1,2})(st|nd|rd|th) century\\s?(BCE|BC)?'\n centuries = [(int(match.group(1)) * 100 - 100) * (-2*bool(match.group(2))+1)\n for match in re.finditer(re_string, text, re.IGNORECASE)]\n years += centuries\n years = [y for y in years if y<Dump.MAX_YEAR]\n return sorted(years + centuries)", "def pip_hmt(start_timestamp, end_timestamp, city='Torino'):\n\n pipeline = [\n {\"$match\":\n {\"city\": city}\n #{\"init_time\": {\"$gte\": start_timestamp}},\n #{\"final_time\": {\"$lte\": end_timestamp}}\n },\n {\"$group\": {\n \"_id\": {\"day\": {\"$dayOfYear\": \"$init_date\"}, \"year\": {\"$year\": \"$init_date\"}},\n \"plate\": {\"$addToSet\": \"$plate\"}},\n\n },\n {\n \"$unwind\": \"$plate\"\n },\n {\n \"$group\": {\n \"_id\": {\"day\": \"$day\", \"year\": \"$year\"},\n \"Count\": {\"$sum\": 1}\n }\n }\n\n ]\n\n return pipeline", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. 
in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def an(x):\r\n return Feature(x, \"year\")", "def filter(self, filters):", "def month_overview(items, month_long):\n events = []\n for item in items:\n dt = datetime.strptime(item['Date'], '%m/%d/%Y')\n if filters.month_l_filter(dt.month) == month_long:\n events.append(item)\n return events", "def precipitation():\n \n # Obtain the current year from the date and using that date determine the previous year appending 01-01 and 12-31\n \n compare_date = dt.date.today()\n start_date = f\"{compare_date.year - 1}-01-01\"\n end_date = f\"{compare_date.year - 1}-12-31\"\n precipitation_result = session.query(Measurement).filter((Measurement.date >= start_date) & (Measurement.date <= end_date)\n ).order_by(Measurement.date).all()\n \n precipitation = []\n \n for row in precipitation_result:\n precipitation_dict = {}\n precipitation_dict[\"date\"] = row.date\n precipitation_dict[\"tobs\"] = row.tobs\n precipitation.append(precipitation_dict)\n \n return jsonify(precipitation)", "def all_mice_rasters(experiment, obs_period=None, write_days=True, as_only=False):\n for g in experiment.groups:\n for m in g.mice:\n mouse_raster(experiment, obs_period, mouse_label=m.label, write_days=write_days, as_only=as_only)", "def satReader(directory,month,latmin,latmax,lonmin,lonmax):\n \n ### Enter filename\n filename = 'cs2icesat_regrid_mar_20042015.nc' \n \n ### Month/Years extracted\n dateyr = now.year \n datemo = datetime.date(dateyr,month+1,1).strftime('%B')\n \n ### Retrieve data\n data = Dataset(directory + filename)\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n thkn = data.variables['thick'][:]\n data.close()\n \n ### Calculate lat/lon region\n xmask = (lat > latmin) & (lat < latmax)\n ymask = (lon > lonmin) & (lon < lonmax)\n \n mask = xmask[:] & ymask[:]\n latvals = np.where(mask == True)[0]\n lonvals = np.where(mask == True)[1]\n latvals = np.unique(latvals)\n lonvals = np.unique(lonvals)\n \n thk = thkn[:,latvals,:]\n thk = thk[:,:,lonvals]\n \n lat = lat[latvals,:]\n lat = lat[:,lonvals]\n lon = lon[latvals,:]\n lon = lon[:,lonvals]\n\n grid = '---> [[%s to %s N, %s to %s E]]' % (latmin,latmax,lonmin,lonmax)\n print 'Completed: Satellite data read (%s)!' 
% datemo, grid\n \n return lat,lon,thk", "def events_in_month(request, year, month):\n month = datetime(year=year, month=month, day=1)\n next_month = month + timedelta(months=1)\n month_events = Event.objects.filter(date__gte=month, date__lte=next_month).order_by('date')\n return render_short(request, 'adhoc_calendar/events.html', context)", "def filter_dates(data, date_file, offset):\n\n if not date_file:\n data_filtered = data\n date_metadata = None\n size_filtered = False\n size_excluded = False\n else:\n date_list, date_metadata = gio.read_dates(date_file)\n if offset:\n date_list = date_offset(date_list, offset)\n \n matching_date_indexes = nio.match_dates(date_list, data.getTime().asComponentTime(), invert_matching=False, return_indexes=True)\n\n data_filtered = nio.temporal_extract(data, matching_date_indexes, indexes=True, tvar_out=False)\n\n size_filtered = len(matching_date_indexes)\n\n return data_filtered, date_metadata, size_filtered", "def get_year_month_range(year, month, quantity):\n yield year, month\n for _ in range(quantity - 1):\n year, month = increment_year_month(year, month)\n yield year, month", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def get_series(self, page=0, filters=''):", "def filter(request, year=None, month=None):\n\n # Arguments used when searching for entries.\n kwargs = {}\n\n errors = []\n\n # Update kwargs with appropriate search kwargs.\n if year and month:\n kwargs.update({'created__year': year, 'created__month': month})\n elif year:\n kwargs.update({'created__year': year})\n elif month:\n errors.append('A Year is required to make a query search.')\n else:\n errors.append('A Year or Year/Month combination is needed.')\n\n search_str = 'You searched for Year: %s - Month: %s.' 
% (year, month)\n data = {'blog_info': get_blog_info(), 'errors': errors, 'mon_sel': month,\n 'yr_sel': year, 'action_str': search_str}\n\n if not errors:\n entries = BlogEntry.objects.filter(**kwargs)\n data.update({'entries': paginate_objects(request, entries)})\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))", "def custom_filter(image: Image) -> Image:\n image = image.filter(ImageFilter.Kernel(\n size=(3, 3), kernel=(1, 0, 1, 0, 0, 0, 1, 0, 1)))\n return image", "def get_filtered_acc_gas(database_year, start_year, end_year):\n columns = [\n \"year\",\n \"month\",\n *ACC_COMPONENTS_GAS,\n ]\n columns_str = \", \".join(columns)\n sql_str = f\"\"\" \n SELECT * \n FROM acc_gas\n WHERE year >= {start_year}\n AND year <= {end_year}\n \"\"\"\n con = get_db_connection(database_year=database_year)\n return pd.read_sql(sql_str, con=con)", "def filter_image(image: Image, image_filter: str) -> Image:\n if image_filter == 'max':\n image = image.filter(ImageFilter.MaxFilter())\n elif image_filter == 'min':\n image = image.filter(ImageFilter.MinFilter())\n elif image_filter == 'median':\n image = image.filter(ImageFilter.MedianFilter())\n elif image_filter == 'custom-max':\n image = custom_filter(image)\n image = image.filter(ImageFilter.MaxFilter())\n elif image_filter == 'custom-min':\n image = custom_filter(image)\n image = image.filter(ImageFilter.MinFilter())\n elif image_filter == 'custom-median':\n image = custom_filter(image)\n image = image.filter(ImageFilter.MedianFilter())\n\n return image", "def test_request_wmts_rest_date_from_year_layer(self):\n ref_hash = '7c7fcdfaea0faf91afdd690eb7fe4dea'\n req_url = r'http://localhost/reproject/test/wmts/test_weekly_jpg/default/2012-02-22/GoogleMapsCompatible_Level3/0/0/0.jpeg'\n if DEBUG:\n print('\\nTesting: Request tile with date from \"year\" layer via WMTS (REST)')\n print('URL: ' + req_url)\n check_result = check_tile_request(req_url, ref_hash)\n self.assertTrue(check_result, 'WMTS (REST) date request from \"year\" layer does not match what\\'s expected. 
URL: ' + req_url)", "def top5_countries_by_date(day: int, month: int, year: int = 2020) -> List[str]:\r\n\r\n # Your code goes here (remove pass)\r\n data=\"%d/%d/20\"%(month,day)", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft (fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, mag_filtered_image, full_contrast_image]", "def filter(self, img: np.ndarray) -> np.ndarray:\n raise NotImplemented", "def GEEmacaGCMs(ptsFile,metric,timeStep,startYear,endYear,scenarios,buf,poly,models,\n username,folderOut, scalePix = 4000):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['month'] = 'projm'\n time_d['year'] = 'projy'\n \n for met in metric:\n\n for scenario in scenarios:\n\n for model in models:\n\n MACA = (ee.ImageCollection('IDAHO_EPSCOR/MACAv2_METDATA_MONTHLY')\n .select(met)\n .filterMetadata('model', 'equals', model)\n .filterMetadata('scenario', 'equals', scenario))\n\n metL = [met]\n \n years = list(range(startYear, endYear + 1))\n yearsEE = ee.List(years)\n \n if all([(timeStep == 'year'),any([(met == 'tasmin'),(met == 'tasmax'),\n (met == 'huss'),(met == 'rsds'),\n (met == 'was')])]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 
'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif (timeStep == 'month'):\n \n img_col = MACA.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n elif all([(timeStep == 'year'),(met == 'pr')]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)", "def filter_dataframe(df, start_date_dt, end_date_dt):\n\n dff = df \n # df[\n # (df[\"timestamp\"].dt.date >= dt.date(start_date_dt.year, start_date_dt.month, start_date_dt.day))\n # & (df[\"timestamp\"].dt.date <= dt.date(end_date_dt.year, end_date_dt.month, end_date_dt.day))\n # ]\n # if (lat_min != -90) or (lat_max != 90):\n # dff = 
dff[\n # (dff[\"lat\"] >= lat_min)\n # & (dff[\"lat\"] <= lat_max)\n # ]\n # if (lon_min != -90) or (lon_max != 90):\n # dff = dff[\n # (dff[\"lon\"] >= lon_min)\n # & (dff[\"lon\"] <= lon_max)\n # ]\n\n return dff", "def get_this_year_stat(cls):\n year = dt.date.today().year\n stat = []\n for item in Item.objects.all():\n duration_sum = sum(cls.objects.filter(item=item).filter(\n time__year=year).values_list('duration', flat=True))\n # drop item whoes duration_sum is 0;\n if duration_sum > 0:\n stat.append({\n 'name': item.name,\n 'sum': duration_sum,\n })\n return stat", "def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)", "def time_search_year(year):\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM entries\n WHERE CAST(strftime('%Y',entries.published)AS INT) = {year}\n ORDER BY entries.published DESC\n \"\"\".format(year=int(year)))\n\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def date_range(all_files,start_year,start_month,start_day,end_year,end_month,\r\n end_day):\r\n\r\n d1 = date(start_year,start_month,start_day)\r\n d_last = date(end_year,end_month,end_day)\r\n day_range = (d_last - d1).days\r\n #print('day range: %s' %day_range)\r\n files = []\r\n for t in range(day_range):\r\n d2 = d1 + timedelta(t)\r\n d2_str1 = str(d2)\r\n d2_str2 = d2.strftime('%Y_%m_%d')\r\n # print(d2)\r\n for f in all_files:\r\n if d2_str1 in str(f) or d2_str2 in str(f):\r\n files.append(f)\r\n return(files)", "def year_emissions_intensity_rule(_m, y):\r\n\r\n return m.YEAR_EMISSIONS[y] / m.YEAR_DEMAND[y]", "def year_cv_split(X, year_range):\n return [\n ((X[\"year\"] < year).to_numpy(), (X[\"year\"] == year).to_numpy())\n for year in range(*year_range)\n ]", "def north_america_countries():\r\n north_america_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in north_america:\r\n north_america_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in north_america_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def filtering(self):\n from numpy import fft\n import numpy as np\n\n _image_dft = fft.fft2(self.image)\n _image_dft = fft.fftshift(_image_dft)\n # dft = DFT.DFT()\n # plt.figure(1) \n # plt.imshow(self.image)\n # plt.figure(2)\n # plt.imshow(20*np.log10(abs(_image_dft))) \n # print(_image_dft)\n # print(abs(_image_dft))\n # plt.show()\n filter = self.filter(self.image.shape, 
self.cutoff, self.order) \\\n if self.filter_name.startswith('butterworth') \\\n else self.filter(self.image.shape, self.cutoff)\n \n _image_dft_filtered = _image_dft * filter\n _image_filtered = abs(fft.ifft2(_image_dft_filtered))\n \n return [ self.post_process_image(_image_filtered), \\\n self.post_process_image(20*np.log10(abs(_image_dft)+.00001)), \\\n self.post_process_image(20*np.log10(abs(_image_dft_filtered)+.00001)) ]", "def climatology_monthly(da, climatology_slice=None, time_dim='time'):\n if climatology_slice is None:\n clim = da.groupby(time_dim+'.month').mean()\n else:\n clim = da.sel({time_dim: climatology_slice}).groupby(time_dim+'.month').mean()\n return clim", "def filter_by_age(files, comparator):\n today = date.today()\n results = []\n for f in files:\n m = DATE_REGEX.search(f)\n if not m:\n continue\n f_date = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))\n\n if comparator(today - f_date):\n results.append((f_date, f))\n\n results.sort()\n return [f for _, f in results]", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))" ]
[ "0.6298117", "0.5903462", "0.58848464", "0.5747459", "0.5735303", "0.5665092", "0.54974794", "0.5486667", "0.5385302", "0.5352185", "0.5291872", "0.5263054", "0.5232077", "0.52078956", "0.519673", "0.51886106", "0.5142435", "0.510914", "0.50958353", "0.50748765", "0.50723386", "0.5071877", "0.50454485", "0.5031448", "0.5014739", "0.50142187", "0.499967", "0.49894515", "0.49519676", "0.4909264", "0.49089798", "0.49059397", "0.48918086", "0.48671773", "0.48620108", "0.48618558", "0.48606482", "0.4856698", "0.48394525", "0.48308516", "0.48253307", "0.4820293", "0.48190352", "0.48131806", "0.48112035", "0.4808181", "0.47963223", "0.47948572", "0.47947338", "0.47937292", "0.47909242", "0.4780387", "0.47648203", "0.47601014", "0.47489807", "0.47400823", "0.47399095", "0.47291404", "0.47260064", "0.47180277", "0.47163525", "0.47156885", "0.4714704", "0.4713346", "0.47105196", "0.47082147", "0.47023258", "0.4700683", "0.4697939", "0.46978775", "0.46786663", "0.4668119", "0.4663913", "0.46514586", "0.4646702", "0.46445838", "0.4642481", "0.46418712", "0.46411777", "0.46400437", "0.46343702", "0.46322134", "0.462881", "0.46283105", "0.46204987", "0.46189943", "0.4614495", "0.46073183", "0.46024162", "0.45984292", "0.45959884", "0.4591706", "0.45876577", "0.45830113", "0.45820218", "0.4577851", "0.45751303", "0.45748", "0.4572979", "0.45680535" ]
0.77574176
0
Zonal statistics with rasters as input and rasters and lists as output
def zonalStatsToRaster(image, zonesImage, geometry, maxPixels, reducerType):
    # reducerType can be mean, max, sum, first or mode; count is always included for QA
    # the resolution of the zonesImage is used for scale
    reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "mean"), ee.Reducer.mean(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "max"), ee.Reducer.max(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "sum"), ee.Reducer.sum(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "first"), ee.Reducer.first(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "mode"), ee.Reducer.mode(), "error")))))
    reducer = ee.Reducer(reducer).combine(reducer2=ee.Reducer.count(), sharedInputs=True).group(groupField=1, groupName="zones")

    scale = zonesImage.projection().nominalScale().getInfo()
    zonesImage = zonesImage.select(zonesImage.bandNames(), ["zones"])

    totalImage = ee.Image(image).addBands(zonesImage)
    resultsList = ee.List(totalImage.reduceRegion(
        geometry=geometry,
        reducer=reducer,
        scale=scale,
        maxPixels=maxPixels
    ).get("groups"))

    resultsList = resultsList.map(ensure_default_properties)
    zoneList = mapList(resultsList, "zones")
    countList = mapList(resultsList, "count")
    valueList = mapList(resultsList, reducerType)

    valueImage = zonesImage.remap(zoneList, valueList).select(["remapped"], [reducerType])
    countImage = zonesImage.remap(zoneList, countList).select(["remapped"], ["count"])
    newImage = zonesImage.addBands(countImage).addBands(valueImage)
    return newImage, zoneList, valueList, countList
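A minimal usage sketch for the document function above (not part of the original record): it assumes the Earth Engine Python API is installed and initialized, that the mapList and ensure_default_properties helpers referenced by zonalStatsToRaster are importable from the same module, and that mapList returns server-side ee.List objects. The zones asset ID below is a placeholder, and the SRTM elevation image is used only as an example value band.

import ee

ee.Initialize()

# Example inputs (placeholders): a value image to summarize and an integer raster of zone IDs.
valueImage = ee.Image("USGS/SRTMGL1_003")              # elevation, used here only as demo values
zonesImage = ee.Image("users/example/zones").toInt()   # hypothetical asset; each pixel stores a zone ID
geometry = zonesImage.geometry()                       # summarize over the zone raster's footprint

# Per-zone mean written back onto the zone raster, plus a pixel-count band for QA.
meanImage, zoneIds, zoneMeans, zoneCounts = zonalStatsToRaster(
    valueImage, zonesImage, geometry, maxPixels=1e10, reducerType="mean")

# The three returned lists are server-side objects; call .getInfo() to bring them client-side.
print(zoneIds.getInfo(), zoneMeans.getInfo(), zoneCounts.getInfo())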
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zonal_stats(src_poly, src_raster, operator=['mean'], features=None):\n assert src_raster.geo_transform is not None, \"src_raster.geo_transform should not be None\"\n assert isinstance(operator, list), \"operator should be a list of string. ex: ['mean']\"\n features = list(range(src_raster.bands)) if features is None else features\n assert len(features) == src_raster.bands, \"length of features should equals number of bands of the raster\"\n df_shp = src_poly.copy()\n df_shp['poly_idx'] = list(range(len(df_shp)))\n df_shp['poly_idx'] = df_shp['poly_idx'].astype('float')\n poly_rst = tgp.ShapeGrid.rasterize_layer(df_shp, src_raster.rows, src_raster.cols, src_raster.geo_transform, 'poly_idx', all_touched=True, no_data_value=np.nan)\n X_combine = np.concatenate([poly_rst.data, src_raster.data], axis=-1)\n X_combine_df = pd.DataFrame(X_combine.reshape(-1, src_raster.bands))\n X_groupby = X_combine_df.groupby(0, as_index=False)\n for op in operator:\n columns = {0:'poly_idx'}\n for f_idx, f in enumerate(features):\n columns[f_idx+1] = f'zonal_{op}_{f}'\n if op == 'mean':\n df_shp = df_shp.merge(X_groupby.mean().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'max':\n df_shp = df_shp.merge(X_groupby.max().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'min':\n df_shp = df_shp.merge(X_groupby.min().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'median':\n df_shp = df_shp.merge(X_groupby.median().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'sum':\n df_shp = df_shp.merge(X_groupby.sum().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'std':\n df_shp = df_shp.merge(X_groupby.std().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'count':\n df_shp = df_shp.merge(X_groupby.count().rename(columns=columns), on='poly_idx', how='left')\n else:\n assert False, \"no this operator\"\n return df_shp", "def zonal_statistics(wrksppath, timestamp, region, model):\n logging.info('\\nDoing Zonal Statistics on ' + region)\n # Define app workspace and sub-paths\n resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')\n shp_path = os.path.join(wrksppath, region, 'shapefiles', 'ffgs_' + region + '.shp')\n\n stat_file = os.path.join(wrksppath, region, model + 'results.csv')\n\n # check that there are resampled tiffs to do zonal statistics on\n if not os.path.exists(resampleds):\n logging.info('There are no resampled tiffs to do zonal statistics on. 
Skipping Zonal Statistics')\n return\n\n # List all Resampled GeoTIFFs\n files = os.listdir(resampleds)\n files = [tif for tif in files if tif.endswith('.tif')]\n files.sort()\n\n # do zonal statistics for each resampled tiff file and put it in the stats dataframe\n stats_df = pd.DataFrame()\n for i in range(len(files)):\n logging.info('starting zonal statistics for ' + files[i])\n ras_path = os.path.join(resampleds, files[i])\n stats = rasterstats.zonal_stats(\n shp_path,\n ras_path,\n stats=['count', 'max', 'mean'],\n geojson_out=True\n )\n\n timestep = files[i][:10]\n\n # for each stat that you get out, write it to the dataframe\n logging.info('writing the statistics for this file to the dataframe')\n for j in range(len(stats)):\n\n temp_data = stats[j]['properties']\n temp_data.update({'Forecast Timestamp': timestamp})\n temp_data.update({'Timestep': timestep})\n\n temp_df = pd.DataFrame([temp_data])\n stats_df = stats_df.append(temp_df, ignore_index=True)\n\n # write the resulting dataframe to a csv\n logging.info('\\ndone with zonal statistics, rounding values, writing to a csv file')\n stats_df = stats_df.round({'max': 1, 'mean': 1})\n stats_df.to_csv(stat_file, index=False)\n\n # delete the resampled tiffs now that we dont need them\n logging.info('deleting the resampled tiffs directory')\n shutil.rmtree(resampleds)\n\n return", "def ZonalStatsRasterArray(zonegeodf, rasterarr, transaffine, stats, nodatavalue=0):\n zonaloutput = zonal_stats(vectors=zonegeodf.geometry, raster=rasterarr, nodata=nodatavalue, affine=transaffine, stats=stats, all_touched=True)\n indexname = 'index' if zonegeodf.index.name is None else zonegeodf.index.name\n zonegeodf.reset_index(inplace=True)\n output = zonegeodf.join(pd.DataFrame(zonaloutput))\n output.set_index(indexname, inplace=True)\n return output", "def zonal_stats(in_path, raster, grid_id_name='GRIDMET_ID'):\n if not os.path.isfile(in_path):\n raise FileNotFoundError('Input summary CSV file given'+\\\n ' was invalid or not found')\n # look for fishnet created in 'in_path/spatial'\n path_root = os.path.split(in_path)[0]\n file_name = os.path.split(in_path)[1]\n # get variable names from input file prefix\n grid_var = file_name.split('_summ')[0]\n var_name = Path(raster).name.split('.')[0]\n # grid is in the \"spatial\" subdir of in_path\n grid_file = OPJ(path_root, 'spatial', 'grid.shp')\n # save zonal stats to summary CSV in same dir as raster as of version 0.3\n raster_root = os.path.split(raster)[0]\n out_file = OPJ(raster_root, 'zonal_stats.csv')\n\n # this error would only occur when using within Python \n if not os.path.isfile(grid_file):\n raise FileNotFoundError(\n os.path.abspath(grid_file),\n '\\ndoes not exist, create it using spatial.make_grid first'\n )\n print(\n 'Calculating', grid_var, 'zonal means for', var_name\n )\n\n # calc zonal stats and open for grid IDs\n with fiona.open(grid_file, 'r') as source:\n zs = zstats(source, raster, all_touched=True)\n grid_ids = [f['properties'].get(grid_id_name) for f in source]\n\n # get just mean values, zonal_stats can do other stats...\n means = [z['mean'] for z in zs]\n out_df = pd.DataFrame(\n data={\n grid_id_name: grid_ids, \n var_name: means\n }\n )\n out_df[grid_id_name] = out_df[grid_id_name].astype(int)\n # drop rows for cells outside of gridMET master grid\n out_df = out_df.drop(out_df[out_df[grid_id_name] == -999].index)\n\n # save or update existing csv file\n if not os.path.isfile(out_file):\n print(\n os.path.abspath(out_file),\n '\\ndoes not exist, creating file'\n )\n 
out_df.to_csv(out_file, index=False)\n else:\n # overwrite column values if exists, else append\n existing_df = pd.read_csv(out_file)\n existing_df[grid_id_name] = existing_df[grid_id_name].astype(int)\n if var_name in existing_df.columns:\n # may throw error if not same size as original grid\n try:\n existing_df.update(out_df)\n existing_df.to_csv(out_file, index=False) \n except:\n print('Zonal stats for this variable already exist but they',\n 'appear to have been calculated with a different grid',\n 'overwriting existing file at:\\n',\n os.path.abspath(out_file)\n )\n out_df.to_csv(out_file, index=False)\n else:\n existing_df = existing_df.merge(out_df, on=grid_id_name)\n #existing_df = pd.concat([existing_df, out_df], axis=1).drop_duplicates()\n existing_df.to_csv(out_file, index=False)", "def zonalStatsToFeatureCollection(image,zonesImage,geometry,maxPixels,reducerType):\n # reducertype can be mean, max, sum, first. Count is always included for QA\n # the resolution of the zonesimage is used for scale\n\n reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mean\"),ee.Reducer.mean(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"max\"),ee.Reducer.max(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"sum\"),ee.Reducer.sum(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"first\"),ee.Reducer.first(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mode\"),ee.Reducer.mode(),\"error\"))))\n )\n reducer = ee.Reducer(reducer).combine(reducer2= ee.Reducer.count(), sharedInputs= True).group(groupField=1, groupName=\"zones\") \n\n scale = zonesImage.projection().nominalScale().getInfo()\n zonesImage = zonesImage.select(zonesImage.bandNames(),[\"zones\"])\n\n totalImage = ee.Image(image).addBands(zonesImage)\n resultsList = ee.List(totalImage.reduceRegion(\n geometry= geometry, \n reducer= reducer,\n scale= scale,\n maxPixels=maxPixels\n ).get(\"groups\"))\n\n resultsList = resultsList.map(ensure_default_properties); \n fc = ee.FeatureCollection(resultsList.map(dict_to_feature))\n\n return fc", "def gen_zonal_stats(\n vectors, raster,\n layer=0,\n band=1,\n nodata=None,\n affine=None,\n stats=None,\n all_touched=True,\n percent_cover_selection=None,\n percent_cover_weighting=True,\n percent_cover_scale=20,\n categorical=False,\n category_map=None,\n add_stats=None,\n zone_func=None,\n raster_out=False,\n prefix=None,\n geojson_out=False, **kwargs):\n stats, run_count = check_stats(stats, categorical)\n\n # check inputs related to percent coverage\n percent_cover = False\n if percent_cover_weighting or percent_cover_selection is not None:\n percent_cover = True\n if percent_cover_scale is None:\n warnings.warn('No value for `percent_cover_scale` was given. '\n 'Using default value of 10.')\n percent_cover_scale = 10\n\n try:\n if percent_cover_scale != int(percent_cover_scale):\n warnings.warn('Value for `percent_cover_scale` given ({0}) '\n 'was converted to int ({1}) but does not '\n 'match original value'.format(\n percent_cover_scale, int(percent_cover_scale)))\n\n percent_cover_scale = int(percent_cover_scale)\n\n if percent_cover_scale <= 1:\n raise Exception('Value for `percent_cover_scale` must be '\n 'greater than one ({0})'.format(\n percent_cover_scale))\n\n except:\n raise Exception('Invalid value for `percent_cover_scale` '\n 'provided ({0}). 
Must be type int.'.format(\n percent_cover_scale))\n\n if percent_cover_selection is not None:\n try:\n percent_cover_selection = float(percent_cover_selection)\n except:\n raise Exception('Invalid value for `percent_cover_selection` '\n 'provided ({0}). Must be able to be converted '\n 'to a float.'.format(percent_cover_selection))\n\n # if not all_touched:\n # warnings.warn('`all_touched` was not enabled but an option requiring '\n # 'percent_cover calculations was selected. Automatically '\n # 'enabling `all_touched`.')\n # all_touched = True\n\n with Raster(raster, affine, nodata, band) as rast:\n features_iter = read_features(vectors, layer)\n for _, feat in enumerate(features_iter):\n geom = shape(feat['geometry'])\n\n if 'Point' in geom.type:\n geom = boxify_points(geom, rast)\n percent_cover = False\n\n geom_bounds = tuple(geom.bounds)\n fsrc = rast.read(bounds=geom_bounds)\n\n if percent_cover:\n cover_weights = rasterize_pctcover_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n scale=percent_cover_scale,\n all_touched=all_touched)\n rv_array = cover_weights > (percent_cover_selection or 0)\n else:\n rv_array = rasterize_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n all_touched=all_touched)\n\n # nodata mask\n isnodata = (fsrc.array == fsrc.nodata)\n\n # add nan mask (if necessary)\n if np.issubdtype(fsrc.array.dtype, float) and \\\n np.isnan(fsrc.array.min()):\n isnodata = (isnodata | np.isnan(fsrc.array))\n\n # Mask the source data array\n # mask everything that is not a valid value or not within our geom\n masked = np.ma.MaskedArray(\n fsrc.array,\n mask=(isnodata | ~rv_array))\n\n # execute zone_func on masked zone ndarray\n if zone_func is not None:\n if not callable(zone_func):\n raise TypeError(('zone_func must be a callable '\n 'which accepts function a '\n 'single `zone_array` arg.'))\n zone_func(masked)\n\n if masked.compressed().size == 0:\n # nothing here, fill with None and move on\n feature_stats = dict([(stat, None) for stat in stats])\n if 'count' in stats: # special case, zero makes sense here\n feature_stats['count'] = 0\n else:\n if run_count:\n keys, counts = np.unique(masked.compressed(), return_counts=True)\n pixel_count = dict(zip([np.asscalar(k) for k in keys],\n [np.asscalar(c) for c in counts]))\n\n if categorical:\n feature_stats = dict(pixel_count)\n if category_map:\n feature_stats = remap_categories(category_map, feature_stats)\n else:\n feature_stats = {}\n\n if 'min' in stats:\n feature_stats['min'] = float(masked.min())\n if 'max' in stats:\n feature_stats['max'] = float(masked.max())\n if 'mean' in stats:\n if percent_cover:\n feature_stats['mean'] = float(\n np.sum(masked * cover_weights) /\n np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['mean'] = float(masked.mean())\n if 'count' in stats:\n if percent_cover:\n feature_stats['count'] = float(np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['count'] = int(masked.count())\n # optional\n if 'sum' in stats:\n if percent_cover:\n feature_stats['sum'] = float(np.sum(masked * cover_weights))\n else:\n feature_stats['sum'] = float(masked.sum())\n if 'std' in stats:\n feature_stats['std'] = float(masked.std())\n if 'median' in stats:\n feature_stats['median'] = float(np.median(masked.compressed()))\n if 'majority' in stats:\n feature_stats['majority'] = float(key_assoc_val(pixel_count, max))\n if 'minority' in stats:\n feature_stats['minority'] = float(key_assoc_val(pixel_count, min))\n if 'unique' in stats:\n feature_stats['unique'] = 
len(list(pixel_count.keys()))\n if 'range' in stats:\n try:\n rmin = feature_stats['min']\n except KeyError:\n rmin = float(masked.min())\n try:\n rmax = feature_stats['max']\n except KeyError:\n rmax = float(masked.max())\n feature_stats['range'] = rmax - rmin\n\n for pctile in [s for s in stats if s.startswith('percentile_')]:\n q = get_percentile(pctile)\n pctarr = masked.compressed()\n feature_stats[pctile] = np.percentile(pctarr, q)\n\n if 'nodata' in stats:\n featmasked = np.ma.MaskedArray(fsrc.array, mask=np.logical_not(rv_array))\n feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())\n\n if add_stats is not None:\n for stat_name, stat_func in add_stats.items():\n feature_stats[stat_name] = stat_func(masked)\n\n if raster_out:\n feature_stats['mini_raster_array'] = masked\n feature_stats['mini_raster_affine'] = fsrc.affine\n feature_stats['mini_raster_nodata'] = fsrc.nodata\n\n if prefix is not None:\n prefixed_feature_stats = {}\n for key, val in feature_stats.items():\n newkey = \"{}{}\".format(prefix, key)\n prefixed_feature_stats[newkey] = val\n feature_stats = prefixed_feature_stats\n\n if geojson_out:\n for key, val in feature_stats.items():\n if 'properties' not in feat:\n feat['properties'] = {}\n feat['properties'][key] = val\n yield feat\n else:\n yield feature_stats", "def zonal_stats(self, gdf, stats, all_touched=False):\n _ST = [\"count\", \"min\", \"max\", \"sum\", \"mean\", \"std\", \"median\"]\n\n def rmd(ds, stat):\n return {var: f\"{var}_{stat}\" for var in ds.raster.vars}\n\n def gen_zonal_stat(ds, geoms, stats, all_touched=False):\n dims = (ds.raster.y_dim, ds.raster.x_dim)\n for i, geom in enumerate(geoms):\n # add buffer to work with point geometries\n ds1 = ds.raster.clip_bbox(geom.bounds, buffer=2).raster.mask_nodata()\n if np.any(np.asarray(ds1.raster.shape) < 2):\n continue\n mask = full(ds1.raster.coords, nodata=0, dtype=np.uint8)\n features.rasterize(\n [(geom, 1)],\n out_shape=mask.raster.shape,\n fill=0,\n transform=mask.raster.transform,\n out=mask.data,\n all_touched=all_touched,\n )\n ds1 = ds1.where(mask == 1)\n dss = []\n for stat in stats:\n if stat in _ST:\n ds1_stat = getattr(ds1, stat)(dims)\n dss.append(ds1_stat.rename(rmd(ds1, stat)))\n elif isinstance(stat, str) and stat.startswith(\"q\"):\n qs = np.array([float(q) for q in stat.strip(\"q\").split(\",\")])\n dss.append(\n ds1.quantile(qs / 100, dims).rename(rmd(ds1, \"quantile\"))\n )\n elif callable(stat):\n dss.append(\n ds1.reduce(stat, dims).rename(rmd(ds1, stat.__name__))\n )\n else:\n raise ValueError(f\"Stat {stat} not valid.\")\n yield xr.merge(dss), i\n\n if isinstance(stats, str):\n stats = stats.split()\n elif callable(stats):\n stats = list([stats])\n\n if gdf.crs is not None and self.crs is not None and gdf.crs != self.crs:\n gdf = gdf.to_crs(self.crs)\n geoms = gdf[\"geometry\"].values\n\n ds = self._obj.copy()\n if isinstance(ds, xr.DataArray):\n if ds.name is None:\n ds.name = \"values\"\n ds = ds.to_dataset()\n\n out = list(gen_zonal_stat(ds, geoms, stats, all_touched))\n if len(out) == 0:\n raise IndexError(\"All geometries outside raster domain\")\n\n dss, idx = zip(*out)\n ds_out = xr.concat(dss, \"index\")\n ds_out[\"index\"] = xr.IndexVariable(\"index\", gdf.index.values[np.array(idx)])\n\n return ds_out", "def test_rasters_and_arrays(self):\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # 
Define array where latitudes are rows and longitude columns\n A1 = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)\n lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == 105.25\n assert A1[0, 0] == linear_function(lon[0], lat[4])\n\n # Lower left corner\n assert A1[4, 0] == 103.25\n assert A1[4, 0] == linear_function(lon[0], lat[0])\n\n # Upper right corner\n assert A1[0, 7] == 112.25\n assert A1[0, 7] == linear_function(lon[7], lat[4])\n\n # Lower right corner\n assert A1[4, 7] == 110.25\n assert A1[4, 7] == linear_function(lon[7], lat[0])\n\n # Generate raster object and write\n projection = ('GEOGCS[\"WGS 84\",'\n 'DATUM[\"WGS_1984\",'\n 'SPHEROID[\"WGS 84\",6378137,298.2572235630016,'\n 'AUTHORITY[\"EPSG\",\"7030\"]],'\n 'AUTHORITY[\"EPSG\",\"6326\"]],'\n 'PRIMEM[\"Greenwich\",0],'\n 'UNIT[\"degree\",0.0174532925199433],'\n 'AUTHORITY[\"EPSG\",\"4326\"]]')\n geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n # Test conversion between geotransform and\n # geometry (longitudes and latitudes)\n longitudes, latitudes = R1.get_geometry()\n msg = 'Longitudes not as expected: %s' % str(longitudes)\n assert numpy.allclose(longitudes, [100.5, 101.5, 102.5, 103.5, 104.5,\n 105.5, 106.5, 107.5]), msg\n\n msg = 'Latitudes not as expected: %s' % str(latitudes)\n assert numpy.allclose(latitudes, [5.5, 6.5, 7.5, 8.5, 9.5]), msg\n\n gt = raster_geometry2geotransform(longitudes, latitudes)\n msg = ('Conversion from coordinates to geotransform failed: %s'\n % str(gt))\n assert numpy.allclose(gt, geotransform,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster object')\n assert numlat == R1.rows, msg\n assert numlon == R1.columns, msg\n\n # Write back to new (tif) file\n out_filename = unique_filename(suffix='.tif')\n R1.write_to_file(out_filename)\n assert R1.filename == out_filename\n\n # Check nodata in original layer\n assert numpy.isnan(R1.get_nodata_value())\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n assert R2.filename == out_filename\n\n # Check nodata in read layer\n assert numpy.isnan(R2.get_nodata_value())\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, numlat, numlon))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert numlat == R2.rows, msg\n assert numlon == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = 'Array values of written raster array were not as expected'\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were 
different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n # Exercise projection __eq__ method\n assert R1.projection == R2.projection\n\n # Check that equality raises exception when type is wrong\n try:\n R1.projection == 234\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)\n\n # Check keywords\n assert R1.keywords == R2.keywords\n\n # Check override of ==\n assert R1 == R2", "def output_rasters(self, arr, outdir, outname):\n\n outpath = os.path.join(outdir, outname)\n print('the outpath for file {} is {}'.format(outname, outpath))\n\n # get the geoinfo from sample tiff to output intermediate files\n ds = rasterio.open(self.geoproperties_file)\n band1 = arr\n with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n # TODO - Set an AWS Cloud flag in the config_dict file to activate this function or not...\n # delete files created locally and put in bucket\n # PathManager.s3_delete_local(from_file, bucket, prefix_no_slash)", "def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else 
max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]", "def get_z_ranges(self):\n\n summary = self.get_rasters_summary()\n\n # Convert to dict in format:\n # { 'stat' : { 'z': (min, max), ... } ... }\n\n ranges = summary.groupby(['stat', 'z'], as_index=False)\n ranges = ranges.agg({'min': 'min', 'max': 'max'})\n ranges['vals'] = ranges.apply(\n lambda row: {\n row['z']: (row['min'], row['max'])\n }, axis=1)\n ranges = ranges.groupby('stat')['vals'].apply(\n lambda group: group.values)\n ranges = ranges.apply(\n lambda group: {\n int(k): v for d in group for k,\n v in d.items()})\n\n return ranges.to_dict()", "def _sum_n_rasters(\n raster_path_list, target_raster_path):\n LOGGER.info('Summing %s rasters to %s', len(raster_path_list),\n target_raster_path)\n LOGGER.debug('Attempting to open %s', raster_path_list[0])\n pygeoprocessing.new_raster_from_base(\n raster_path_list[0], target_raster_path, gdal.GDT_Float32,\n [NODATA_FLOAT32_MIN])\n\n target_raster = gdal.OpenEx(\n target_raster_path, gdal.GA_Update | gdal.OF_RASTER)\n target_band = target_raster.GetRasterBand(1)\n\n n_pixels_to_process = (\n (target_raster.RasterXSize * target_raster.RasterYSize) *\n len(raster_path_list))\n n_pixels_processed = 0\n last_log_time = time.time()\n\n raster_tuple_list = []\n for raster_path in raster_path_list:\n raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)\n band = raster.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n raster_tuple_list.append((raster, band, nodata))\n\n for block_info in pygeoprocessing.iterblocks(\n (raster_path_list[0], 1), offset_only=True):\n\n sum_array = numpy.empty(\n (block_info['win_ysize'], block_info['win_xsize']),\n dtype=numpy.float32)\n sum_array[:] = 0.0\n\n # Assume everything is valid until proven otherwise\n pixels_touched = numpy.zeros(sum_array.shape, dtype=bool)\n for (_, band, nodata) in raster_tuple_list:\n if time.time() - last_log_time >= 5.0:\n percent_complete = round(\n n_pixels_processed / n_pixels_to_process, 4)*100\n LOGGER.info(f'Summation {percent_complete:.2f}% complete')\n last_log_time = time.time()\n\n array = band.ReadAsArray(**block_info)\n valid_pixels = slice(None)\n if nodata is not None:\n valid_pixels = 
~utils.array_equals_nodata(array, nodata)\n\n sum_array[valid_pixels] += array[valid_pixels]\n pixels_touched[valid_pixels] = 1\n n_pixels_processed += sum_array.size # for logging\n\n sum_array[~pixels_touched] = NODATA_FLOAT32_MIN\n\n target_band.WriteArray(\n sum_array, block_info['xoff'], block_info['yoff'])\n\n LOGGER.info('Summation 100.00% complete')\n raster_tuple_list = None\n\n target_band.ComputeStatistics(0)\n target_band = None\n target_raster = None", "def zonal_stats_workflow():\n save_as = \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/summary/monthly_quickflow.csv\"\n scenario_dict = {\n 'pre-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/pre_decline\",\n 'post-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/post_decline\",\n }\n df_list = []\n for scenario in scenario_dict.iterkeys():\n results_dict = {\n 'scenario': [],\n 'month': [],\n 'sum_quickflow': [],\n }\n folder = scenario_dict[scenario]\n aoi_shp = os.path.join(folder, 'aggregated_results.shp')\n for month in xrange(1, 13):\n qf_raster = os.path.join(\n folder, 'intermediate_outputs', 'qf_{}.tif'.format(month))\n zonal_stats = pygeoprocessing.zonal_statistics(\n (qf_raster, 1), aoi_shp)\n sum_QF = zonal_stats[0]['sum']\n results_dict['scenario'].append(scenario)\n results_dict['month'].append(month)\n results_dict['sum_quickflow'].append(sum_QF)\n results_df = pandas.DataFrame(data=results_dict)\n df_list.append(results_df)\n combined_list = pandas.concat(df_list)\n combined_list.to_csv(save_as, index=False)", "def zonal_grid_statistics(stats, zones, categories=None, grids=None,\n aspect=None, shortnames=True):\n # Check inputs\n zones = _validation.input_file(zones, 'grid', False)\n\n if not (stats.endswith('.txt') or stats.endswith('.csv')):\n stats += '.csv'\n\n if categories is None:\n category_list = 'NULL'\n elif type(categories) is str:\n categories = [_validation.input_file(categories, 'grid', False)]\n category_list = categories[0]\n elif type(categories) in (list, tuple):\n categories = _validation.input_file(categories, 'grid', False)\n category_list = ';'.join(categories)\n else:\n raise TypeError('Wrong argument type to categories!')\n\n if grids is None:\n grids_list = 'NULL'\n elif type(grids) is str:\n grids = [_validation.input_file(grids, 'grid', False)]\n grids_list = grids[0]\n elif type(grids) in (list, tuple):\n grids = _validation.input_file(grids, 'grid', False)\n grids_list = ';'.join(grids)\n else:\n raise TypeError('Wrong argument type to grids!')\n\n if aspect is None:\n aspect = 'NULL'\n elif type(aspect) is str:\n aspect = _validation.input_file(zones, 'grid', False)\n else:\n raise TypeError('Wrong argument type to grids!')\n\n # Check inputs\n shortnames = str(int(shortnames))\n # Create cmd\n cmd = ['saga_cmd', '-f=q', 'statistics_grid', '5', '-ZONES', zones,\n '-CATLIST', category_list, '-STATLIST', grids_list, '-ASPECT',\n aspect, '-OUTTAB', stats, '-SHORTNAMES', shortnames]\n # Run command\n flag = _env.run_command_logged(cmd)\n if not flag:\n raise EnvironmentError(_ERROR_TEXT.format(_sys._getframe().f_code.co_name, _env.errlog))", "def zonal_statistics(self, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n process_id = 'zonal_statistics'\n args = {\n 'imagery': self.graph,\n 'regions': regions_geojson,\n 'func': 
func,\n 'scale': scale,\n 'interval': interval\n }\n\n return self.graph_add_process(process_id, args)", "def exact_zonalstats(self, ras_path, vec_path, fid, col, stats, output_csv):\n cmd = \"exactextract --raster grid:%s --polygons %s --fid %s --stat %s=%s\\(grid\\) --output %s\" %(ras_path, vec_path, fid, col, stats, output_csv)\n # Apply zonalstatistics\n os.system(cmd)", "def comparing_urban_zonal_stats(self, zonal_path = '../data/zonal/', fid = 'uid', stats = 'sum', gpd_ls = ['gpw', 'ghs_pop', 'worldpop'], \n schema = 'urban_pop', table = 'global_grid'):\n \n # Create folder if does not already exist\n if not os.path.exists(zonal_path): \n os.makedirs(zonal_path)\n \n for iso in self.country_iso3:\n \n # Define name of temp shp\n file_name = 'temp.gpkg'\n # And file path\n file_path = '../data/gpkg/'\n # Define full path \n vec_path = ''.join([file_path + file_name])\n \n if os.path.exists(vec_path):\n os.remove(vec_path)\n \n # Join schema and table together\n layer = '.'.join([schema, table])\n # Define sql statement to extract from table e.g. urban_pop.global_grid \n sql = \"SELECT * FROM %s WHERE gid_0 LIKE '%s'\" %(layer, iso)\n # Define column name of output zonal stats\n\n # Define db connection class \n db_conn = postgres_conn(section = 'postgresql', config_path = '../src/config/', config_file = 'database.ini', country_iso3 = iso)\n # Get vector geometries from postgres and store as temp shp\n #db_conn.psql_to_shp(file_name, file_path, schema, table, sql)\n db_conn.psql_to_gpkg(file_name, file_path, schema, table, sql)\n \n # Define full vector path including layer name\n vec_path = vec_path + '[gridded]'\n \n for gpd in gpd_ls:\n \n col = gpd + '_' + stats\n output_path = '../data/zonal/' + iso + '_' + gpd + '.csv'\n\n if 'gpw' == gpd:\n \n # Define input raster path\n ras_path = '../data/gpw/cropped/gpw_' + iso + '.tif'\n \n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)\n \n # Apply zonal statistics if db is ghs_pop\n elif 'ghs_pop' == gpd:\n \n # Define input raster path\n ras_path = '../data/ghs_pop/cropped/ghs_pop_' + iso + '.tif'\n\n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)\n \n # Apply zonal statistics if db is worldpop\n elif 'worldpop' == gpd:\n \n # Define input raster path\n ras_path = '../data/worldpop/MOSAIC_ppp_prj_2015/ppp_prj_2015_' + iso + '.tif'\n\n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = 
rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def useZstat(zstat, file_path_name_save, file_path_conte, file_path_name_resting_atlas):\n\n import matplotlib.pyplot as plt\n import os\n from glob import glob\n import numpy as np\n import nibabel as nb\n import nibabel.gifti as gifti\n\n # Crucial: xvfb must be imported and started before importing mayavi\n from xvfbwrapper import Xvfb\n print('XVb pre')\n vdisplay = Xvfb()\n vdisplay.start()\n\n print('pre maya')\n # Crashes on this line if run with plain python (not xvfb-run ... python) and if xvfbwrapper is after it.\n from mayavi import mlab\n print('post maya')\n from tvtk.api import tvtk\n print('post tvtk')\n import math\n\n print('display')\n mlab.options.offscreen = True #offscreen window for rendering\n\n img = nb.load(file_path_name_resting_atlas)\n #img = nb.load('/Users/MathiasMacbook/Desktop/rfMRI_REST1_LR_Atlas.dtseries.nii')\n mim = img.header.matrix.mims[1]\n #for idx, bm in enumerate(mim.brainModels):\n # print((idx, bm.indexOffset, bm.brainStructure))\n bm1 = mim.brainModels[0]\n lidx = bm1.vertexIndices.indices\n bm2 = mim.brainModels[1]\n ridx = bm1.surfaceNumberOfVertices + bm2.vertexIndices.indices\n bidx = np.concatenate((lidx, ridx))\n\n axis = [0, 0, 1]\n theta = np.pi\n\n inflated = True\n split_brain = True\n\n surf = gifti.read(file_path_conte + '/Conte69.L.midthickness.32k_fs_LR.surf.gii') \n verts_L_data = surf.darrays[0].data\n faces_L_data = surf.darrays[1].data\n\n surf = gifti.read(file_path_conte + '/Conte69.R.midthickness.32k_fs_LR.surf.gii') \n verts_R_data = surf.darrays[0].data\n faces_R_data = surf.darrays[1].data\n\n if inflated:\n surf = gifti.read(file_path_conte + '/Conte69.L.inflated.32k_fs_LR.surf.gii')\n verts_L_display = surf.darrays[0].data\n faces_L_display = surf.darrays[1].data\n surf = gifti.read(file_path_conte + '/Conte69.R.inflated.32k_fs_LR.surf.gii')\n verts_R_display = surf.darrays[0].data\n faces_R_display = surf.darrays[1].data\n else:\n verts_L_display = verts_L_data.copy()\n verts_R_display = verts_R_data.copy()\n faces_L_display = faces_L_data.copy()\n faces_R_display = faces_R_data.copy()\n\n verts_L_display[:, 0] -= max(verts_L_display[:, 0])\n verts_R_display[:, 0] -= min(verts_R_display[:, 0])\n verts_L_display[:, 1] -= (max(verts_L_display[:, 1]) + 1)\n verts_R_display[:, 1] -= (max(verts_R_display[:, 1]) + 1)\n\n faces = np.vstack((faces_L_display, verts_L_display.shape[0] + faces_R_display))\n\n if split_brain:\n verts2 = rotation_matrix(axis, theta).dot(verts_R_display.T).T\n else:\n verts_L_display[:, 1] -= np.mean(verts_L_display[:, 1])\n verts_R_display[:, 1] -= np.mean(verts_R_display[:, 1])\n verts2 = verts_R_display\n\n verts_rot = np.vstack((verts_L_display, verts2))\n verts = np.vstack((verts_L_data, verts_R_data))\n #print verts.shape\n #print faces.shape\n\n if not os.path.exists(os.path.split(file_path_name_save)[0]):\n 
os.makedirs(os.path.split(file_path_name_save)[0]) \n\n print('use zstat')\n img = nb.load(zstat)\n print('loaded img')\n \n threshold = 2.3 # 1000, lower limit\n display_threshold = 6 #8000, upper limit\n\n data = img.get_data()\n aff = img.affine\n indices = np.round((np.linalg.pinv(aff).dot(np.hstack((verts, \n np.ones((verts.shape[0], 1)))).T))[:3, :].T).astype(int)\n scalars2 = data[indices[:, 0], indices[:, 1], indices[:, 2]]\n scalars2[np.abs(scalars2) < threshold] = 0.\n scalars = np.zeros(verts.shape[0])\n scalars[bidx] = scalars2[bidx]\n\n negative = positive = False\n if np.any(scalars < 0):\n negative = True\n if np.any(scalars > 0):\n positive = True\n\n nlabels = 2\n vmin = 0\n vmax = 0\n if negative and positive:\n maxval = max(-scalars.min(), scalars.max())\n if maxval > display_threshold:\n maxval = display_threshold\n vmin = -maxval\n vmax = maxval\n nlabels = 3\n vmin = -display_threshold ######\n vmax = display_threshold ######\n elif negative:\n vmin = scalars.min()\n if vmin < -display_threshold:\n vmin = -display_threshold\n vmax = 0\n vmin = -display_threshold ######\n elif positive:\n vmax = scalars.max()\n if vmax > display_threshold:\n vmax = display_threshold\n vmin = 0\n vmax = display_threshold ######\n #print zstat\n \n dual_split = True\n\n fig1 = mlab.figure(1, bgcolor=(0, 0, 0))\n mlab.clf()\n mesh = tvtk.PolyData(points=verts_rot, polys=faces)\n mesh.point_data.scalars = scalars\n mesh.point_data.scalars.name = 'scalars'\n surf = mlab.pipeline.surface(mesh, colormap='autumn', vmin=vmin, vmax=vmax)\n if dual_split:\n verts_rot_shifted = verts_rot.copy()\n verts_rot_shifted = rotation_matrix(axis, theta).dot(verts_rot_shifted.T).T\n verts_rot_shifted[:, 2] -= (np.max(verts_rot_shifted[:, 2]) - np.min(verts_rot_shifted[:, 2]))\n verts_rot_shifted[:, 0] -= np.max(verts_rot_shifted[:, 0])\n mesh2 = tvtk.PolyData(points=verts_rot_shifted, polys=faces)\n mesh2.point_data.scalars = scalars\n mesh2.point_data.scalars.name = 'scalars'\n surf2 = mlab.pipeline.surface(mesh2, colormap='autumn', vmin=vmin, vmax=vmax)\n colorbar = mlab.colorbar(surf, nb_labels=nlabels) #, orientation='vertical')\n lut = surf.module_manager.scalar_lut_manager.lut.table.to_array()\n\n if negative and positive:\n half_index = lut.shape[0] / 2\n index = int(half_index * threshold / vmax)\n lut[(half_index - index + 1):(half_index + index), :] = 192\n lut[(half_index + index):, :] = 255 * plt.cm.autumn(np.linspace(0, 255, half_index - index).astype(int))\n lut[:(half_index - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, half_index - index).astype(int))\n elif negative:\n index = int(lut.shape[0] * threshold / abs(vmin))\n lut[(lut.shape[0] - index):, :] = 192\n lut[:(lut.shape[0] - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n elif positive:\n index = int(lut.shape[0] * threshold / vmax)\n lut[:index, :] = 192\n lut[index:, :] = 255 * plt.cm.autumn(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n lut[:, -1] = 255\n\n surf.module_manager.scalar_lut_manager.lut.table = lut\n if dual_split:\n surf2.module_manager.scalar_lut_manager.lut.table = lut\n surf.module_manager.scalar_lut_manager.show_scalar_bar = False\n surf.module_manager.scalar_lut_manager.show_legend = False\n surf.module_manager.scalar_lut_manager.label_text_property.font_size = 10\n surf.module_manager.scalar_lut_manager.show_scalar_bar = True\n surf.module_manager.scalar_lut_manager.show_legend = True\n mlab.draw()\n\n translate = [0, 0, 0]\n if inflated:\n zoom = -700\n 
else:\n zoom = -600\n if dual_split:\n if inflated:\n translate = [0, 0, -104.01467148]\n else:\n translate = [0, 0, -54.76305802] \n if inflated:\n zoom = -750\n else:\n zoom = -570\n \n #mlab.view(0, 90.0, zoom, translate)\n mlab.view(9, 90.0)\n\n print(file_path_name_save)\n \n mlab.savefig(file_path_name_save, figure=fig1, magnification=5)\n\n vdisplay.stop()", "def zonal_statistics(self, imagery, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n\n graph = {\n 'process_id': 'zonal_statistics',\n 'imagery': imagery.graph,\n 'regions': regions_geojson,\n 'func': func,\n 'scale': scale,\n 'interval': interval\n }\n\n imagery.graph = graph\n\n return imagery", "def getCompStats(self, photoz = \"z_peak\", verbose = True):\n\n specz = self.zout[\"z_spec\"]\n photoz = self.zout[photoz]\n\n dz = (photoz - specz)\n diff = (dz / (1.+specz))\n\n nmad = 1.4826 * np.median( np.abs( dz - np.median(dz) ) )\n mean_offset = np.mean(diff)\n median_offset = np.median(diff)\n dz1s = np.mean(np.abs(diff))\n\n outlier1 = ((np.abs(diff) > 0.15).sum(dtype = float) / self.NOBJ)\n outlier2 = ((np.abs(diff) > 3.*nmad).sum(dtype = float) / self.NOBJ)\n\n # print np.mean(np.abs(diff))\n\n # print nmad, outlier1, outlier2, mean_offset, median_offset\n\n if verbose:\n print \"#\"*35\n print \"NMAD: \\t\\t\\t{0:1.3f}\".format(nmad)\n print \"dz/1+z:\\t\\t\\t{0:1.3f}\".format(dz1s)\n print \"nu 1: \\t\\t\\t{0:1.1f}%\".format(outlier1*100.)\n print \"nu 2: \\t\\t\\t{0:1.1f}%\".format(outlier2*100.)\n print \"mean offset: \\t\\t{0:1.3f}\".format(mean_offset)\n print \"median offset: \\t\\t{0:1.3f}\".format(median_offset)\n print \"#\"*35\n\n keys = [\"nmad\", \"nu1\", \"nu2\", \"mean_offset\", \"median_offset\"]\n values = [nmad, outlier1, outlier2, mean_offset, median_offset]\n\n return dict(zip(keys, values))", "def zonal_avg2(data,Log=False):\n print 'setting up the destination grid'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n print 'computing weights for grid cell'\n ilist = []\n jlist = []\n wghts2D = []\n wghts3D = []\n for i in range(nlat):\n for j in range(nlon):\n i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)\n ilist.append(i_inds)\n jlist.append(j_inds)\n dist = gc_dist(lon_t[i], lat_t[i], tlon, tlat)\n # make weights=0 on land\n work2D = 1./MA.array(dist,mask=data[0,...].mask)\n wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)\n ,0))\n\n work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)\n wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)\n ,0))\n\n #print 'computing zonal average'\n return lon_t, lat_t, ilist, jlist, 
wghts2D, wghts3D", "def azmap (scores, compare, dimension=0):\r\n mns = amean(compare,dimension)\r\n sstd = asamplestdev(compare,0)\r\n return (scores - mns) / sstd", "def reduce_rasters(stack, statistic, no_data_value=None, dtype=None):\n if statistic not in STATISTICS:\n percentile = parse_percentile_statistic(statistic)\n if percentile is None:\n raise KeyError('Unknown statistic \"{}\"'.format(statistic))\n else:\n statistic = \"percentile\"\n\n if len(stack) == 0:\n raise ValueError(\"Cannot reduce a zero-length stack\")\n\n # get the output array properties (dtype, no_data_value, shape)\n if dtype is None:\n dtype = stack[0][\"values\"].dtype\n if no_data_value is None:\n no_data_value = stack[0][\"no_data_value\"]\n shape = stack[0][\"values\"].shape\n\n # sum, count and nans output do not contain no data: fill zeroes right away\n if statistic in {\"sum\", \"count\", \"nans\"}:\n fill_value = 0\n else:\n fill_value = no_data_value\n\n # create the output array\n out = np.full(shape, fill_value, dtype)\n\n if statistic == \"last\":\n # populate 'out' with the last value that is not 'no data'\n for data in stack:\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n out[index] = data[\"values\"][index]\n elif statistic == \"first\":\n # populate 'out' with the first value that is not 'no data'\n for data in stack[::-1]:\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n out[index] = data[\"values\"][index]\n elif statistic == \"count\":\n # count the number of values that are not 'no data'\n for data in stack:\n out += get_index(data[\"values\"], data[\"no_data_value\"])\n else:\n if statistic == \"percentile\":\n func = partial(np.nanpercentile, q=percentile)\n else:\n func = STATISTICS[statistic]\n # transform 'no data' into 'nan' to be able to use numpy functions\n # NB: the dtype is at least float16 to accomodate NaN\n stack_array = np.full(\n (len(stack),) + shape, np.nan, np.result_type(dtype, np.float16)\n )\n for i, data in enumerate(stack):\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n stack_array[i, index] = data[\"values\"][index]\n\n # protect against all-NaN slice warnings and errors\n not_all_nan = ~np.all(np.isnan(stack_array), axis=0)\n\n # perform the math\n out[not_all_nan] = func(stack_array[:, not_all_nan], axis=0)\n\n return {\"values\": out, \"no_data_value\": no_data_value}", "def obj_s2n_z(s2n_dict, z_bins, flux_bins, otype, outfile=None, ax=None):\n logs = get_logger()\n nz = z_bins.size\n nfx = flux_bins.size\n s2n_sum = np.zeros((nz-1,nfx-1))\n s2n_N = np.zeros((nz-1,nfx-1)).astype(int)\n # Loop on exposures+wedges (can do just once if these are identical for each)\n for jj, wave in enumerate(s2n_dict['waves']):\n # Turn wave into z\n zELG = wave / 3728. 
- 1.\n z_i = np.digitize(zELG, z_bins) - 1\n m_i = np.digitize(s2n_dict['OII'][jj]*1e17, flux_bins) - 1\n mmm = []\n for ll in range(nfx-1): # Only need to do once\n mmm.append(m_i == ll)\n #\n for kk in range(nz-1):\n all_s2n = s2n_dict['s2n'][jj][:,z_i==kk]\n for ll in range(nfx-1):\n if np.any(mmm[ll]):\n s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])\n s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]\n\n sty_otype = get_sty_otype()\n\n # Plot\n if ax is None:\n fig = plt.figure(figsize=(6, 6.0))\n ax= plt.gca()\n # Title\n fig.suptitle('{:s}: Redshift Summary'.format(sty_otype[otype]['lbl']),\n fontsize='large')\n\n # Plot em up\n z_cen = (z_bins + np.roll(z_bins,-1))/2.\n lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]\n mxy = 1e-9\n for ss in range(nfx-1):\n if np.sum(s2n_N[:,ss]) == 0:\n continue\n lbl = 'OII(1e-17) = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])\n ax.plot(z_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],\n label=lbl, color=sty_otype[otype]['color'])\n mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))\n\n ax.set_xlabel('Redshift')\n ax.set_xlim(z_bins[0], z_bins[-1])\n ax.set_ylabel('Mean S/N per Ang in dz bins')\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_ylim(0.1, mxy*1.1)\n\n legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.3,\n handletextpad=0.3, fontsize='medium', numpoints=1)\n\n # Finish\n plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)\n plt.subplots_adjust(top=0.92)\n if outfile is not None:\n plt.savefig(outfile, dpi=600)\n print(\"Wrote: {:s}\".format(outfile))", "def raw2outputs(raw, z_vals, rays_d, render_mask=False):\n raw2alpha = lambda x, y: 1. - torch.exp(-x * y)\n device = raw.device\n\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n dists = torch.cat([dists, torch.tensor([1e-2], device=device).expand(dists[..., :1].shape)], -1) # [N_rays, N_samples]\n\n dists = dists * torch.norm(rays_d[..., None, :], dim=-1)\n\n rgb = raw[..., :3]\n\n alpha = raw2alpha(raw[..., 3], dists) # [N_rays, N_samples]\n weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1), device=device), 1. 
- alpha + 1e-10], -1), -1)[:,:-1]\n\n rgb_map = torch.sum(weights[..., None] * rgb, -2) # [N_rays, 3]\n\n weights_norm = weights.detach() + 1e-5\n weights_norm /= weights_norm.sum(dim=-1, keepdim=True)\n depth_map = torch.sum(weights_norm * z_vals, -1)\n\n if render_mask:\n density = raw[..., 3] # [N_rays, N_samples]\n mask_map = torch.sum(weights * density, dim=1) # [N_rays,]\n return rgb_map, depth_map, weights_norm, mask_map\n\n return rgb_map, depth_map, weights_norm", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def test_read_raster(self):\n\n # FIXME (Ole): Some datasets show very large differences between extrema in the array and \n # those computed by GDAL. 
This may warrant another bug report to GEOS\n \n for coverage_name in ['test_grid', \n 'shakemap_padang_20090930',\n 'population_padang_1',\n 'population_padang_2',\n #'fatality_padang_1',\n #'fatality_padang_2'\n ]:\n \n \n filename = 'data/%s.asc' % coverage_name\n \n for R in [Raster(filename), read_coverage(filename)]:\n \n min, max = R.get_extrema()\n \n A = R.get_data(nan=True)\n B = R.get_data(nan=True)\n\n assert numpy.nanmax(A - B) == 0\n\n \n # FIXME (Ole): These tolerances are not really acceptable. Report to GEOS.\n assert numpy.allclose(min, numpy.nanmin(A[:]), rtol=1e-2)\n \n if coverage_name != 'population_padang_2':\n assert numpy.allclose(max, numpy.nanmax(A[:]), rtol=1e-2)", "def merge_rasters(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n rasters = [str(x) for x in i.joinpath('subnational').iterdir() if not x.name.endswith('txt') if x.name.endswith('norm.tif')]\n outfile = i.joinpath(f'{self.country}_{month}_normalised.tif')\n tiffs = \" \".join(rasters)\n gdal_cmd = f\"gdal_merge.py -o {outfile} -a_nodata -99999.0 -of gtiff {tiffs}\"\n subprocess.call(gdal_cmd, shell=True)", "def azs (a):\r\n zscores = []\r\n for item in a:\r\n zscores.append(z(a,item))\r\n return N.array(zscores)", "def merge_hpx_counts_cubes(filelist):\n out_prim = None\n out_skymap = None\n out_ebounds = None\n\n datalist_gti = []\n exposure_sum = 0.\n nfiles = len(filelist)\n ngti = np.zeros(nfiles, int)\n\n out_name = None\n\n for i, filename in enumerate(filelist):\n fin = fits.open(filename)\n sys.stdout.write('.')\n sys.stdout.flush()\n if i == 0:\n out_prim = update_null_primary(fin[0], out_prim)\n out_name = fin[1].name\n\n map_in = HpxMap.create_from_hdulist(fin)\n out_skymap = update_hpx_skymap_allsky(map_in, out_skymap)\n if i == 0:\n try:\n out_ebounds = update_ebounds(fin[\"EBOUNDS\"], out_ebounds)\n except KeyError:\n out_ebounds = update_energies(fin[\"ENERGIES\"], out_ebounds)\n try:\n (gti_data, exposure, tstop) = extract_gti_data(fin[\"GTI\"])\n datalist_gti.append(gti_data)\n exposure_sum += exposure\n ngti[i] = len(gti_data)\n except KeyError:\n pass\n\n if i == 0:\n first = fin\n elif i == nfiles - 1:\n try:\n date_end = fin[0].header['DATE-END']\n except KeyError:\n date_end = None\n else:\n fin.close()\n\n out_skymap_hdu = out_skymap.create_image_hdu(\"SKYMAP\")\n\n hdulist = [out_prim, out_skymap_hdu, out_ebounds]\n\n if len(datalist_gti) > 0:\n out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])\n out_gti.header['EXPOSURE'] = exposure_sum\n out_gti.header['TSTOP'] = tstop\n hdulist.append(out_gti)\n\n for hdu in hdulist:\n if date_end:\n hdu.header['DATE-END'] = date_end\n\n out_prim.update_header()\n sys.stdout.write(\"!\\n\")\n\n return fits.HDUList(hdulist)", "def GeneralProfile(binsz, filename):\n data_image = fits.open(filename)\n data_image= data_image[1]\n glons = np.arange(LonLow, LonHigh+binsz, binsz) \n glon_bounds = Table()\n glon_bounds['CHANNEL'] = np.arange(len(glons) - 1)\n glon_bounds['GLON_MIN'] = np.float64(glons[:-1])\n glon_bounds['GLON_MAX'] = np.float64(glons[1:])\n data = compute_longitude_profile(glon_bounds = glon_bounds, binsz=binsz, image=data_image, datatype=2, emission=4, tev=0)\n return data", "def astrometry_script(filename, catalog=\"PS\", rotation_scaling=True, xy_transformation=True, fine_transformation=True, images=False, vignette=3,vignette_rectangular=1., cutouts=None, ra=None, dec=None, projection_ra=None, projection_dec=None, verbose=False, 
save_images=False, ignore_header_rot=False, radius=-1., save_bad_result=False, silent=False, sigma_threshold_for_source_detection=5, high_res = False, hdul_idx=0, filename_for_sources=None, FWHM=4):\n #print(\"Program version: 1.2\")\n\n report = {}\n if(images):\n plt.ioff()\n warnings.simplefilter('ignore', UserWarning)\n fits_image_filename = filename\n\n print(\"> Astrometry for {} \".format(fits_image_filename))\n\n with fits.open(fits_image_filename) as hdul:\n #print(hdul.info())\n #print(hdul[0].header)\n\n hdu = hdul[hdul_idx]\n #hdu.verify('fix')\n hdr = hdu.header\n\n\n image_or = hdul[hdul_idx].data.astype(float)\n median = np.nanmedian(image_or)\n image_or[np.isnan(image_or)]=median\n image = image_or - median\n\n observation = find_sources(image, vignette,vignette_rectangular,cutouts, sigma_threshold_for_source_detection, FWHM=FWHM)\n #print(observation)\n\n #changed order of positions to [(x,y), (x,y),...] for compatibility with photutils 1.4\n xcenters = np.array(observation['xcenter'])\n ycenters = np.array(observation['ycenter'])\n positions = [(xcenters[i], ycenters[i]) for i in range(len(xcenters))]\n apertures = CircularAperture(positions, r=4.)\n\n\n #world coordinates\n if(not silent):\n print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n print(WCS(hdr))\n\n hdr[\"NAXIS1\"] = image.shape[0]\n hdr[\"NAXIS2\"] = image.shape[1]\n\n #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n wcsprm = WCS(hdr).wcs\n wcsprm_original = WCS(hdr).wcs\n wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, ra, dec,projection_ra, projection_dec, ignore_header_rot, radius)\n if(verbose):\n print(WCS(wcsprm.to_header()))\n coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n if(not PIXSCALE_UNCLEAR):\n if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n if(not silent):\n print(\"central value outside of the image, moving it to the center\")\n coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n #print(wcsprm)\n\n\n\n #better: put in nice wrapper! with repeated tries and maybe try synchron!\n if(not silent):\n print(\">Dowloading catalog data\")\n radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n catalog_data = query.get_data(coord, radius, catalog)\n report[\"catalog\"] = catalog\n #reference = reference.query(\"mag <20\")\n \n\n if(catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n if(not silent):\n print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n catalog_data2 = query.get_data(coord, radius, \"PS\")\n report[\"catalog\"] = \"PS\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n elif(catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n if(not silent):\n print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n report[\"catalog\"] = \"GAIA\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n\n max_sources = 400\n if(INCREASE_FOV_FLAG):\n max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n if(catalog_data.shape[0]>max_sources):\n catalog_data = catalog_data.nsmallest(400, \"mag\")\n #remove duplicates in catalog?\n\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Input for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n\n plt.xlim(-200,image.shape[0]+200)\n plt.ylim(-200,image.shape[1]+200)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_before.pdf\")\n\n ###tranforming to match the sources\n if(not silent):\n print(\"---------------------------------\")\n print(\">Finding the transformation\")\n if(rotation_scaling):\n if(not silent):\n print(\"Finding scaling and rotation\")\n wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=verbose)\n if(xy_transformation):\n if(not silent):\n print(\"Finding offset\")\n wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= verbose, silent=silent)\n\n #correct subpixel error\n compare_threshold = 3\n if(high_res):\n compare_threshold = 100\n obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=compare_threshold)#3\n if (len(distances) == 0): #meaning the list is empty\n best_score = 0\n else:\n rms = np.sqrt(np.mean(np.square(distances)))\n best_score = len(obs_x)/(rms+10) #start with current best score\n fine_transformation_success = False\n if(fine_transformation):\n print(\"Finding scaling and rotation\")\n lis = [2,3,5,8,10,6,4, 20,2,1,0.5]\n if(high_res):\n lis = [200,300,100,150,80,40,70, 20, 100, 30,9,5]\n skip_rot_scale = True\n for i in lis:\n wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i, compare_threshold=compare_threshold, skip_rot_scale=skip_rot_scale)\n if(i == 20):\n #only allow rot and scaling for the last few tries\n skip_rot_scale = False\n if(score> best_score):\n wcsprm = wcsprm_new\n best_score = score\n fine_transformation_success = True\n if not fine_transformation_success:\n if(not silent):\n print(\"Fine transformation did not improve result so will be 
discarded.\")\n else:\n if(not silent):\n print(\"Fine transformation applied to improve result\")\n #register.calculate_rms(observation, catalog_data,wcs)\n\n #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n wcs =WCS(wcsprm.to_header())\n if(verbose):\n print(wcs)\n from astropy.wcs import utils\n scales = utils.proj_plane_pixel_scales(wcs)\n #print(scales)\n cdelt = wcsprm.get_cdelt()\n #print(cdelt)\n scale_ratio = scales/cdelt\n #print(scale_ratio)\n pc = np.array(wcsprm.get_pc())\n pc[0,0] = pc[0,0]/scale_ratio[0]\n pc[1,0] = pc[1,0]/scale_ratio[1]\n pc[0,1] = pc[0,1]/scale_ratio[0]\n pc[1,1] = pc[1,1]/scale_ratio[1]\n wcsprm.pc = pc\n wcsprm.cdelt = scales\n\n #WCS difference before and after\n if(not silent):\n print(\"> Compared to the input the Wcs was changed by: \")\n scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n if(not silent):\n print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n #sources:\n #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n def unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. \"\"\"\n return vector / max(np.linalg.norm(vector), 1e-10)\n def matrix_angle( B, A ):\n \"\"\" comment cos between vectors or matrices \"\"\"\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n #bugfix: multiplying by cdelt otherwise the calculated angle is off by a tiny bit\n rotation_angle = matrix_angle(wcsprm.get_pc()@wcsprm.get_cdelt(), wcsprm_original.get_pc()@wcsprm_original.get_cdelt()) /2./np.pi*360.\n if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n text = \"counterclockwise\"\n else:\n text = \"clockwise\"\n if(not silent):\n print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n if(not silent):\n print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n\n\n #check final figure\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Result for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_after.pdf\")\n if(not silent):\n print(\"--- Evaluate how good the transformation is ----\")\n dic_rms = register.calculate_rms(observation, catalog_data,wcsprm)\n #updating file\n converged = determine_if_fit_converged(dic_rms, catalog_data, observation, wcsprm, image.shape[0], 
image.shape[1], silent)\n report[\"converged\"] = converged\n report[\"matches\"] = dic_rms[\"matches\"]\n report[\"match_radius\"] = dic_rms[\"radius_px\"]\n if(converged or save_bad_result):\n write_wcs_to_hdr(fits_image_filename, wcsprm, report, hdul_idx=hdul_idx)\n if(filename_for_sources != None):\n wcs =WCS(wcsprm.to_header())\n observation_on_sky = wcs.wcs_pix2world(observation[[\"xcenter\",\"ycenter\"]], 1)\n #catalog_from_obs = np.zeros(observation_on_sky.shape[0], dtype={'names':('ra', 'dec', 'aperture_sum'),'formats':('f8', 'f8', 'f8')})\n catalog_from_obs = pd.DataFrame()\n catalog_from_obs[\"ra\"]= observation_on_sky[:,0]\n catalog_from_obs[\"dec\"]= observation_on_sky[:,1]\n catalog_from_obs[\"aperture_sum\"]= observation[\"aperture_sum\"]\n catalog_from_obs[\"mag\"]= -1.* observation[\"aperture_sum\"]#this is fine since we only use the mag to order the sources!\n catalog_from_obs.to_csv(filename_for_sources+\".csv\")\n if(images):\n plt.show()\n\n return converged, dic_rms #dictionary with short info about fit, \"matches\" gives a number of objects matched within certain radius", "def zonal_avg(data,Log=False):\n print 'computing zonal average'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n if data.ndim == 3:\n new_data = MA.zeros((data.shape[0],nlat,nlon),dtype=float)\n elif data.ndim == 2:\n new_data = MA.zeros((nlat,nlon),dtype=float)\n else:\n print 'Check field dimensions'\n sys.exit()\n\n # geometric mean?\n if Log:\n work = MA.log(data)\n else:\n work = data\n\n # remap data to new regular grid\n for i in range(nlat):\n #print 'lat = %.2f'%(lat_t[i])\n for j in range(nlon):\n new_data[:,i,j] = extract_loc(lon_t[j],lat_t[i],tlon,tlat,work)\n\n # compute zonal average\n if Log:\n za_data = (MA.exp(MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape))))\n else:\n za_data = (MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape)))\n\n return za_data, lat_t", "def average_rasters(*raster_list, clamp=None):\r\n nodata_list = [\r\n pygeoprocessing.get_raster_info(path)['nodata'][0]\r\n for path in raster_list[:-1]]\r\n target_nodata = -1.\r\n\r\n def average_op(*array_list):\r\n result = numpy.empty_like(array_list[0])\r\n result[:] = target_nodata\r\n valid_mask = numpy.ones(result.shape, dtype=numpy.bool)\r\n clamped_list = []\r\n for array, nodata in zip(array_list, nodata_list):\r\n valid_mask &= array != nodata\r\n if clamp:\r\n clamped_list.append(\r\n numpy.where(array > clamp, clamp, array))\r\n else:\r\n clamped_list.append(array)\r\n\r\n if valid_mask.any():\r\n array_stack = numpy.stack(clamped_list)\r\n result[valid_mask] = numpy.average(\r\n array_stack[numpy.broadcast_to(\r\n valid_mask, array_stack.shape)].reshape(\r\n len(array_list), -1), axis=0)\r\n 
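# note (added comment): at this point `result` holds the per-pixel mean of the (optionally clamped) input rasters wherever every input has valid data; all other pixels keep `target_nodata`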
return result\r\n\r\n pygeoprocessing.raster_calculator(\r\n [(path, 1) for path in raster_list[:-1]], average_op,\r\n raster_list[-1], gdal.GDT_Float32, target_nodata)", "def lzs (inlist):\r\n zscores = []\r\n for item in inlist:\r\n zscores.append(z(inlist,item))\r\n return zscores", "def testStatisticsRamp(self):\n\n \n nx = 101\n ny = 64\n img = afwImage.ImageF(afwGeom.Extent2I(nx, ny))\n \n z0 = 10.0\n dzdx = 1.0\n mean = z0 + (nx/2)*dzdx\n stdev = 0.0\n for y in range(ny):\n for x in range(nx):\n z = z0 + dzdx*x\n img.set(x, y, z)\n stdev += (z - mean)*(z - mean)\n\n stdev = math.sqrt(stdev/(nx*ny - 1))\n \n stats = afwMath.makeStatistics(img, afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN)\n testmean = stats.getValue(afwMath.MEAN)\n teststdev = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(stats.getValue(afwMath.NPOINT), nx*ny)\n self.assertEqual(testmean, mean)\n self.assertEqual(teststdev, stdev )\n \n stats = afwMath.makeStatistics(img, afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean, meanErr = stats.getResult(afwMath.MEAN)\n sd = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(mean, img.get(nx/2, ny/2))\n self.assertEqual(meanErr, sd/math.sqrt(img.getWidth()*img.getHeight()))\n \n # ===============================================================================\n # sjb code for percentiles and clipped stats\n \n stats = afwMath.makeStatistics(img, afwMath.MEDIAN)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEDIAN))\n \n stats = afwMath.makeStatistics(img, afwMath.IQRANGE)\n self.assertEqual(dzdx*(nx - 1)/2.0, stats.getValue(afwMath.IQRANGE))\n \n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEANCLIP))", "def get_resample(name: str) -> str:\n\n methods = {\n \"first\":\n \"\"\"\nimport numpy as np\n\ndef first(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in reversed(range(len(in_ar))):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"last\":\n \"\"\"\nimport numpy as np\n\ndef last(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in range(len(in_ar)):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"max\":\n \"\"\"\nimport numpy as np\n\ndef max(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.max(in_ar, axis=0)\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"average\":\n \"\"\"\nimport numpy as np\n\ndef average(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n div = np.zeros(in_ar[0].shape)\n for i in range(len(in_ar)):\n div += (in_ar[i] != 0)\n div[div == 0] = 1\n \n y = np.sum(in_ar, axis = 0, dtype = 'uint16')\n y = y / div\n \n np.clip(y,0,255, out = out_ar)\n\"\"\"}\n\n if name not in methods:\n raise ValueError(\n \"ERROR: Unrecognized resampling method (see documentation): '{}'.\".\n format(name))\n\n return methods[name]", "def run_zrtest(self): # Unweighted z-test\r\n n = reduce(lambda x, y: x+(y.bw_ratio() > 0), self.sorted_r, 0)\r\n if n == 0: return (0, 0)\r\n avg = reduce(lambda x, y: x+y.bw_ratio(), self.sorted_r, 0)/float(n)\r\n def notlambda(x, y):\r\n if y.bw_ratio() <= 0: return x+0\r\n else: return x+(y.bw_ratio()-avg)*(y.bw_ratio()-avg)\r\n stddev = math.sqrt(reduce(notlambda, 
self.sorted_r, 0)/float(n))\r\n if not stddev: return (avg, stddev)\r\n for r in self.sorted_r:\r\n if r.bw_ratio() > 0:\r\n r.z_ratio = abs((r.bw_ratio()-avg)/stddev)\r\n r.prob_zr = TorUtil.zprob(-r.z_ratio)\r\n return (avg, stddev)", "def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d", "def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. 
#else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl", "def proc_modscag(fn_list, extent=None, t_srs=None):\n #Use cubic spline here for improve upsampling \n ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')\n stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list) \n #Create stack here - no need for most of mastack machinery, just make 3D array\n #Mask values greater than 100% (clouds, bad pixels, etc)\n ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)\n\n stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)\n stack_count.set_fill_value(0)\n stack_min = ma_stack.min(axis=0).astype(np.uint8)\n stack_min.set_fill_value(0)\n stack_max = ma_stack.max(axis=0).astype(np.uint8)\n stack_max.set_fill_value(0)\n stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)\n stack_med.set_fill_value(0)\n\n out_fn = stack_fn + '_count.tif'\n iolib.writeGTiff(stack_count, out_fn, ds_list[0])\n out_fn = stack_fn + '_max.tif'\n iolib.writeGTiff(stack_max, out_fn, ds_list[0])\n out_fn = stack_fn + '_min.tif'\n iolib.writeGTiff(stack_min, out_fn, ds_list[0])\n out_fn = stack_fn + '_med.tif'\n iolib.writeGTiff(stack_med, out_fn, ds_list[0])\n\n ds = gdal.Open(out_fn)\n return ds", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n # print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # TODO - make the default configurable.\n# if src.crs == None:\n# src.crs = CRS.from_epsg(4326)\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n # print(type(vrt))\n # save the file as an enumerated tiff. 
reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def calc_shape_statistics(self, stat_names):\n stats = {}\n try:\n all_props = [regionprops(m) for m in self.masks]\n except TypeError:\n raise TypeError(\"masks not the right type\")\n for stat in stat_names:\n stats[stat] = np.mean([p[0][stat] for p in all_props])\n return stats", "def desforestation_base(ras, threshold=25):\n \"\"\"input raster path -> return stats\"\"\"\n\n # get area grid\n area_grid = raster_area_lat(ras) # true WGS84 spheroid\n\n # getting numpy object\n ras_np_raw = gdal_tif_to_numpy(ras)\n # masking data not need as further masked below\n\n # create mask greater than 25, the same used by Hansen\n # ras_sub_mask = numpy.ma.masked_greater_equal(ras_np_raw, 10)\n ras_sub_mask = numpy.ma.masked_greater_equal(ras_np_raw, threshold)\n\n # use count (no mask) NOT size (including mask)\n # count_pixel = ras_sub.count()\n count_pixel = ras_sub_mask.mask.sum()\n\n # True is treated as 1\n total_area = (ras_sub_mask.mask * area_grid).sum(dtype ='float64')\n\n result = [count_pixel, total_area]\n\n return result", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n print(type(vrt))\n # save the file as an enumerated tiff. 
reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all", "def analyze_results(results): #, result_nonprivate):\n res_dimensions = zip(*results)\n mean, std = [], []\n \n for resdim in res_dimensions:\n mean.append ( numpy.average(resdim) )\n std.append ( numpy.std(resdim) )\n\n return mean, std", "def output_rasters(self, arr, outdir, outname):\n\n # make the subdirectories if we need 'em\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n if self.config_dict['path_mode'] == 'local':\n outpath = os.path.join(outdir, outname)\n print('the outpath for file {} is {}'.format(outname, outpath))\n\n band1 = arr\n with rasterio.open(outpath, 'w', 
driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n else:\n print('PATH MODE in config is not set properly for the local implementation of output_Rasters')\n sys.exit(0)", "def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True", "def drizzle_array_groups(sci_list, wht_list, wcs_list, scale=0.1, kernel='point', pixfrac=1., verbose=True):\n from drizzlepac.astrodrizzle import adrizzle\n from stsci.tools import logutil\n log = logutil.create_logger(__name__)\n \n # Output header / WCS \n header, outputwcs = compute_output_wcs(wcs_list, pixel_scale=scale)\n shape = (header['NAXIS2'], header['NAXIS1'])\n \n # Output arrays\n outsci = np.zeros(shape, dtype=np.float32)\n outwht = np.zeros(shape, dtype=np.float32)\n outctx = np.zeros(shape, dtype=np.int32)\n \n # Do drizzle\n N = len(sci_list)\n for i in range(N):\n if verbose:\n log.info('Drizzle array {0}/{1}'.format(i+1, N))\n \n adrizzle.do_driz(sci_list[i].astype(np.float32, copy=False), \n wcs_list[i], \n wht_list[i].astype(np.float32, copy=False),\n outputwcs, outsci, outwht, outctx, 1., 'cps', 1,\n wcslin_pscale=wcs_list[i].pscale, uniqid=1, \n pixfrac=pixfrac, kernel=kernel, fillval=0, \n stepsize=10, wcsmap=None)\n \n return outsci, outwht, outctx, header, outputwcs", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n 
type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n 
flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n 
input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def calculate_maps(self,statistic=None,titles=None,filenames=\"auto\") :\n\n if statistic is not None :\n self.statistic = statistic\n\n if isinstance(self.statistic,str) :\n self.statistic = [self.statistic]\n \n # declare array of nans to fill with maps\n self.maps = np.full([self.num_hists * len(self.statistic)] + \n list(np.shape(self.hists[0])[1:]),np.nan)\n\n if titles is not None :\n self.titles = titles\n else :\n self.titles = [str(x) for x in range(self.num_hists * len(self.statistic))]\n\n if isinstance(filenames,str) and filenames == \"auto\" :\n self.filenames = [str(x) for x in range(self.num_hists * len(self.statistic))]\n else :\n self.filenames = filenames\n\n\n mapnum = 0\n hist_inds = []\n stat_inds = []\n for i in range(len(self.statistic)) :\n for j in range(self.num_hists) :\n \n self.maps[mapnum,:,:] = calculate_map_from_hists(\n self.hists[j],self.statistic[i],self.hist_specs[j]['bin_centers'])\n\n if titles is None :\n if filenames == \"auto\" :\n self.titles[mapnum], self.filenames[mapnum] = gen_map_title(**{\n **self.hist_specs[j],\n 'statistic':self.statistic[i]},filename=True)\n else :\n self.titles[mapnum] = gen_map_title(**{\n **self.hist_specs[j],\n 'statistic':self.statistic[i]},filename=False)\n\n hist_inds = hist_inds + [j]\n stat_inds = stat_inds + [i]\n\n mapnum += 1\n\n self.num_maps = mapnum\n\n self.map_specs = {'hist' : hist_inds, 'statistic' : stat_inds}\n\n return self", "def get_area_stats(\n src,\n bounds,\n max_img_size=512,\n indexes=None,\n nodata=None,\n resampling_method=\"bilinear\",\n bbox_crs=\"epsg:4326\",\n histogram_bins=20,\n histogram_range=None,\n):\n if isinstance(indexes, int):\n indexes = [indexes]\n elif isinstance(indexes, tuple):\n indexes = list(indexes)\n\n with rasterio.open(src) as src_dst:\n bounds = transform_bounds(bbox_crs, src_dst.crs, *bounds, densify_pts=21)\n\n vrt_params = dict(add_alpha=True, resampling=Resampling[resampling_method])\n\n indexes = indexes if indexes is not None else src_dst.indexes\n nodata = nodata if nodata is not None else src_dst.nodata\n\n def _get_descr(ix):\n \"\"\"Return band description.\"\"\"\n name = src_dst.descriptions[ix - 1]\n if not name:\n name = \"band{}\".format(ix)\n return name\n\n band_descriptions = [(ix, _get_descr(ix)) for ix in indexes]\n\n vrt_transform, vrt_width, vrt_height = get_vrt_transform(\n src_dst, bounds, bounds_crs=src_dst.crs\n )\n vrt_params.update(\n dict(transform=vrt_transform, width=vrt_width, height=vrt_height)\n )\n\n width = round(vrt_width) if vrt_width < max_img_size else max_img_size\n height = round(vrt_height) if vrt_height < max_img_size else max_img_size\n out_shape = (len(indexes), width, height)\n if nodata is not None:\n vrt_params.update(dict(nodata=nodata, add_alpha=False, src_nodata=nodata))\n\n if has_alpha_band(src_dst):\n vrt_params.update(dict(add_alpha=False))\n\n with WarpedVRT(src_dst, **vrt_params) as vrt:\n arr = 
vrt.read(out_shape=out_shape, indexes=indexes, masked=True)\n if not arr.any():\n return None, band_descriptions\n\n params = {}\n if histogram_bins:\n params.update(dict(bins=histogram_bins))\n if histogram_range:\n params.update(dict(range=histogram_range))\n\n stats = {\n indexes[b]: _stats(arr[b], **params)\n for b in range(arr.shape[0])\n if vrt.colorinterp[b] != ColorInterp.alpha\n }\n\n return stats, band_descriptions", "def test_reading_and_writing_of_real_rasters(self):\n\n for rastername in ['Earthquake_Ground_Shaking_clip.tif',\n 'Population_2010_clip.tif',\n 'shakemap_padang_20090930.asc',\n 'population_padang_1.asc',\n 'population_padang_2.asc']:\n\n filename = '%s/%s' % (TESTDATA, rastername)\n R1 = read_layer(filename)\n assert R1.filename == filename\n\n # Check consistency of raster\n A1 = R1.get_data()\n M, N = A1.shape\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster file %s' % R1.filename)\n assert M == R1.rows, msg\n assert N == R1.columns, msg\n\n # Test conversion between geotransform and\n # geometry (longitudes and latitudes)\n longitudes, latitudes = R1.get_geometry()\n gt = raster_geometry2geotransform(longitudes, latitudes)\n msg = ('Conversion from coordinates to geotransform failed: %s'\n % str(gt))\n assert numpy.allclose(gt, R1.get_geotransform(),\n rtol=1.0e-12, atol=1.0e-12), msg\n\n # Write back to new file\n for ext in ['.tif']: # Would like to also have , '.asc']:\n out_filename = unique_filename(suffix=ext)\n write_raster_data(A1,\n R1.get_projection(),\n R1.get_geotransform(),\n out_filename,\n keywords=R1.keywords)\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n assert R2.filename == out_filename\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, M, N))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert M == R2.rows, msg\n assert N == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = ('Array values of written raster array were not as '\n 'expected')\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n msg = 'Keywords were different: %s != %s' % (R1.keywords,\n R2.keywords)\n assert R1.keywords == R2.keywords, msg\n\n # Use overridden == and != to verify\n assert R1 == R2\n assert not R1 != R2\n\n # Check equality within tolerance\n R3 = R1.copy()\n\n R3.data[-1, -1] += 1.0e-5 # This is within tolerance\n assert R1 == R3\n\n R3.data[-1, -1] += 1.0e-2 # This is outside tolerance\n assert R1 != R3\n\n # Check that equality raises exception when type is wrong\n try:\n R1 == Vector()\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)", "def testStatsZebra(self):\n image2 = self.image.Factory(self.image, True)\n #\n # Add 1 to every other row, so the variance is 1/4\n #\n self.assertEqual(image2.getHeight()%2, 0)\n width = image2.getWidth()\n for y in range(1, image2.getHeight(), 2):\n sim = image2.Factory(image2, afwGeom.Box2I(afwGeom.Point2I(0, y), afwGeom.Extent2I(width, 1)),\n afwImage.LOCAL)\n sim += 1\n\n if display:\n 
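# note (added comment): optional visual check only -- the original and the zebra-striped copy are shown in ds9 frames 0 and 1 before the statistics are computed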
ds9.mtv(self.image, frame = 0)\n ds9.mtv(image2, frame = 1)\n\n stats = afwMath.makeStatistics(image2,\n afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean = stats.getResult(afwMath.MEAN)\n n = stats.getValue(afwMath.NPOINT)\n sd = stats.getValue(afwMath.STDEV)\n\n self.assertEqual(mean[0], image2.get(0, 0) + 0.5)\n self.assertEqual(sd, 1/math.sqrt(4.0)*math.sqrt(n/(n - 1)))\n self.assertAlmostEqual(mean[1], sd/math.sqrt(image2.getWidth()*image2.getHeight()), 10)\n\n meanSquare = afwMath.makeStatistics(image2, afwMath.MEANSQUARE).getValue()\n self.assertEqual(meanSquare, 0.5*(image2.get(0, 0)**2 + image2.get(0, 1)**2))", "def run(layers):\n\n # Value above which people are regarded affected\n # For this dataset, 0 is no data, 1 is cloud, 2 is normal water level\n # and 3 is overflow.\n threshold = 0\n\n # Identify hazard and exposure layers\n inundation = get_hazard_layer(layers)\n\n [population] = get_exposure_layers(layers)\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n\n # Scale the population layer\n P = population.get_data(nan=0.0, scaling=True)\n I = numpy.where(D > threshold, P, 0)\n\n # Assume an evenly distributed population for Gender\n G = 0.5\n pregnant_ratio = 0.024 # 2.4% of women are estimated to be pregnant\n\n # Calculate breakdown\n P_female = P * G\n P_male = P - P_female\n P_pregnant = P_female * pregnant_ratio\n\n I_female = I * G\n I_male = I - I_female\n I_pregnant = I_female * pregnant_ratio\n\n # Generate text with result for this study\n total = str(int(sum(P.flat) / 1000))\n count = str(int(sum(I.flat) / 1000))\n\n total_female = str(int(sum(P_female.flat) / 1000))\n total_male = str(int(sum(P_male.flat) / 1000))\n total_pregnant = str(int(sum(P_pregnant.flat) / 1000))\n\n affected_female = str(int(sum(I_female.flat) / 1000))\n affected_male = str(int(sum(I_male.flat) / 1000))\n affected_pregnant = str(int(sum(I_pregnant.flat) / 1000))\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'total': total, 'count': count,\n 'total_female': total_female, 'affected_female': affected_female,\n 'total_male': total_male, 'affected_male': affected_male,\n 'total_pregnant': total_pregnant, 'affected_pregnant': affected_pregnant,\n })\n return R", "def remove_all_rasters():\n for fname in g.list_grouped(['raster'])['PERMANENT']:\n g.run_command('g.remove', flags='f', type='raster',\n name=fname)", "def getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,\n upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,\n image_dates, imageSummary):\n\n # list containing median depths for each image\n median_depths = list()\n median_depths_est = list()\n\n # contains output data for JSON file\n depth_output = {}\n\n # num of images\n num_images = len(imgs)\n\n # create output dictionary for images\n depths = dict()\n\n # create excel workbook and add worksheet\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n\n # create format\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n\n # add titles\n worksheet.write(0, 0, \"Image\", cell_format)\n worksheet.write(0, 1, \"Date\", cell_format)\n worksheet.write(0, len(tensors) + 2, \"Median Depth (mm)\", cell_format)\n worksheet.write(0, 
len(tensors) + 3, \"Median Estimate (mm)\", cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i+2, (\"Stake %s\" % str(i)), cell_format)\n\n # start from the first cell\n row = 1\n col = 0\n\n # image iterator\n iterator = 0\n\n # iterate through images\n for img_ in tqdm.tqdm(imgs):\n # create an image to overlay points on if debugging\n if(debug):\n img_overlay = img_.copy()\n\n # list to hold calculated depths\n depths_stake = list()\n estimate_stake = list()\n\n # get image name\n img_name = img_names[iterator]\n\n # reset column\n col = 0\n\n # write to excel file\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)\n col = 2\n\n # get intersection coordiantes\n coords_stake = intersectionCoords[img_name]\n\n # get blob intersection distances\n intersection_dist_stake = intersectionDist[img_name]\n\n # iterate through stakes in image\n for i, stake in enumerate(coords_stake):\n # if stake is valid and intersection point was found\n if stakeValidity[img_name][i] and stake[\"average\"][1] != False:\n # add reference circles to output image if debugging\n # shows intersection point of image with reference to template\n if(debug):\n cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)\n cv2.circle(img_overlay, (int(stake[\"average\"][0]), int(stake[\"average\"][1])), 5, (0,255,0), 2)\n\n # calculate change in snow depth in mm\n tensor = actualTensors[img_name][i] if actualTensors[img_name][i] != True else tensors[i]\n depth_change = ((templateIntersections[i][1] - upperBorder) - stake[\"average\"][1]) * tensor\n\n # calculate change in snow depth using blob distances\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0\n\n # write to excel file\n worksheet.write(row, col + i, \"%.2f (%.2f)\" % (depth_change, distance_estimate), cell_format)\n\n # add to list\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n\n # if stake wasn't valid or intersection point not found\n else:\n # if stake was valid\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, \"Not Found\", cell_format)\n # invalid stake\n else:\n worksheet.write(row, col + i, \"Invalid Stake\", cell_format)\n\n # append false to array\n depths_stake.append(False)\n estimate_stake.append(False)\n\n # output debug image\n if(debug):\n cv2.imwrite(debug_directory + img_name, img_overlay)\n\n # add list to dictionary\n depths[img_name] = depths_stake\n\n # determine median depth\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n\n if(len(valid_depths) > 0):\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n\n # add to median depth list\n median_depths.append(median)\n median_depths_est.append(median_est)\n\n # write median to excel file\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, \"%.2f\" % median, cell_format)\n worksheet.write(row, len(tensors) + 3, \"%.2f\" % median_est, cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, \"0.0\", 
cell_format)\n worksheet.write(row, len(tensors) + 3, \"0.0\", cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, \"n/a\", cell_format)\n worksheet.write(row, len(tensors) + 3, \"n/a\", cell_format)\n\n # increment row\n row += 1\n\n # increment iterator\n iterator += 1\n\n # update image summary\n imageSummary[img_name][\" \"] = \"\"\n imageSummary[img_name][\"Stake (Depth Calculation)\"] = \"Depth (mm) Estimate (mm)\"\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][\" %d \" % (e+1)] = \"%0.2f %0.2f \" % \\\n (depth, estimate_stake[e])\n else:\n imageSummary[img_name][\" %d \" % (e+1)] = \"%s %s \" % \\\n (\"n/a\", \"n/a\")\n\n # close workbook\n workbook.close()\n\n # remove negative values\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n\n # generate plot\n fig,ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Snow Depth (mm)\")\n plt.xticks(rotation=75)\n plt.tight_layout()\n\n # save figure\n plt.savefig(debug_directory + \"depth-graph.jpg\")\n plt.close()\n\n # return dictionary containing snow depth changes\n return depths, imageSummary", "def test_rasters_created_with_projected_srs(self):\n\n # Create test data\n x_ul = 220534 # x value of upper left corner\n y_ul = 827790 # y_value of upper left corner\n numx = 8 # Number of xs\n numy = 5 # Number of ys\n dx = 200\n dy = -200\n\n # Define array where ys are rows and xs columns\n A1 = numpy.zeros((numy, numx))\n\n # Establish coordinates for lower left corner\n y_ll = y_ul - numy * dy\n x_ll = x_ul\n\n # Define pixel centers along each direction\n x = numpy.linspace(x_ll + 0.5, x_ll + numx - 0.5, numx)\n y = numpy.linspace(y_ll + 0.5, y_ll + numy - 0.5, numy)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numy):\n for j in range(numx):\n A1[numy - 1 - i, j] = linear_function(x[j], y[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == linear_function(x[0], y[4])\n\n # Lower left corner\n assert A1[4, 0] == linear_function(x[0], y[0])\n\n # Upper right corner\n assert A1[0, 7] == linear_function(x[7], y[4])\n\n # Lower right corner\n assert A1[4, 7] == linear_function(x[7], y[0])\n\n # Generate raster object and write\n projection = \"\"\"PROJCS[\"DGN95 / Indonesia TM-3 zone 48.2\",\n GEOGCS[\"DGN95\",\n DATUM[\"Datum_Geodesi_Nasional_1995\",\n SPHEROID[\"WGS 84\",6378137,298.257223563,\n AUTHORITY[\"EPSG\",\"7030\"]],\n TOWGS84[0,0,0,0,0,0,0],\n AUTHORITY[\"EPSG\",\"6755\"]],\n PRIMEM[\"Greenwich\",0,\n AUTHORITY[\"EPSG\",\"8901\"]],\n UNIT[\"degree\",0.01745329251994328,\n AUTHORITY[\"EPSG\",\"9122\"]],\n AUTHORITY[\"EPSG\",\"4755\"]],\n UNIT[\"metre\",1,\n AUTHORITY[\"EPSG\",\"9001\"]],\n PROJECTION[\"Transverse_Mercator\"],\n PARAMETER[\"latitude_of_origin\",0],\n PARAMETER[\"central_meridian\",106.5],\n PARAMETER[\"scale_factor\",0.9999],\n PARAMETER[\"false_easting\",200000],\n PARAMETER[\"false_northing\",1500000],\n AUTHORITY[\"EPSG\",\"23834\"],\n AXIS[\"X\",EAST],\n 
AXIS[\"Y\",NORTH]]\"\"\"\n\n geotransform = (x_ul, dx, 0, y_ul, 0, dy)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n assert nanallclose(R1.get_data(), A1, rtol=1.0e-12)\n assert nanallclose(R1.get_geotransform(), geotransform,\n rtol=1.0e-12)\n assert 'DGN95' in R1.get_projection()", "def heatmap(island_results):\n kart_herb = []\n kart_carn = []\n for row in island_results:\n h_row = []\n c_row = []\n for cell in row:\n h_row.append(cell[\"herbivores\"])\n c_row.append(cell[\"carnivores\"])\n kart_herb.append(h_row)\n kart_carn.append(c_row)\n return kart_herb, kart_carn", "def py_SurfStatAvSurf(filenames, fun = np.add, output_surfstat=False):\n \n if filenames.ndim is not 2:\n raise ValueError('Filenames must be a 2-dimensional array.')\n \n for i in range(0, filenames.shape[0]):\n surfaces = np.empty(filenames.shape[1], dtype=np.object)\n for j in range(0, filenames.shape[1]):\n \n # Check whether input is BSPolyData or a filename. \n if isinstance(filenames[i,j], BSPolyData):\n surfaces[j] = filenames[i,j] \n else:\n surfaces[j] = read_surface(filenames[i,j])\n \n # Concatenate second dimension of filenames. \n if j is 0:\n tri = get_cells(surfaces[j]) \n coord = get_points(surfaces[j])\n else:\n tri = np.concatenate((tri, get_cells(surfaces[j]) + coord.shape[0]), axis=0)\n coord = np.concatenate((coord, get_points(surfaces[j])), axis=0)\n \n if i is 0:\n m = 1\n coord_all = coord\n else:\n coord_all = fun(coord_all,coord)\n m = fun(m,1)\n \n coord_all = coord_all / m \n \n if output_surfstat:\n surface = {'tri': np.array(tri) + 1, 'coord': np.array(coord_all).T}\n else:\n surface = build_polydata(coord_all, tri)\n \n return surface", "def av_cmaps(cmaps,nres,resnames,outdir,name_mod,mtype=\"NULL\"):\n\tplt.clf()\n\tnframes = len(cmaps) \n\tav = np.zeros((cmaps[0].shape))\n\n\t# save cmaps to npy file. 
Data must first be reshaped.\n\tif mtype == \"NULL\":\n\t\tcmaps = np.array(cmaps)\n\t\tresh = cmaps.reshape(cmaps.shape[0],cmaps.shape[1]*cmaps.shape[2])\n\t\tnp.savetxt(outdir+\"CMAPS\" + name_mod + \"_raw.npy\",resh)\n\n\t\tfor i in range(cmaps[0].shape[0]):\n\t\t\tfor j in range(cmaps[0].shape[1]):\n\t\t\t\t# for each element of the matrix\n\t\t\t\tif j > i: # don't compute things twice\n\t\t\t\t\tl = []\n\t\t\t\t\tfor c in cmaps:\n\t\t\t\t\t\t# for each map, determine if there was a contact at that position\n\t\t\t\t\t\tif c[i][j] < 0.7: # nm\n\t\t\t\t\t\t\tl.append(1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tl.append(0)\n\t\t\t\t\tav[i][j] = np.std(l)/(np.sqrt(nframes)-1)\n\t\t\t\t\tav[j][i] = np.mean(l)\n\t\t\t\t# dont consider contacts from neighbors\n\t\t\t\tif i == j or abs(i-j) <= 2:\n\t\t\t\t\tav[i][j] = 0\n\t\t\t\t\tav[j][i] = 0\n\telse:\n\t\tfor m in range(nres):\n\t\t\tfor n in range(nres):\n\t\t\t\tfor fr in range(nframes):\n\t\t\t\t\tav[n][m] += cmaps[fr][m][n]\n\t\tav/=nframes\n\tfig, ax = plt.subplots()\n\tplt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\tif mtype == \"gremlin\":\n\t\tim = ax.imshow(av, cmap='PuBu')\n\t\tcbar = fig.colorbar(im)\n\t\tax.set_title(\"Average Contact Maps from Rosetta+Gremlin Output\")\n\t\tplt.savefig(outdir+\"gremlin_compare_CMAPS\" + name_mod + \".png\")\n\t\tnp.savetxt(outdir+\"gremlin_CMAPS\" + name_mod + \"_av.npy\",av)\n\telif mtype == \"surface\":\n\t\thydrophobic = ['GLY', 'ALA', 'VAL', 'ILE', 'LEU', 'MET', 'PHE', 'TYR', 'TRP', 'PRO', 'CYS']\n\t\thydrophilic = ['SER', 'THR', 'ASN', 'GLN', 'HIS']\n\t\tposcharge = ['ARG', 'LYS']\n\t\tnegcharge = ['ASP', 'GLU']\n\n\t\tfor it,rn in enumerate(resnames):\n\t\t\tif rn in hydrophobic:\n\t\t\t\tplt.axhline(y=it,c='yellow',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='yellow',linewidth=1.5)\n\t\t\telif rn in hydrophilic:\n\t\t\t\tplt.axhline(y=it,c='g',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='g',linewidth=1.5)\n\t\t\telif rn in poscharge:\n\t\t\t\tplt.axhline(y=it,c='b',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='b',linewidth=1.5)\n\t\t\telif rn in negcharge:\n\t\t\t\tplt.axhline(y=it,c='r',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='r',linewidth=1.5)\n\t\t\telse:\n\t\t\t\tprint \"unknown restype:\", rn\n\t\tax.set_title(\"Average Contact Maps of Surface Residues\")\n\t\tim = ax.imshow(av, cmap='Greys')\n\t\tcbar = fig.colorbar(im)\n\t\tplt.savefig(outdir+\"surface_CMAPS\" + name_mod + \".png\")\n\t\tnp.savetxt(outdir+\"surface_CMAPS\" + name_mod + \"_av.npy\",av)\n\telse:\n\t\tim = ax.imshow(av)\n\t\tcbar = fig.colorbar(im)\n\t\tax.set_title(\"Average Contact Maps\")\n\t\tplt.savefig(outdir+\"CMAPS\" + name_mod + \".png\")\n\t\tnp.savetxt(outdir+\"CMAPS\" + name_mod + \"_av.npy\",av)\n\treturn av", "def test_calc_res():\n with xr.open_rasterio(TEST_RASTER_PATH) as src:\n xr_res = ds.utils.calc_res(src)\n with rasterio.open(TEST_RASTER_PATH) as src:\n rio_res = src.res\n assert np.allclose(xr_res, rio_res)", "def radialAvg(data,xi,zi,rad):\r\n #Remove all nan values from data\r\n data=np.nan_to_num(data)\r\n \r\n #Array with x values\r\n xArr=xi[0]\r\n \r\n #Array with z values\r\n zArr=np.transpose(zi)[0]\r\n \r\n #Create the array to store the values\r\n avgData=[]\r\n \r\n #Go over each z position\r\n for i in range(len(zArr)):\r\n \r\n #Store the total at each z position\r\n zPosTot=0\r\n \r\n #Counter to help average\r\n count=0\r\n \r\n #Go over each x position\r\n for j in range(len(xArr)):\r\n \r\n #Check if we are within the radius\r\n if 
np.abs(xArr[j])<=rad:\r\n \r\n #Add the data to the position total\r\n zPosTot+=data[i][j]\r\n \r\n #Increment the counter\r\n count+=1\r\n \r\n #Calculate the radial average\r\n zPosAvg=zPosTot/count\r\n \r\n #Add to the array\r\n avgData.append(zPosAvg)\r\n \r\n return avgData", "def applyPhotoZ (self,arr):\n print \"Applying Template SED PZs\"\n\n ztrue = arr['z']\n\n #select a template\n templates = ['El_B2004a.sed']+['Sbc_B2004a.sed','Scd_B2004a.sed']\n templates = templates +['Im_B2004a.sed','SB3_B2004a.sed','SB2_B2004a.sed','ssp_25Myr_z008.sed','ssp_5Myr_z008.sed']\n\n #read in f_mod files, interpolate, get values of f_mod_b\n ngals = len(ztrue)\n\n f_mod_o = np.zeros((self.nb, ngals))\n for z in range(ngals):\n #currently templates are randomly chosen but probably should be an input with true z\n templateno = np.random.choice(range(self.nt))\n for b in range(self.nb):\n spl = InterpolatedUnivariateSpline(self.z_grid, self.f_mod[:,templateno,b])\n f_mod_o[b][z] = spl(ztrue[z])\n\n #select sigma_b - 10% for now\n sigma = 0.1*f_mod_o\n #select observed fluxes f_obs_b = f_mod_b + sigma_b*rando\n f_obs = f_mod_o+ sigma * (np.random.normal(0.,1.,self.nb*ngals).reshape((self.nb,ngals)))\n # I don't seem to be able to find a more efficient way\n arrx=np.zeros(ngals,dtype=[('pz_f_obs',float,(self.nb,)),('pz_flux_sigma',float,(self.nb,))])\n arrx['pz_f_obs']=f_obs.T\n arrx['pz_flux_sigma']=sigma.T\n arr = recfunctions.merge_arrays((arr,arrx),flatten=True,usemask=False)\n return arr", "def spectra_stacker(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n data_shape = np.shape(image_data)\n ra_axis = data_shape[2]\n dec_axis = data_shape[1]\n wl_axis = data_shape[0]\n\n pxl_total = ra_axis * dec_axis\n \n data_unwrap = [] \n for i_ra in range(ra_axis):\n for i_dec in range(dec_axis):\n pixel_data = image_data[:][:,i_dec][:,i_ra]\n \n data_unwrap.append(pixel_data)\n\n data_stacked = np.zeros((pxl_total, wl_axis))\n for i_row in range(np.shape(data_unwrap)[0]):\n data_row = data_unwrap[i_row]\n for i_pixel in range(len(data_row)):\n data_stacked[i_row][i_pixel] = data_row[i_pixel]\n\n # writing data to a fits file\n hdr = fits.Header()\n hdr['CTYPE1'] = 'pixel'\n hdr['CRPIX1'] = 1\n hdr['CRVAL1'] = data_stacked[0][0]\n hdr['CDELT1'] = data_stacked[0][1] - data_stacked[0][0]\n\n primary_hdu = fits.PrimaryHDU(header=hdr)\n hdu = fits.ImageHDU(data_stacked)\n\n hdul = fits.HDUList([primary_hdu, hdu])\n\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n hdul.writeto(data_dir + '/stacked.fits')\n return data_unwrap", "def snow_summary(code, scalingFactor, statistics=\"SUM\", outcellsize='1000', monthRange='', yearRange='',\n path=\"H:/GIS/SNODAS/SNWDS/\", outpath=\"H:/GIS/SNODAS.gdb/\", area=''):\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n # arcpy.env.mask = area\n\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',\n 'MINORITY': 'MNR',\n 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n\n for y in range(yearRange[0], yearRange[1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[1] + 1): # set months converted here\n g[code + str(y) + 
str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year\n for name in sorted(\n glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type\n rast = os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n # print(g[code+str(y)+str(m).zfill(2)])\n # ifnull = 'in_memory/ifnull'\n # arcpy sa functions that summarize the daily data to monthly data\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,\n ignore_nodata=\"DATA\")\n div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001\n calc = Con(div < 0.0, 0.0, div) # remove negative and null values\n ifnull = Con(IsNull(calc), 0, calc) # remove null\n # WKID 102039\n outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis\n # define save path for file\n outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]\n memoryFeature = \"in_memory/myMemoryFeature\"\n # memoryFeature = outnm\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n # Execute ExtractByMask to clip snodas data to Utah watersheds\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management(\"in_memory\")", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n 
pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def clip_multiple_raster(raster_name_list, output_suffix='clip', overwrite=False, resample=False, n_jobs=2):\n\n global suffix, o, r\n o = overwrite\n r = resample\n suffix = output_suffix\n\n # Check if r.clip is well installed\n if not gscript.find_program('r.clip', '--help'):\n message = _(\"You first need to install the addon r.clip.\\n\")\n message += _(\" You can install the addon with 'g.extension r.clip'\")\n gscript.fatal(message)\n\n # Clip the rasters in multiprocessing pool of jobs\n p = Pool(n_jobs)\n output = p.map(clip, raster_name_list) # Launch the processes for as many items in the list (if function with a return, the returned results are ordered thanks to 'map' function)\n p.close()\n p.join()\n print \"\\n\".join(output)", "def retrieveDensity_reg(slidedir:str, filename : str, resultsdir : str, suffix : str = '_results_dirreg.npz'):\n TILESIZE_X = 512\n TILESIZE_Y = 512\n sl = openslide.open_slide(slidedir+os.sep+filename)\n\n tiles_total_x = int(np.floor(sl.dimensions[0] / TILESIZE_X))\n tiles_total_y = int(np.floor(sl.dimensions[1] / TILESIZE_Y))\n\n # calculate 10 HPFs with highest mitotic activity\n # 1 HPF = 0.237 mm^2 \n A = 2.37 # mm^2 \n W_hpf_microns = np.sqrt(A*4/3) * 1000 # in microns\n H_hpf_microns = np.sqrt(A*3/4) * 1000 # in microns\n\n micronsPerPixel = sl.properties[openslide.PROPERTY_NAME_MPP_X]\n\n W_hpf = int(W_hpf_microns / float(micronsPerPixel)) \n H_hpf = int(H_hpf_microns / float(micronsPerPixel))\n\n W_x = int(W_hpf / TILESIZE_X)\n W_y = int(H_hpf / TILESIZE_Y)\n\n f = np.load(bz2.BZ2File(resultsdir + os.sep + filename + suffix+'.bz2','rb'))\n \n\n scorefield=np.zeros((np.max(f['tilesProcessed'][:,1])+1,1+np.max(f['tilesProcessed'][:,0])))\n scorefield[f['tilesProcessed'][:,1],f['tilesProcessed'][:,0]] = np.reshape(f['scores'],-1)\n\n completeMap = scorefield\n\n kernel = np.ones((W_y,W_x),np.float32)\n ma = cv2.filter2D(completeMap, -1, kernel )\n\n return ma, completeMap", "def output_rasters_cloud(self, arr, outname):\n\n if self.config_dict['path_mode'] == 'aws':\n # later on deleted by s3_delete_local()\n # local_outpath = os.path.join(self.config_dict['temp_folder'], outname)\n local_outname = outname.split('/')[-1]\n local_outpath = os.path.join(self.temp_folder, local_outname)\n self.log.debug('local_outpath {}'.format(local_outpath))\n\n t0 = t_now()\n\n band1 = arr\n # write to a temp folder\n with rasterio.open(local_outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n # Buckets are not directories but you can 
treat them like they are\n # bucket_name = os.path.split(self.config_dict['out_root'])[0] # dev-et-data\n # bucket_prefix = os.path.split(self.config_dict['out_root'])[-1] # tile_modelrun1\n bucket_name = self.config_dict['out_root'].split('/')[0]\n bucket_prefix_list = self.config_dict['out_root'].split('/')[1:]\n print(bucket_prefix_list)\n bucket_prefix = '/'.join(bucket_prefix_list)\n print(\"bucket prefix =\", bucket_prefix)\n bucket_filepath = os.path.join(bucket_prefix, outname) # os.path.join(dev-et-data/tile_modelrun1, outname)\n\n # uploads to aws bucket with filepath\n self.s3_delete_local(local_file=local_outpath, bucket=bucket_name, bucket_filepath=bucket_filepath)\n t_total = t_now() - t0\n self.log.info(\"OUTPUT - TIME - {} - {}\".format(t_total, bucket_filepath))\n\n elif self.config_dict['path_mode'] == 'google':\n print('google path mode not yet implemented')\n sys.exit(0)\n\n else:\n print('PATH MODE in config is not set properly for the cloud implementation of output_Rasters')\n sys.exit(0)", "def find_location_gs(source_name, source_alt_az,\n minute, hour, day, month, year,\n plot_grids=True):\n\n alt, az = source_alt_az\n source_obj = Vizier.query_object(source_name, catalog='V/50')[0]\n source_ra_dec = (source_obj['RAJ2000'][0], source_obj['DEJ2000'][0])\n\n source_ra_hms = tuple(map(float, source_ra_dec[0].split()))\n source_dec_dms = tuple(map(float, source_ra_dec[1].split()))\n\n source_ra = Angle(source_ra_hms, unit='hourangle').degree\n source_dec = Angle(source_dec_dms, unit=u.deg).degree\n\n lats = np.arange(-90., 90, res)\n longs = np.arange(-180, 180, res)\n\n ra_grid = np.zeros((len(lats), len(longs)))\n dec_grid = np.zeros((len(lats), len(longs)))\n score_grid = np.zeros((len(lats), len(longs)))\n\n # Run the grid\n lat_counter, long_counter = 0, 0\n for i in range(len(lats)):\n for j in range(len(longs)):\n # Need to sort out angular units\n lat, long = lats[i], longs[j]\n\n ra, dec = altaz_to_radec((alt, az), pos=(lat, long),\n minute=minute, hour=hour, day=day,\n month=month, year=year, tz_offset=5)\n\n # pos_grid[i, j] = {'RA': ra, 'DEC': dec}\n ra_grid[i, j] = ra\n dec_grid[i, j] = dec\n\n # Bad - planar:\n score = np.sqrt((ra - source_ra)**2 + (dec - source_dec)**2)\n\n # Good - spherical:\n # score = np.arccos(np.sin(dec) * np.sin(source_dec) + np.cos(dec) * np.cos(source_dec) * np.cos(abs(ra - source_ra)))\n\n score_grid[i, j] = score\n\n verbose = False\n if verbose is True:\n print('RA, Source RA:', ra, source_ra)\n print('DEC, Source DEC:', dec, source_dec)\n print('Score:', score)\n print('\\n')\n else:\n step = long_counter + lat_counter * len(lats)\n print (str(step) + '/' + str(len(lats) * len(longs)))\n long_counter += 1\n\n outname = 'latlong-gridsearch-results_' + str(res)\n score_df = pd.DataFrame(score_grid)\n score_df.to_csv(outname + '.csv')\n\n if plot_grids is True:\n lat_coord = (90 + local_latlong[0]) * res\n long_coord = (180 + local_latlong[1]) * res\n\n plt.contour(score_grid)\n plt.plot([lat_coord], [long_coord], 'or')\n plt.matshow(score_grid, cmap='magma')\n\n xtick_locs = np.arange(0, len(longs), len(longs)/6)\n xtick_labs = [int(longs[i]) for i in xtick_locs]\n plt.xticks(xtick_locs, xtick_labs)\n\n # plt.ylim(max(lats), min(lats))\n ytick_locs = np.arange(0, len(lats), len(lats)/10)\n ytick_labs = [int(lats[i]) for i in ytick_locs]\n plt.yticks(ytick_locs, ytick_labs)\n\n plt.savefig(outname + '.png', dpi=200)\n plt.show(block=False)\n\n\n return {'RA': ra_grid, 'DEC': dec_grid, 'SCORE': score_grid}", "def 
main(raster_file):\n with rasterio.open(raster_file) as src:\n data = src.read() # gets ALL the data\n single_band = data[0] # gets the first band OR src.read(1)\n\n print (f'Normally expect this shape from rasterio: {data.shape}')\n # https://rasterio.readthedocs.io/en/latest/topics/image_processing.html\n\n image = reshape_as_image(data)\n\n print(f'After reshaping as image: {image.shape}')\n\n reshaped_to_raster = reshape_as_raster(image)\n\n print(f'After reshaping as raster: {reshaped_to_raster.shape}')\n\n print('---------------')\n\n print(f'first band, or a single band image: {single_band.shape}')\n\n added_dimension = np.expand_dims(single_band, axis=2)\n\n print(f'After adding a dimension: {added_dimension.shape}')\n print('---------------')\n print(added_dimension[:,:,0])", "def radiance_map(file, config, vmax=4200, levels=20, typ=''):\n \n # Select data from configuration \n azimuths = config['skymap'][:, 0] # +180 # azimuths\n zeniths = config['skymap'][:, 1] # zeniths\n\n if typ == 'sim':\n # look for wavelength index in array\n waves_sim = dataset.attrs['simulated_Columns'].split('nm')[0].split('[')[1].split(\n ']')[0].split(',')\n waves = np.asarray(list(map(int, waves_sim)))\n wave_indx = np.where(waves == wave)\n try:\n wave_indx = np.int(wave_indx[0][0])\n except:\n print(\"Wavelength is not in dataset\")\n z = dataset.simulated[:, wave_indx, time_indx]\n\n elif typ == 'meas':\n wave_indx = int((config['wavelength'] - 250 - config['wave_correction']) / 0.446)\n with h5py.File(file, 'r') as data:\n z = data['data'][:, wave_indx]\n else:\n print('Select a input data type(sim or meas)')\n\n # Add values in the origin to close the surface interpolation\n azimuths = np.append(azimuths, [270, 0, 0, 0, 0, 0, 0, 0])\n zeniths = np.append(zeniths, [0, 12, 24, 36, 48, 60, 72, 84])\n z = np.append(z, [z[0], z[3], z[9], z[19], z[33], z[51], z[73], z[99]])\n\n # Convert x to radians\n azimuths = np.radians(azimuths)\n zeniths = np.radians(zeniths)\n\n # Remove dead channels of the dataset\n azimuths = np.delete(azimuths, config['dead_fibre'])\n zeniths = np.delete(zeniths, config['dead_fibre'])\n z = np.delete(z, config['dead_fibre'])\n\n # Set up a regular grid of interpolation point\n thetai, ri = np.linspace(azimuths.min(), azimuths.max(),\n num=len(azimuths)), \\\n np.linspace(zeniths.min(), zeniths.max(), num=len(zeniths))\n\n ri, thetai = np.meshgrid(ri, thetai, indexing='ij')\n\n # zi = scipy.interpolate.griddata((azimuths, zeniths), z, (thetai, ri),\n # method='linear')\n\n rbf = scipy.interpolate.Rbf(azimuths, zeniths, z, fucntion='gaussian',\n epsilon=0.05)\n\n ZI = rbf(thetai, ri)\n\n if typ == 'sim':\n name = str(dataset.time[time_indx].values) # ''\n else:\n name = 'testing' #str(dataset.time[time_indx].values)\n\n # Create the directory to save the results\n # os.makedirs(os.path.dirname(config['path_note'] + '/figures/'),\n # exist_ok=True)\n if vmax == 'default':\n vmax = 4200\n else:\n vmax = vmax\n\n # Plot the dataset\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n cmap = 'Spectral_r' # 'rainbow'\n a = plt.contourf(thetai, ri, ZI, levels, cmap=cmap, vmin=0,\n vmax=vmax) # , vmax=4932)\n plt.title('{} UTC {}nm'.format(name, config['wavelength']))\n plt.axis([0, 2*np.pi, 0, 1.48])\n\n plt.scatter(azimuths, zeniths, cmap=cmap, s=1)\n ax.grid(False)\n ax.set_theta_zero_location(\"N\") # Set the direction of polar plot\n ax.set_theta_direction(1) # Set the increase direction on azimuth angles\n # (-1 to clockwise, 1 counterclockwise)\n cbar = 
plt.colorbar(a)\n cbar.set_label(\"counts\", rotation=90)\n\n # if typ == 'sim':\n # plt.savefig(\n # 'figures/skymap/simulated/skymap{}nm_{}UTC_sim.jpeg'.format(wave,\n # name),\n # dpi=300)\n # plt.show();\n # else:\n # plt.savefig(\n # 'figures/skymap/measured/skymap{}nm_{}UTC_meas.jpeg'.format(wave,\n # name),\n # dpi=300)", "def map(z):\n pass", "def level1_hitmaps(filename,\n image_directory,\n band_average=True,\n feed_average=False,\n feeds=[1],\n make_hits=True,\n make_sky=True,\n field_width=None,\n cdelt=[1./60.,1./60.],\n ctype=['RA---TAN','DEC--TAN'],\n crval=None,\n source='None',\n plot_circle=False,\n plot_circle_radius=1,\n AzElMode=False,\n SunMode=False):\n\n\n try:\n fd = h5py.File(filename,'r')\n except OSError:\n print('Unable to open file {}'.format(filename))\n return\n\n # cdelt given in arcmin\n if not isinstance(field_width, type(None)):\n xpixelWidth = int(field_width[0]/cdelt[0]*60)\n ypixelWidth = int(field_width[1]/cdelt[1]*60)\n image_width = [xpixelWidth, ypixelWidth]\n else:\n image_width = None\n\n if isinstance(image_directory, type(None)):\n image_directory = filename.split('/')[-1].split('.')[0]\n if not os.path.exists(image_directory):\n os.makedirs(image_directory)\n\n\n if AzElMode:\n mapper = MapperAzEl(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n image_directory=image_directory,\n ctype=ctype)\n elif SunMode:\n mapper = MapperSun(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n image_directory=image_directory,\n ctype=ctype)\n \n else:\n mapper = Mapper(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n image_directory=image_directory,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n ctype=ctype)\n \n \n mapper.setLevel1(fd, source)\n if 'all' in feeds:\n feeds = [feed for feed in fd['spectrometer/feeds'][:] if feed != 20]\n if feed_average:\n \n maps = mapper(feeds, usetqdm=True)\n fstr = '-'.join(['{:02d}'.format(feed) for feed in feeds if feed in mapper.feed_ids])\n outdir = '{}'.format(image_directory)\n\n mapper.plotImages(feeds,\n '{}/Hitmap_FeedAvg.png'.format(outdir),\n '{}/BandAverage_FeedAvg.png'.format(outdir),\n plot_circle,\n plot_circle_radius)\n # mapper.SaveMaps('{}/BandAverage_FeedAvg.fits'.format(image_directory))\n \n \n for feed in tqdm(feeds):\n if not isinstance(mapper.map_bavg,type(None)):\n mapper.map_bavg *= 0.\n mapper.hits = None\n\n maps = mapper(feed)\n\n fstr = '-'.join(['{:02d}'.format(feed)])\n outdir = '{}'.format(image_directory)\n\n mapper.plotImages([feed],\n '{}/Hitmap_Feed{:02d}.png'.format(outdir,feed),\n '{}/BandAverage_Feed{:02d}.png'.format(outdir,feed),\n plot_circle,\n plot_circle_radius)\n #mapper.SaveMaps('{}/BandAverage_Feed{:02d}.fits'.format(image_directory,feed))", "def test_xyz_to_smiles(self):\n xyz1 = \"\"\"S -0.06618943 -0.12360663 -0.07631983\nO -0.79539707 0.86755487 1.02675668\nO -0.68919931 0.25421823 -1.34830853\nN 0.01546439 -1.54297548 0.44580391\nC 1.59721519 0.47861334 0.00711000\nH 1.94428095 0.40772394 1.03719428\nH 2.20318015 -0.14715186 -0.64755729\nH 1.59252246 1.51178950 -0.33908352\nH -0.87856890 -2.02453514 0.38494433\nH -1.34135876 1.49608206 0.53295071\"\"\"\n\n xyz2 = \"\"\"O 2.64631000 -0.59546000 0.29327900\nO 2.64275300 2.05718500 -0.72942300\nC 1.71639100 1.97990400 0.33793200\nC -3.48200000 1.50082200 0.03091100\nC -3.85550400 -1.05695100 -0.03598300\nC 3.23017500 -1.88003900 0.34527100\nC -2.91846400 0.11144600 0.02829400\nC 0.76935400 0.80820200 0.23396500\nC -1.51123800 
-0.09830700 0.09199100\nC 1.28495500 -0.50051800 0.22531700\nC -0.59550400 0.98573400 0.16444900\nC -0.94480400 -1.39242500 0.08331900\nC 0.42608700 -1.59172200 0.14650400\nH 2.24536500 1.93452800 1.29979800\nH 1.14735500 2.91082400 0.31665700\nH -3.24115200 2.03800800 0.95768700\nH -3.08546100 2.10616100 -0.79369800\nH -4.56858900 1.48636200 -0.06630800\nH -4.89652000 -0.73067200 -0.04282300\nH -3.69325500 -1.65970000 -0.93924100\nH -3.72742500 -1.73294900 0.81894100\nH 3.02442400 -2.44854700 -0.56812500\nH 4.30341500 -1.72127600 0.43646000\nH 2.87318600 -2.44236600 1.21464900\nH -0.97434200 2.00182800 0.16800300\nH -1.58581300 -2.26344700 0.02264400\nH 0.81122400 -2.60336100 0.13267800\nH 3.16280800 1.25020800 -0.70346900\"\"\"\n\n xyz3 = \"\"\"N 2.24690600 -0.00006500 0.11597700\nC -1.05654800 1.29155000 -0.02642500\nC -1.05661400 -1.29150400 -0.02650600\nC -0.30514100 0.00000200 0.00533200\nC 1.08358900 -0.00003400 0.06558000\nH -0.39168300 2.15448600 -0.00132500\nH -1.67242600 1.35091400 -0.93175000\nH -1.74185400 1.35367700 0.82742800\nH -0.39187100 -2.15447800 0.00045500\nH -1.74341400 -1.35278100 0.82619100\nH -1.67091600 -1.35164600 -0.93286400\"\"\"\n\n xyz4 = \"\"\"C -0.86594600 0.19886100 2.37159000\nC 0.48486900 -0.16232000 1.75422500\nC 1.58322700 0.83707500 2.14923200\nC 0.88213600 -1.51753600 2.17861400\nN 1.17852900 -2.57013900 2.53313600\nN 0.51051200 -0.21074800 0.26080100\nN -0.51042000 0.21074000 -0.26079600\nC -0.48479200 0.16232300 -1.75422300\nC 0.86590400 -0.19926100 -2.37161200\nC -1.58344900 -0.83674100 -2.14921800\nC -0.88166600 1.51765700 -2.17859800\nN -1.17777100 2.57034900 -2.53309500\nH -1.16019200 1.20098300 2.05838400\nH -1.64220300 -0.50052400 2.05954500\nH -0.78054100 0.17214100 3.45935000\nH 1.70120000 0.85267300 3.23368300\nH 2.53492600 0.56708700 1.69019900\nH 1.29214500 1.83331400 1.80886700\nH 1.15987300 -1.20145600 -2.05838100\nH 0.78046800 -0.17257000 -3.45937100\nH 1.64236100 0.49992400 -2.05962300\nH -2.53504500 -0.56650600 -1.69011500\nH -1.70149200 -0.85224500 -3.23366300\nH -1.29263300 -1.83308300 -1.80892900\"\"\"\n\n xyz5 = \"\"\"O 0.90973400 -0.03064000 -0.09605500\nO 0.31656600 -0.00477100 -1.21127600\nO 2.17315400 -0.03069900 -0.09349100\"\"\"\n\n xyz6 = \"\"\"S 0.38431300 0.05370100 0.00000000\nN -1.13260000 0.07859900 0.00000000\nH 0.85151800 -1.28998600 0.00000000\"\"\"\n\n xyz7 = \"\"\"N 0.00000000 0.00000000 0.44654700\nN 0.00000000 0.00000000 -0.77510900\nH 0.86709400 0.00000000 1.02859700\nH -0.86709400 0.00000000 1.02859700\"\"\"\n\n xyz8 = \"\"\"N 0.00000000 0.00000000 0.65631400\nC 0.00000000 0.00000000 -0.50136500\nH 0.00000000 0.00000000 -1.57173600\"\"\"\n\n# xyz9 = \"\"\"S -0.00866000 -0.60254900 0.00000000\n# N -0.96878800 0.63275900 0.00000000\n# N 1.01229100 0.58298500 0.00000000\"\"\"\n#\n# xyz10 = \"\"\"O -0.79494500 -0.93969200 0.00000000\n# O -0.32753500 1.24003800 0.00000000\n# O 1.28811400 -0.24729000 0.00000000\n# N 0.14143500 0.11571500 0.00000000\n# H -1.65602000 -0.48026800 0.00000000\"\"\"\n#\n# xyz11 = \"\"\"O 1.64973000 -0.57433600 0.02610800\n# O 0.49836300 1.28744800 -0.18806200\n# N -0.57621600 -0.65116600 0.24595200\n# N -1.78357200 -0.10211200 -0.14953800\n# N 0.61460400 0.08152700 -0.00952700\n# H -0.42001200 -1.61494900 -0.03311600\n# H -1.72480300 0.33507600 -1.06884500\n# H -2.07362100 0.59363400 0.53038600\"\"\"\n\n xyz12 = \"\"\"O 1.10621000 0.00000000 -0.13455300\nO -1.10621000 0.00000000 -0.13455300\nN 0.00000000 0.00000000 0.33490500\"\"\"\n\n# xyz13 = \"\"\"O -0.37723000 -1.27051900 
0.00000000\n# N -0.12115000 -0.04252600 0.00000000\n# N -0.95339100 0.91468300 0.00000000\n# C 1.31648000 0.33217600 0.00000000\n# H 1.76422500 -0.11051900 -0.89038300\n# H 1.76422500 -0.11051900 0.89038300\n# H 1.40045900 1.41618100 0.00000000\n# H -1.88127600 0.47189500 0.00000000\"\"\"\n\n xyz14 = \"\"\"S -0.12942800 0.11104800 0.22427200\nO 0.98591500 -1.00752300 -0.31179100\nO -1.43956200 -0.44459900 -0.15048900\nO 0.32982400 1.44755400 -0.21682700\nH 1.85512700 -0.56879900 -0.36563700\"\"\"\n\n xyz15 = \"\"\"N 1.11543700 0.11100500 0.00000000\nN -0.11982300 -0.03150800 0.00000000\nN -1.25716400 0.01530300 0.00000000\nH 1.57747800 -0.80026300 0.00000000\"\"\"\n\n xyz16 = \"\"\"O 1.21678000 -0.01490600 0.00000000\nN 0.04560300 0.35628400 0.00000000\nC -1.08941100 -0.23907800 0.00000000\nH -1.97763400 0.37807800 0.00000000\nH -1.14592100 -1.32640500 0.00000000\"\"\"\n\n xyz17 = \"\"\"S 0.00000000 0.00000000 0.18275300\nO -0.94981300 -0.83167500 -0.84628900\nO 0.94981300 0.83167500 -0.84628900\nO 0.80426500 -0.99804200 0.85548500\nO -0.80426500 0.99804200 0.85548500\nH -1.67833300 -0.25442300 -1.13658700\nH 1.67833300 0.25442300 -1.13658700\"\"\"\n\n xyz18 = \"\"\"S 0.00000000 0.00000000 0.12264300\nO 1.45413200 0.00000000 0.12264300\nO -0.72706600 1.25931500 0.12264300\nO -0.72706600 -1.25931500 0.12264300\"\"\"\n\n xyz19 = \"\"\"N 1.16672400 0.35870400 -0.00000400\nN -1.16670800 0.35879500 -0.00000400\nC -0.73775600 -0.89086600 -0.00000100\nC 0.73767000 -0.89093000 -0.00000100\nC 0.00005200 1.08477000 -0.00000500\nH -1.40657400 -1.74401100 0.00000000\nH 1.40645000 -1.74411900 0.00000000\nH 0.00009400 2.16788100 -0.00000700\"\"\"\n\n xyz20 = \"\"\"C 3.09980400 -0.16068000 0.00000600\nC 1.73521600 0.45534600 -0.00002200\nC 0.55924400 -0.24765400 -0.00000300\nC -0.73300200 0.32890400 -0.00001600\nC -1.93406200 -0.42115800 0.00001300\nC -3.19432700 0.11090700 0.00000900\nH 3.67991400 0.15199400 -0.87914100\nH 3.67984100 0.15191400 0.87923000\nH 3.04908000 -1.25419800 -0.00004300\nH 1.68713300 1.54476700 -0.00005100\nH -0.81003200 1.41627100 -0.00004600\nH -1.83479400 -1.50747300 0.00004100\nH 0.61489300 -1.33808300 0.00002500\nH -3.35410300 1.18597200 -0.00001700\nH -4.07566100 -0.52115800 0.00003300\"\"\"\n\n mol1 = converter.molecules_from_xyz(converter.str_to_xyz(xyz1))[1]\n mol2 = converter.molecules_from_xyz(converter.str_to_xyz(xyz2))[1]\n mol3 = converter.molecules_from_xyz(converter.str_to_xyz(xyz3))[1]\n mol4 = converter.molecules_from_xyz(converter.str_to_xyz(xyz4))[1]\n mol5 = converter.molecules_from_xyz(converter.str_to_xyz(xyz5))[1]\n mol6 = converter.molecules_from_xyz(converter.str_to_xyz(xyz6), multiplicity=1)[1]\n mol7 = converter.molecules_from_xyz(converter.str_to_xyz(xyz7), multiplicity=1)[1]\n mol8 = converter.molecules_from_xyz(converter.str_to_xyz(xyz8))[1]\n # mol9 = converter.molecules_from_xyz(converter.str_to_xyz(xyz9), multiplicity=1)[1]\n # mol10 = converter.molecules_from_xyz(converter.str_to_xyz(xyz10))[1]\n # mol11 = converter.molecules_from_xyz(converter.str_to_xyz(xyz11))[1]\n mol12 = converter.molecules_from_xyz(converter.str_to_xyz(xyz12))[1]\n # mol13 = converter.molecules_from_xyz(converter.str_to_xyz(xyz13))[1]\n mol14 = converter.molecules_from_xyz(converter.str_to_xyz(xyz14))[1]\n mol15 = converter.molecules_from_xyz(converter.str_to_xyz(xyz15))[1]\n mol16 = converter.molecules_from_xyz(converter.str_to_xyz(xyz16))[1]\n mol17 = converter.molecules_from_xyz(converter.str_to_xyz(xyz17))[1]\n mol18 = 
converter.molecules_from_xyz(converter.str_to_xyz(xyz18))[1]\n mol19 = converter.molecules_from_xyz(converter.str_to_xyz(xyz19))[1]\n mol20 = converter.molecules_from_xyz(converter.str_to_xyz(xyz20))[1]\n\n self.assertEqual(mol1.to_smiles(), '[NH-][S+](=O)(O)C')\n self.assertIn(mol2.to_smiles(), ['COC1=C(CO)C=C([C](C)C)C=C1', 'COC1C=CC(=CC=1CO)[C](C)C'])\n self.assertEqual(mol3.to_smiles(), '[N]=C=C(C)C')\n self.assertEqual(mol4.to_smiles(), 'N#CC(N=NC(C#N)(C)C)(C)C')\n self.assertEqual(mol5.to_smiles(), '[O-][O+]=O')\n self.assertEqual(mol6.to_smiles(), 'N#S')\n self.assertEqual(mol7.to_smiles(), '[N-]=[NH2+]')\n self.assertEqual(mol8.to_smiles(), 'C#N')\n # self.assertEqual(mol9.to_smiles(), '[N-]=[S+]#N') # gives [N]S#N, multiplicity 3\n # self.assertEqual(mol10.to_smiles(), '[N+](=O)(O)[O-]') # gives None\n # self.assertEqual(mol11.to_smiles(), 'N(N)[N+](=O)[O-]') # gives None\n self.assertEqual(mol12.to_smiles(), '[O]N=O')\n # self.assertEqual(mol13.to_smiles(), 'C[N+]([NH-])=O') # gives None\n self.assertEqual(mol14.to_smiles(), '[O]S(=O)O')\n self.assertEqual(mol15.to_smiles(), '[N-]=[N+]=N')\n self.assertEqual(mol16.to_smiles(), '[O]N=C')\n self.assertEqual(mol17.to_smiles(), '[O-][S+](=O)(O)O')\n self.assertEqual(mol18.to_smiles(), 'O=S(=O)=O')\n self.assertEqual(mol19.to_adjacency_list(), \"\"\"multiplicity 2\n1 N u1 p1 c0 {4,S} {5,S}\n2 N u0 p1 c0 {3,S} {5,D}\n3 C u0 p0 c0 {2,S} {4,D} {6,S}\n4 C u0 p0 c0 {1,S} {3,D} {7,S}\n5 C u0 p0 c0 {1,S} {2,D} {8,S}\n6 H u0 p0 c0 {3,S}\n7 H u0 p0 c0 {4,S}\n8 H u0 p0 c0 {5,S}\n\"\"\") # cannot read SMILES 'c1ncc[n]1' (but can generate them)\n self.assertEqual(mol20.to_smiles(), 'C=C[CH]C=CC')", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n 
new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def CalcNesz(numRxScanAngles, altitude, chirpBandwidth, ias, srs, txPat, rxPat, wl, txPeakPow, pl, prf):\n neszPeaks = {}\n NESZ_Arr = []\n for i in range(numRxScanAngles):\n nesz_arr = NESZ_BRET(R = srs[i],\n V = SEM.PlatformVelocity(altitude),\n Ia = ias[i],\n Lsa = 0,\n T = Constants.STANDARD_TEMPERATURE,\n Brg = chirpBandwidth,\n F = 4.3,\n L = 4.1,\n Ptx = txPeakPow,\n Gtx = txPat[i],\n Grx = rxPat[i],\n wavelength = wl,\n Pl = pl,\n Prf = prf)\n NESZ_Arr.append(nesz_arr)\n\n # keep track of the NESZ peak values\n for idx, ia in enumerate(ias[i]):\n incidenceAngle = int(ia*1000)\n if incidenceAngle in neszPeaks:\n if neszPeaks[incidenceAngle] > nesz_arr[idx]:\n neszPeaks[incidenceAngle] = nesz_arr[idx]\n else:\n neszPeaks[incidenceAngle] = nesz_arr[idx]\n\n # convert to numpy array\n NESZ_Arr = np.asarray(NESZ_Arr)\n # get the power values\n Nesz_dB_Arr = 10*np.log10(NESZ_Arr)\n\n # Get the maximum NESZ values and the angles where they occur\n Nesz_max_incidence_angles = np.fromiter(neszPeaks.keys(), dtype=float)\n Nesz_max_values = np.fromiter(neszPeaks.values(), dtype=float)\n \n return NESZ_Arr, Nesz_max_incidence_angles, Nesz_max_values", "def GetZScrInfoForAllScaffolds(PosScfBC_d):\n total_pos_and_nIns_l = []\n\n # We initially run through the scaffolds to get SD, mean\n for scf in PosScfBC_d[\"scaffolds\"].keys():\n scf_info = PosScfBC_d[\"scaffolds\"][scf]\n pos_and_nIns_l = [[int(x), scf_info[\"positions\"][x][\"nIns\"]] for x in \\\n scf_info[\"positions\"].keys()]\n total_pos_and_nIns_l += pos_and_nIns_l\n\n\n just_insertions_l = [x[1] for x in total_pos_and_nIns_l]\n mean = float(sum(just_insertions_l))/float(len(just_insertions_l))\n\n SD = GetStandardDeviation(just_insertions_l, mean)\n\n\n Scf_Pos_ZScr_vals = {\"scaffolds\": {}}\n total_max_z = 0\n # Now we run through the scaffolds again to get relation of values to total\n # SD and mean and store them in output dict\n for scf in PosScfBC_d[\"scaffolds\"].keys():\n scf_info = PosScfBC_d[\"scaffolds\"][scf]\n pos_and_nIns_l = [[int(x), scf_info[\"positions\"][x][\"nIns\"]] for x in \\\n scf_info[\"positions\"].keys()]\n scf_max_z, pos_to_Zscr_l = GetZScrValuesForPoints(pos_and_nIns_l, mean, SD)\n if scf_max_z > total_max_z:\n total_max_z = scf_max_z\n\n Scf_Pos_ZScr_vals[\"scaffolds\"][scf] = {\n \"scaffold_length\": scf_info[\"scaffold_length\"],\n \"max_z\": scf_max_z,\n 
\"pos_to_Zscr_l\": pos_to_Zscr_l\n }\n\n\n\n Scf_Pos_ZScr_vals[\"mean\"] = mean\n Scf_Pos_ZScr_vals[\"SD\"] = SD\n Scf_Pos_ZScr_vals[\"max_z\"] = total_max_z\n Scf_Pos_ZScr_vals[\"analysis_type\"] = \"AllGenomeStats\"\n\n return Scf_Pos_ZScr_vals", "def z_score_transformation(data, numeric_list):\n\n transformed_data = data[numeric_list].apply(stats.zscore())\n\n return transformed_data", "def L_radec():\n\treturn sdssimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, img_width=img_width, img_height=img_height)", "def main(parameters):\n metadata = get_metadata(parameters)\n # pprint(metadata)\n image_api = NswSatelliteImages(parameters, metadata)\n print('Zoom level:', image_api.zoom_level,\n 'Resolution:', image_api.resolution,\n 'Scale:', image_api.scale)\n image_api.download_tile(xtile=39000, ytile=60000)", "def score_scene(sr, hr, clearhr, norm, num_crop=6):\n zSR = []\n max_x, max_y = np.array(hr.shape) - num_crop\n sr_ = sr[num_crop//2:-num_crop//2, num_crop//2:-num_crop//2]\n \n np.place(clearhr, clearhr==0, np.nan)\n \n zSR = np.zeros((num_crop + 1, num_crop + 1), np.float64)\n for x_off in prange(0, num_crop+1):\n for y_off in prange(0, num_crop+1):\n \n clearHR_ = clearhr[x_off : x_off + max_x, y_off : y_off + max_y]\n\n hr_ = hr[x_off:x_off + max_x, y_off:y_off + max_y]\n\n diff = (hr_- sr_)* clearHR_\n\n b = np.nanmean(diff)\n\n\n ## compute cMSE\n cMSE = np.nanmean( (diff-b)**2) \n\n cPSNR = -10.0*np.log10(cMSE)\n \n zSR[x_off, y_off] = norm/cPSNR\n\n return zSR.min()", "def compress_color_data(self):\n avg_rgb_vals_dict = {} # dictionary of average color coordinates\n for label in self.labels_list:\n try:\n avg_rgb = np.mean(\n np.mean(np.mean(self.jzazbz_dict[label], axis=0), axis=0), axis=0\n )\n avg_rgb_vals_dict[label] = avg_rgb\n except Exception as exc:\n self.log.error(exc)\n self.log.error(label + \" failed\")\n self.avg_rgb_vals_dict = avg_rgb_vals_dict\n\n jzazbz_dict_simp = {}\n for label in self.labels_list:\n avg_jzazbz = np.mean(self.jzazbz_dist_dict[label], axis=0)\n jzazbz_dict_simp[label] = avg_jzazbz\n self.jzazbz_dict_simp = jzazbz_dict_simp", "def compile_global_stats(results_dir='./../data/*/*cr_sizes*hdf5'):\n\n flist = glob.glob(results_dir)\n output = defaultdict(list)\n flist = [f for f in flist if 'nicmos' not in f]\n print(flist)\n flist.append('./../data/STIS/stis_cr_sizes.hdf5')\n results = [dask.delayed(tally_stats)(f) for f in flist]\n results = list(dask.compute(*results, scheduler='processes'))\n\n for instr, data in results:\n output[instr].append(data)\n\n for key in output.keys():\n cr_count = 0\n img_count = 0\n total_exptime = 0\n for val in output[key]:\n cr_count += val.cr_count\n img_count += val.img_count\n total_exptime += val.total_exptime\n output[key] = [cr_count, img_count, total_exptime]\n\n df = pd.DataFrame(output, index=['cr_count', 'img_count', 'total_exptime'])\n print(df)\n print('Total CR count: {}'.format(df.loc['cr_count', :].sum()))\n print('Total number of images analyzed: {}'.format(df.loc['img_count', :].sum()))\n print('Cumulative exposure time: {}'.format(df.loc['total_exptime', :].sum()))", "def zscore(vals):", "def minmax():\n minmaxlist = []\n timelist = []\n #create a list of the filenames of all sentinel-images\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n print(\"STEP 1/2\")\n print(\"EXPORTING MIN AND MAX VALUES PER BAND\")\n for i in s2files:\n start = time.time()\n nlfile = nlpath + \"/\" + i\n s2file = s2path+\"/\"+i\n #open the file\n s2raster = gdal.Open(s2file) \n 
#iterate over the bands of each image\n for n in range(s2raster.RasterCount):\n f = n + 1\n s2band = s2raster.GetRasterBand(f)\n #read the pixels of the band as an numpy-array\n s2band = s2band.ReadAsArray()\n #resize the bands to have all images in the same size\n s2band = np.resize(s2band,(1050,1050))\n #get the min and max values of each band to be able to 0-1 normalize after\n min = s2band.min()\n max = s2band.max()\n #check if there are already values for the band\n if len(minmaxlist) < s2raster.RasterCount + 1:\n s2minmax = [min,max]\n minmaxlist.append(s2minmax)\n # if the min value of the open band is smaller than the saved minimal value, overwrite it\n if min < minmaxlist[n][0]:\n minmaxlist[n][0] = min\n #if the max value of the open band is higher than the saves maximum value, overwrite it\n if max > minmaxlist[n][1]:\n minmaxlist[n][1] = max\n #open the nightlight img\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n #read the only band of the image as a numpy-array\n nlband = nlband.ReadAsArray()\n #resize it the same way as the sentinel images\n nlband = np.resize(nlband,(1050,1050))\n #get the min and max values of the band\n nlmin = nlband.min()\n nlmax = nlband.max()\n #check if there are already information about min and max values for the nightlight images\n if len(minmaxlist) < s2raster.RasterCount + 1:\n nlminmax = [nlmin,nlmax]\n minmaxlist.append(nlminmax)\n #if the min value of the open nightlight image is smaller than the saved minimal value, overwrite it\n if nlmin < minmaxlist[16][0]:\n minmaxlist[16][0] = nlmin\n #if the max value of the open nightlight image is higher than the saves maximum value, overwrite it\n if nlmax > minmaxlist[16][1]:\n minmaxlist[16][1] = nlmax\n end = time.time()\n timelist.append(end-start)\n print(\"Step 1/2\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. 
time left:\",time.strftime('%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))\n #throw out the Quality Bands (QA10,QA20,QA60)\n minmaxlist = [i for j,i in enumerate(minmaxlist) if j not in [13,14,15]]\n return minmaxlist", "def main(S, N):\n\n z_binary, z_density = point_count(N, S)\n\n extent = [-2, 2, -2, 2]\n plt.imshow(z_binary, extent=extent, cmap='Greys')\n plt.colorbar()\n plt.show()\n plt.close('all')\n\n log_zd = np.log10(z_density)\n plt.imshow(z_density, extent=extent, cmap='jet')\n plt.colorbar()\n plt.show()\n plt.close('all')\n\n log_zd = np.log10(z_density)\n plt.imshow(log_zd, extent=extent, cmap='jet')\n plt.colorbar()\n plt.show()\n plt.close('all')", "def main(ancillary_ws, zero_elev_nodata_flag=False, overwrite_flag=False):\n logging.info('\\nProcess DAYMET ancillary rasters')\n\n # Site URL\n # ancillary_url = 'http://daymet.ornl.gov/files/ancillary_files.tgz'\n\n # Build output workspace if it doesn't exist\n if not os.path.isdir(ancillary_ws):\n os.makedirs(ancillary_ws)\n\n # Input paths\n # ancillary_targz = os.path.join(\n # ancillary_ws, os.path.basename(ancillary_url))\n # dem_nc = os.path.join(ancillary_ws, 'dem_data.nc')\n # mask_nc = os.path.join(ancillary_ws, 'mask_data.nc')\n\n # Output paths\n dem_raster = os.path.join(ancillary_ws, 'daymet_elev.img')\n lat_raster = os.path.join(ancillary_ws, 'daymet_lat.img')\n lon_raster = os.path.join(ancillary_ws, 'daymet_lon.img')\n # mask_raster = os.path.join(ancillary_ws, 'daymet_mask.img')\n\n # Spatial reference parameters\n daymet_proj4 = (\n \"+proj=lcc +datum=WGS84 +lat_1=25 n \"\n \"+lat_2=60n +lat_0=42.5n +lon_0=100w\")\n daymet_osr = drigo.proj4_osr(daymet_proj4)\n daymet_osr.MorphToESRI()\n daymet_proj = daymet_osr.ExportToWkt()\n daymet_cs = 1000\n # daymet_nodata = -9999\n\n # For now, hardcode the DAYMET extent/geo\n snap_xmin, snap_ymin = -4560750, -3090500\n daymet_rows, daymet_cols = 8075, 7814\n # snap_xmin, snap_ymin = -4659000, -3135000\n # daymet_rows, daymet_cols = 8220, 8011\n # daymet_geo = (\n # snap_xmin, daymet_cs, 0.,\n # snap_ymin + daymet_cs * daymet_rows, 0., -daymet_cs)\n daymet_extent = drigo.Extent([\n snap_xmin, snap_ymin,\n snap_xmin + daymet_cs * daymet_cols,\n snap_ymin + daymet_cs * daymet_rows])\n daymet_geo = daymet_extent.geo(daymet_cs)\n logging.debug(\" Extent: {}\".format(daymet_extent))\n logging.debug(\" Geo: {}\".format(daymet_geo))\n # logging.debug(\" Cellsize: {}\".format(daymet_cs))\n # logging.debug(\" Shape: {}\".format(daymet_extent.shape(daymet_cs)))\n\n # # Download the ancillary raster tar.gz\n # if overwrite_flag or not os.path.isfile(ancillary_targz):\n # logging.info('\\nDownloading ancillary tarball files')\n # logging.info(\" {}\".format(os.path.basename(ancillary_url)))\n # logging.debug(\" {}\".format(ancillary_url))\n # logging.debug(\" {}\".format(ancillary_targz))\n # url_download(ancillary_url, ancillary_targz)\n # try:\n # urllib.urlretrieve(ancillary_url, ancillary_targz)\n # except:\n # logging.error(\" ERROR: {}\\n FILE: {}\".format(\n # sys.exc_info()[0], ancillary_targz))\n # os.remove(ancillary_targz)\n\n # # Extract the ancillary rasters\n # ancillary_list = [dem_nc]\n # # ancillary_list = [dem_nc, mask_nc]\n # if (os.path.isfile(ancillary_targz) and\n # (overwrite_flag or\n # not all([os.path.isfile(os.path.join(ancillary_ws, x))\n # for x in ancillary_list]))):\n # logging.info('\\nExtracting ancillary rasters')\n # logging.debug(\" {}\".format(ancillary_targz))\n # tar = 
tarfile.open(ancillary_targz)\n # for member in tar.getmembers():\n # print member.name\n # member.name = os.path.basename(member.name)\n # # Strip off leading numbers from ancillary raster name\n # member.name = member.name.split('_', 1)[1]\n # member_path = os.path.join(ancillary_ws, member.name)\n # if not member.name.endswith('.nc'):\n # continue\n # elif member_path not in ancillary_list:\n # continue\n # elif os.path.isfile(member_path):\n # continue\n # logging.debug(\" {}\".format(member.name))\n # tar.extract(member, ancillary_ws)\n # tar.close()\n\n # # Mask\n # if ((overwrite_flag or\n # not os.path.isfile(mask_raster)) and\n # os.path.isfile(mask_nc)):\n # logging.info('\\nExtracting mask raster')\n # mask_nc_f = netCDF4.Dataset(mask_nc, 'r')\n # logging.debug(mask_nc_f)\n # # logging.debug(mask_nc_f.variables['image'])\n # mask_array = mask_nc_f.variables['image'][:]\n # mask_array[mask_array == daymet_nodata] = 255\n # drigo.array_to_raster(\n # mask_array, mask_raster,\n # output_geo=daymet_geo, output_proj=daymet_proj,\n # output_nodata=255)\n # mask_nc_f.close()\n\n # # DEM\n # if ((overwrite_flag or not os.path.isfile(dem_raster)) and\n # os.path.isfile(dem_nc)):\n # logging.info('\\nExtracting DEM raster')\n # dem_nc_f = netCDF4.Dataset(dem_nc, 'r')\n # logging.debug(dem_nc_f)\n # # logging.debug(dem_nc_f.variables['image'])\n # dem_array = dem_nc_f.variables['image'][:]\n # # Rounding issues of the nodata value when converting to float32\n # dem_array[dem_array == daymet_nodata] -= 1\n # dem_array = dem_array.astype(np.float32)\n # if zero_elev_nodata_flag:\n # dem_array[dem_array <= daymet_nodata] = 0\n # else:\n # dem_array[dem_array <= daymet_nodata] = np.nan\n # drigo.array_to_raster(\n # dem_array, dem_raster,\n # output_geo=daymet_geo, output_proj=daymet_proj)\n # dem_nc_f.close()\n\n # Latitude/Longitude\n if (os.path.isfile(dem_raster) and\n (overwrite_flag or\n not os.path.isfile(lat_raster) or\n not os.path.isfile(lon_raster))):\n logging.info('\\nDAYMET Latitude/Longitude')\n logging.debug(' {}'.format(lat_raster))\n lat_array, lon_array = drigo.raster_lat_lon_func(\n dem_raster, gcs_cs=0.05)\n drigo.array_to_raster(\n lat_array.astype(np.float32), lat_raster,\n output_geo=daymet_geo, output_proj=daymet_proj)\n logging.debug(' {}'.format(lon_raster))\n drigo.array_to_raster(\n lon_array.astype(np.float32), lon_raster,\n output_geo=daymet_geo, output_proj=daymet_proj)\n del lat_array, lon_array\n\n logging.debug('\\nScript Complete')", "def plot_zcalib(args):\n\n start_date = args.start_date\n end_date = args.end_date\n\n start_date_dt = dp.parse(start_date) \n end_date_dt = dp.parse(end_date) \n \n min_date = dp.parse(SETTINGS.MIN_START_DATE)\n max_date = dp.parse(SETTINGS.MAX_END_DATE)\n \n if start_date_dt < min_date or end_date_dt > max_date:\n raise ValueError(f'Date must be in range {SETTINGS.MIN_START_DATE} - {SETTINGS.MAX_END_DATE}')\n\n phi_dir = os.path.join(SETTINGS.PHI_DIR)\n img_dir = os.path.join(SETTINGS.Z_CALIB_DIR,'images/')\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n filelist1 = glob.glob(phi_dir+\"phiest*\")\n filelist2 = glob.glob(phi_dir+\"phiobs*\")\n filelist1.sort()\n filelist2.sort()\n dates=[]\n ind=[]\n\n if len(filelist1) != len(filelist2):\n raise ValueError(\"Number of phiest and phiobs files does not match!\")\n\n #Loop through files to find the indices of those between the inputted start and end dates\n for f in range(0,len(filelist1)):\n match = re.search(r'\\d{8}',filelist1[f])\n file=match.group()\n 
file_dt=dp.parse(file)\n if file_dt >= start_date_dt and file_dt <= end_date_dt:\n ind.append(f)\n dates.append(file)\n\n ndays=len(ind)\n print(ndays)\n\n #If the number of elevation angles in the volumes changes over time, then the total number of rays also varies\n #This loop finds the maximum number of rays\n for f in range(0,ndays):\n file=np.load(filelist1[ind[f]])\n if f==0:\n [_,a]=file.shape\n nrays=a\n else:\n [_,a2]=file.shape\n if a2>a:\n nrays=a2\n\n #Number of volumes can vary each day \n nvols=250\n phiest=np.zeros((ndays,nvols,nrays))*np.nan\n phiobs=np.zeros((ndays,nvols,nrays))*np.nan\n good_rays=np.zeros((ndays,nvols))*np.nan\n x=np.zeros((nvols,nrays))*np.nan\n \n #Load each phiest and phiobs data for each day and store into 3D array. \n #Calculate number of good rays for each day/volume\n d=0\n for f in range(0,ndays):\n phiest1 = np.load(filelist1[ind[f]])\n [a,b] = phiest1.shape\n phiest[d,0:a,0:b] = phiest1 \n phiobs1 = np.load(filelist2[ind[f]])\n [a,b] = phiobs1.shape\n phiobs[d,0:a,0:b] = phiobs1\n d=d+1\n \n #Calculate number of good rays in each volume. good_rays(ndays,nvols)\n for j in range(ndays): \n for i in range(nvols):\n good_rays[j,i] = np.nansum(np.isfinite(phiest[j,i,:]))\n\n #bias_each_ray (ndays,nvols,nrays)\n #Calculate a bias/offset for each individual ray\n bias_each_ray = (phiest - phiobs) / phiobs\n \n #Only use volumes with more than 10 good rays for calculation of overall bias.\n ind = good_rays>10\n \n #SINGLE VALUES FOR WHOLE TIME PERIOD\n mean_bias = np.nanmean(bias_each_ray[ind,:])\n mean_bias_db = 10.0*np.log10(1000+mean_bias*1000)-30\n \n median_bias = np.nanmedian(bias_each_ray[ind,:])\n median_bias_db = 10.0*np.log10(1000.0+median_bias*1000.0)-30.0\n \n std = np.nanstd(bias_each_ray[ind,:])\n std_db = 10.0*np.log10(1000.0+std*1000.0)-30.0\n \n #print 'Mean bias = ', mean_bias_db, 'Median bias = ', median_bias_db, 'Standard Deviation = ', std_db\n \n #DAILY VALUES OF BIAS\n mean_bias_each_day=np.zeros(ndays)*np.nan\n median_bias_each_day=np.zeros(ndays)*np.nan\n std_each_day=np.zeros(ndays)*np.nan\n std_error_each_day = np.zeros(ndays)*np.nan\n num_rays_day=np.zeros(ndays)\n \n for day in range(ndays):\n #good_rays has shape (days,vols)\n #find index for volumes with more than 10 good rays\n ind = good_rays[day,:]>10\n #find all rays on each day within these volumes\n bias_one_day = bias_each_ray[day,ind,:].flatten()\n ind2 = np.isfinite(bias_one_day) \n if np.sum(ind2)>0:\n std_error_each_day[day] = scipy.stats.sem(bias_one_day[ind2])\n mean_bias_each_day[day] = np.nanmean(bias_one_day)\n median_bias_each_day[day] = np.nanmedian(bias_one_day)\n std_each_day[day] = np.nanstd(bias_one_day)\n \n #Number of rays for each day\n num_rays_day[day] = np.sum(np.isfinite(bias_one_day))\n\n #Convert to dB \n mean_bias_each_day_db = 10.0*np.log10(1000.0+mean_bias_each_day*1000.0)-30.0\n median_bias_each_day_db = 10.0*np.log10(1000.0+median_bias_each_day*1000.0)-30.0\n std_each_day_db = 10.0*np.log10(1000.0+std_each_day*1000.0)-30.0\n std_error_each_day_db = 10.0*np.log10(1000.0+std_error_each_day*1000.0)-30.0\n \n #Put data into dataframe\n time = pd.to_datetime(dates, format = '%Y%m%d')\n data = pd.DataFrame({'Mean Bias' : mean_bias_each_day_db, 'Median Bias' : median_bias_each_day_db, \n 'Standard Error' : std_error_each_day_db, 'Standard Deviation' : std_each_day_db}, \n index=time) \n \n #Make plot \n fig, ax1 = plt.subplots(figsize=(15,8)) \n plt.errorbar(data.index, mean_bias_each_day_db, std_error_each_day_db, \n 
color='black',fmt='o',markersize='4', elinewidth=2,capsize=4)\n plt.plot([start_date_dt, end_date_dt],[median_bias_db,median_bias_db],'r-',\n label=\"Median Bias = %s\" % round(median_bias_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db,mean_bias_db],'g', \n label=\"Mean Bias = %s\" % round(mean_bias_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db+std_db*2,mean_bias_db+std_db*2],'g--',\n label=\"Standard Deviation = %s\" % round(std_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db-std_db*2,mean_bias_db-std_db*2],'g--')\n \n plt.plot(data.index, median_bias_each_day_db,'rx')\n \n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%y'))\n plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))\n plt.gca().xaxis.set_minor_locator(mdates.WeekdayLocator(interval=1))\n plt.xlim(start_date_dt,end_date_dt)\n\n plt.xticks(rotation=90)\n plt.xlabel('Time',{'fontsize':18})\n plt.ylabel('Z Bias (dBZ)',{'fontsize':18})\n plt.yticks(size=18)\n plt.xticks(size=18)\n plt.grid()\n plt.legend(loc=0,fontsize=18)\n \n #If you want to overlay number of rays for each data point then uncomment these lines.\n #May need some tweaking to get the yaxis scale correct for the data you are plotting. \n# ax2=ax1.twinx()\n# ax2.set_ylim(0,20000)\n# ax2.plot(data.index, num_rays_day,'bx-')\n# ax2.set_yticks([5000, 10000])\n# ax2.set_yticks([1000, 2000, 3000, 4000, 7500],minor=True)\n# plt.ylabel('Total number of Rays',{'fontsize':18})\n# plt.yticks(size=18)\n# plt.xlim(start_date_dt,end_date_dt)\n\n #Save the plot\n imgname = f'{img_dir}/Z_calibration_{start_date}_{end_date}.png'\n plt.tight_layout()\n plt.savefig(imgname,dpi=150)", "def redshift_draws(self, s_grid, num=1000):\n n_obj = len(self)\n z_draws = np.zeros((n_obj, num))\n # i_range = np.random.rand_int(0, n_obj, len(se))\n\n for i in tqdm(range(n_obj)):\n cdf_z = self['cdf_z'][i]\n _, z_draws[i, :] = self.rvs_from_cdf(s_grid, cdf_z, num=num)\n\n return z_draws", "def makemap(d,x,y,ra0=0,dec0=0, cd=1./60., nxpix=600, nypix=600):\n\n xy = np.zeros((x.size,2))\n xy[:,0] = x.flatten()\n xy[:,1] = y.flatten()\n\n from astropy import wcs\n\n w = wcs.WCS(naxis=2)\n w.wcs.crval = [ra0, dec0]\n w.wcs.cdelt = [cd,cd]\n w.wcs.crpix = [nxpix/2., nypix/2.]\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n\n pixels = w.wcs_world2pix(xy,0)\n ygrid, xgrid = np.meshgrid(np.arange(nypix),np.arange(nxpix))\n\n pixCens = w.wcs_pix2world(np.array([xgrid.flatten(), ygrid.flatten()]).T,0)\n pixCens[:,0] += 1./2.*cd\n pixCens[:,1] += 1./2.*cd\n pflat = (pixels[:,1].astype(int) + (nypix)*pixels[:,0].astype(int)).astype(int)\n\n\n pEdges = np.arange(nxpix*nypix+1)\n m = np.histogram(pflat,pEdges, weights=d)[0]\n h = np.histogram(pflat,pEdges)[0]\n m = m/h\n return m,pixCens,w", "def _gdal_preprocessing(self, nodatavalue: float = 1000000.0, z_positive_up: bool = True,\n layer_names: tuple = ('depth', 'vertical_uncertainty')):\n\n if self.is_vr:\n raise NotImplementedError(\"VR surfacing doesn't currently return gridded data arrays yet, have to figure this out\")\n\n layerdata = []\n geo_transform = []\n finalnames = []\n for cnt, layer in enumerate(layer_names):\n nodex, nodey, nodez, valid, newmins, newmaxs = self.return_surf_xyz(layer)\n if cnt == 0:\n cellx = nodex[0] - self.min_grid_size / 2 # origin of the grid is the cell, not the node\n celly = nodey[-1] + self.min_grid_size / 2\n geo_transform = [np.float32(cellx), self.min_grid_size, 0, np.float32(celly), 0, -self.min_grid_size]\n if z_positive_up:\n if layer.lower() == 
'depth':\n nodez = nodez * -1 # geotiff depth should be positive up, make all depths negative\n layer = 'Elevation'\n nodez = nodez[:, ::-1]\n nodez[np.isnan(nodez)] = nodatavalue\n layerdata.append(nodez)\n finalnames.append(layer)\n return layerdata, geo_transform, layer_names", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def comparison():\n path = \"Data/data_fronts/\"\n path1 = \"Results/labelled_images1010/fronts/\"\n\n #computes the areas for the first frame in order to normalize the other areas\n pol0 = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.0.png.txt\",sep =' '))\n #makes an object polygon in order to compute the area\n pol0 = np.array(pol0)\n pol0 = Polygon(pol0)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n #makes an object polygon in order to compute the area\n 
polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n pol = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.\"+str(i)+\".png.txt\",sep =' '))\n pol = np.array(pol)\n pol = Polygon(pol)\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def compute_at_zref(itile, reso_deg, mode, date, block_choice, tile_dict=None):\n if tile_dict is not None:\n tile = tile_dict\n else:\n tile = stats.date_mode_filter(mode, date, itile)\n CT, SA, RI, BVF2 = tile['CT'], tile['SA'], tile['RHO'], tile['BVF2']\n nanidx = np.where(np.isnan(CT) | np.isnan(SA))\n lat, lon = tile['LATITUDE'], tile['LONGITUDE']\n grid_lat, grid_lon = stats.grid_coordinate(itile, reso_deg)\n lon_deg, lat_deg = np.meshgrid(grid_lon, grid_lat)\n\n lon_rad = np.deg2rad(lon_deg)\n lat_rad = np.deg2rad(lat_deg)\n reso_rad = np.deg2rad(reso_deg)\n\n nlat, nlon = np.shape(lon_deg)\n\n # RI is rho in situ\n\n nz = len(zref)\n nbprof = len(CT)\n\n variables = {}\n\n for b in block_choice:\n # gridded arrays of CT, SA et RI means\n for i, v in enumerate(var_choice['zmean']):\n variables[v] = np.zeros((nz, nlat, nlon))\n\n for k in range(nbprof):\n # print('%4i/%i' % (k, nbprof))\n # todo: weigh in time using juld,\n # e.g. 
only winter statistics\n time_weight = 1.\n xlon_rad = np.deg2rad(lon[k])\n xlat_rad = np.deg2rad(lat[k])\n weight = general.compute_weight(lon_rad, lat_rad,\n xlon_rad, xlat_rad,\n reso_rad)\n weight *= time_weight\n for l in range(nz):\n if np.isnan(CT[k, l]) or np.isnan(SA[k, l]):\n pass\n else:\n variables['NBbar'][l, :, :] += weight\n variables['CTbar'][l, :, :] += weight*CT[k, l]\n variables['SAbar'][l, :, :] += weight*SA[k, l]\n variables['Ribar'][l, :, :] += weight*RI[k, l]\n variables['BVF2bar'][l, :, :] += weight*BVF2[k, l]\n\n # normalize with the number of profiles (fractional\n # because NBbar is fractionnal)\n coef = 1./variables['NBbar']\n coef[variables['NBbar'] < 1] = np.NaN\n\n variables['CTbar'] *= coef\n variables['SAbar'] *= coef\n variables['Ribar'] *= coef\n variables['BVF2bar'] *= coef\n\n if b == 'zstd' or b == 'zdz':\n xlon_rad = np.deg2rad(lon)\n xlat_rad = np.deg2rad(lat)\n for i, v in enumerate(var_choice[b]):\n variables[v] = np.zeros((nz, nlat, nlon))\n variables['NBstd'] = variables['NBbar']\n\n if len(lat) == 0:\n pass\n else:\n for j in range(nlat):\n for i in range(nlon):\n if len(lat) < j+1:\n pass\n else:\n time_weight = 1.\n weight = general.compute_weight(lon_rad[j, i],\n lat_rad[j, i],\n xlon_rad, xlat_rad,\n reso_rad)\n weight *= time_weight\n drho = RI - variables['Ribar'][:, j, i]\n dbvf2 = BVF2 - variables['BVF2bar'][:, j, i]\n dCT = CT - variables['CTbar'][:, j, i]\n interpolator = ip.interp1d(\n variables['Ribar'][:, j, i],\n zref, bounds_error=False)\n p = gsw.p_from_z(-zref, lat[j])\n g = gsw.grav(lat[j], p)\n cs = gsw.sound_speed(\n variables['SAbar'][:, j, i],\n variables['CTbar'][:, j, i], p)\n rho0 = variables['Ribar'][:, j, i].copy()\n zrho = interpolator(RI)\n dzstar = zrho-zref\n dz = dzstar/(1.+rho0*g*dzstar/(cs**2*drho))\n dSA = SA - variables['SAbar'][:, j, i]\n\n weight = weight[:, np.newaxis] + \\\n np.zeros_like(zref)\n weight[np.where(np.isnan(dz) | np.isnan(\n drho) | np.isnan(dCT) | np.isnan(dSA))] = 0.\n weight[nanidx] = 0.\n\n def average(field):\n return np.nansum(weight*field, axis=0)\n if b == 'zstd':\n variables['CTstd'][:, j, i] = average(dCT**2)\n variables['SAstd'][:, j, i] = average(dSA**2)\n variables['BVF2std'][:, j,\n i] = average(dbvf2**2)\n variables['Ristd'][:, j, i] = average(drho**2)\n\n if b == 'zdz':\n\n variables['DZmean'][:, j, i] = average(dz)\n variables['DZstd'][:, j, i] = average(dz**2)\n variables['DZskew'][:, j, i] = average(dz**3)\n variables['EAPE'][:, j, i] = average(dz*drho)\n\n if b in ['zstd', 'zdz']:\n coef = 1./(variables['NBstd']-1)\n coef[variables['NBstd'] < 2] = np.nan\n\n if b == 'zstd':\n variables['CTstd'] = np.sqrt(coef*variables['CTstd'])\n variables['SAstd'] = np.sqrt(coef*variables['SAstd'])\n variables['Ristd'] = np.sqrt(coef*variables['Ristd'])\n variables['BVF2std'] = np.sqrt(coef*variables['BVF2std'])\n\n elif b == 'zdz':\n variables['DZmean'] *= coef\n variables['DZstd'] = np.sqrt(coef*variables['DZstd'])\n variables['DZskew'] *= coef/variables['DZstd']**3\n variables['EAPE'] *= 0.5*coef\n\n variables['lat'] = lat_deg\n variables['lon'] = lon_deg\n print(variables['CTstd'].min())\n print(variables['CTstd'].max())\n print(variables['SAstd'].min())\n print(variables['SAstd'].max())\n\n return variables", "def test_compare_zmats(self):\n z_1 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2|3_0|1', 'A_2|3_0|1_1|0', None),\n ('R_2|3_0|1', 'A_2|3_0|1_1|0', 'D_3_1_0_2')),\n 'vars': {'R_1_0': 1.2451214479859707, 'D_3_1_0_2': 
180.00000435340846, 'R_2|3_0|1': 1.0308198031527174,\n 'A_2|3_0|1_1|0': 112.42663889936155}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n z_2 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2|3_0|1', 'A_2|3_0|1_1|0', None),\n ('R_2|3_0|1', 'A_2|3_0|1_1|0', 'D_3_1_0_2')),\n 'vars': {'R_1_0': 1.2458481980184417, 'D_3_1_0_2': 359.99999758516344, 'R_2|3_0|1': 1.0292894916884854,\n 'A_2|3_0|1_1|0': 115.61126115172507}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n z_3 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2|3_0|1', 'A_2|3_0|1_1|0', None),\n ('R_2|3_0|1', 'A_2|3_0|1_1|0', 'D_3_1_0_2')),\n 'vars': {'R_1_0': 1.24584819, 'D_3_1_0_2': 360, 'R_2|3_0|1': 1.0292894916884854,\n 'A_2|3_0|1_1|0': 115.61126115172507}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n self.assertFalse(converter.compare_zmats(z_1, z_2))\n self.assertTrue(converter.compare_zmats(z_2, z_2))\n self.assertTrue(converter.compare_zmats(z_2, z_3))", "def process_image(overviews, db_graph, input_filename, color, out_raster_srs):\n if verbose > 0:\n print(\"~~~process_image\")\n input_image = gdal.Open(input_filename)\n stem = Path(input_filename).stem\n if not(\"dataSet\" in overviews):\n overviews['dataSet'] = {}\n overviews['dataSet']['boundingBox'] = {}\n overviews['dataSet']['limits'] = {}\n\n tile_limits = get_tile_limits(input_filename)\n\n if not(\"LowerCorner\" in overviews['dataSet']['boundingBox']):\n overviews['dataSet']['boundingBox'] = tile_limits\n else:\n if tile_limits['LowerCorner'][0] < overviews['dataSet']['boundingBox']['LowerCorner'][0]:\n overviews['dataSet']['boundingBox']['LowerCorner'][0] = tile_limits['LowerCorner'][0]\n if tile_limits['LowerCorner'][1] < overviews['dataSet']['boundingBox']['LowerCorner'][1]:\n overviews['dataSet']['boundingBox']['LowerCorner'][1] = tile_limits['LowerCorner'][1]\n if tile_limits['UpperCorner'][0] > overviews['dataSet']['boundingBox']['UpperCorner'][0]:\n overviews['dataSet']['boundingBox']['UpperCorner'][0] = tile_limits['UpperCorner'][0]\n if tile_limits['UpperCorner'][1] > overviews['dataSet']['boundingBox']['UpperCorner'][1]:\n overviews['dataSet']['boundingBox']['UpperCorner'][1] = tile_limits['UpperCorner'][1]\n\n # for z in tiles:\n for tile_z in range(overviews['level']['min'], overviews['level']['max'] + 1):\n print('Niveau de zoom : ', tile_z)\n\n resolution = overviews['resolution'] * 2 ** (overviews['level']['max'] - tile_z)\n\n MinTileCol = \\\n math.floor(round((tile_limits['LowerCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8))\n MinTileRow = \\\n math.floor(round((overviews['crs']['boundingBox']['ymax']-tile_limits['UpperCorner'][1])/(resolution*overviews['tileSize']['height']),8))\n MaxTileCol = \\\n math.ceil(round((tile_limits['UpperCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8)) - 1\n MaxTileRow = \\\n math.ceil(round((overviews['crs']['boundingBox']['ymax']-tile_limits['LowerCorner'][1])/(resolution*overviews['tileSize']['height']),8)) - 1\n\n if not( str(tile_z) in overviews['dataSet']['limits'] ):\n overviews['dataSet']['limits'][str(tile_z)] = {\n 'MinTileCol': MinTileCol,\n 'MinTileRow': MinTileRow,\n 'MaxTileCol': MaxTileCol,\n 'MaxTileRow': MaxTileRow,\n }\n\n else:\n if MinTileCol < overviews['dataSet']['limits'][str(tile_z)]['MinTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileCol'] = MinTileCol\n if MinTileRow < 
overviews['dataSet']['limits'][str(tile_z)]['MinTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileRow'] = MinTileRow\n if MaxTileCol > overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol'] = MaxTileCol\n if MaxTileRow > overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow'] = MaxTileRow\n\n for tile_x in range(MinTileCol, MaxTileCol + 1): \n for tile_y in range(MinTileRow, MaxTileRow + 1):\n # on cree une image 3 canaux pour la tuile\n opi = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on reech l'OPI dans cette image\n gdal.Warp(opi, input_image)\n # si necessaire on cree le dossier de la tuile\n tile_dir = args.cache+'/'+str(tile_z)+'/'+str(tile_y)+'/'+str(tile_x)\n Path(tile_dir).mkdir(parents=True, exist_ok=True)\n # on export en jpeg (todo: gerer le niveau de Q)\n PNG_DRIVER.CreateCopy(tile_dir+\"/\"+stem+\".png\", opi)\n # on cree une image mono canal pour la tuile\n mask = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on rasterise la partie du graphe qui concerne ce cliche\n gdal.Rasterize(mask, db_graph,\n SQLStatement='select geom from ' + args.table + ' where cliche = \\''+stem+'\\' ')\n img_mask = mask.GetRasterBand(1).ReadAsArray()\n # si le mask est vide, on a termine\n val_max = np.amax(img_mask)\n if val_max > 0:\n # on cree le graphe et l'ortho\n ortho = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n graph = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n if Path(tile_dir+\"/ortho.png\").is_file():\n existing_ortho = gdal.Open(tile_dir+\"/ortho.png\")\n existing_graph = gdal.Open(tile_dir+\"/graph.png\")\n else:\n existing_ortho = False\n existing_graph = False\n for i in range(3):\n opi_i = opi.GetRasterBand(i+1).ReadAsArray()\n if existing_ortho:\n ortho_i = existing_ortho.GetRasterBand(i+1).ReadAsArray()\n else:\n ortho_i = ortho.GetRasterBand(i+1).ReadAsArray()\n opi_i[(img_mask == 0)] = 0\n ortho_i[(img_mask != 0)] = 0\n ortho.GetRasterBand(i+1).WriteArray(np.add(opi_i, ortho_i))\n if existing_graph:\n graph_i = existing_graph.GetRasterBand(i+1).ReadAsArray()\n else:\n graph_i = graph.GetRasterBand(i+1).ReadAsArray()\n graph_i[(img_mask != 0)] = color[i]\n graph.GetRasterBand(i+1).WriteArray(graph_i)\n existing_ortho = None\n existing_graph = None\n PNG_DRIVER.CreateCopy(tile_dir+\"/ortho.png\", ortho)\n PNG_DRIVER.CreateCopy(tile_dir+\"/graph.png\", graph)" ]
[ "0.68493986", "0.6522732", "0.62442964", "0.62096614", "0.6187095", "0.60611725", "0.60029554", "0.5962458", "0.5880883", "0.58487236", "0.5846595", "0.5792119", "0.57755446", "0.57681596", "0.5670428", "0.55730975", "0.5560811", "0.5551571", "0.55226356", "0.55010265", "0.5425854", "0.54105395", "0.54085886", "0.5401833", "0.53954977", "0.5377328", "0.5376893", "0.53562975", "0.5347076", "0.53305864", "0.5329128", "0.53253865", "0.53216124", "0.53145325", "0.5313748", "0.5302233", "0.5300853", "0.52817315", "0.5275051", "0.5263463", "0.5262771", "0.5240082", "0.52333885", "0.52305156", "0.52226126", "0.52173924", "0.5209621", "0.5197813", "0.5189624", "0.51815164", "0.51698697", "0.51679426", "0.5153631", "0.51469797", "0.51451844", "0.5121058", "0.5114411", "0.5110415", "0.5106509", "0.51032144", "0.5101796", "0.5097314", "0.5089722", "0.50891954", "0.50810915", "0.5078652", "0.50745267", "0.5072531", "0.50644255", "0.5057849", "0.5049986", "0.5045194", "0.50414", "0.5030309", "0.5024453", "0.50212455", "0.5016051", "0.5010401", "0.50103086", "0.50056165", "0.49969473", "0.49948537", "0.4993398", "0.49922097", "0.49906644", "0.49906033", "0.49905255", "0.49863213", "0.4982017", "0.4975571", "0.49700728", "0.49637908", "0.495984", "0.49509072", "0.49466792", "0.49298885", "0.49277285", "0.49250275", "0.4920888", "0.49170423" ]
0.71548915
0
Zonal statistics with rasters as input and rasters and lists as output
def zonalStatsToFeatureCollection(image,zonesImage,geometry,maxPixels,reducerType):
    # reducertype can be mean, max, sum, first. Count is always included for QA
    # the resolution of the zonesimage is used for scale
    reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"mean"),ee.Reducer.mean(),
        ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"max"),ee.Reducer.max(),
        ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"sum"),ee.Reducer.sum(),
        ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"first"),ee.Reducer.first(),
        ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"mode"),ee.Reducer.mode(),"error"))))
    )
    reducer = ee.Reducer(reducer).combine(reducer2=ee.Reducer.count(), sharedInputs=True).group(groupField=1, groupName="zones")

    scale = zonesImage.projection().nominalScale().getInfo()
    zonesImage = zonesImage.select(zonesImage.bandNames(),["zones"])

    totalImage = ee.Image(image).addBands(zonesImage)
    resultsList = ee.List(totalImage.reduceRegion(
        geometry=geometry,
        reducer=reducer,
        scale=scale,
        maxPixels=maxPixels
    ).get("groups"))

    resultsList = resultsList.map(ensure_default_properties)
    fc = ee.FeatureCollection(resultsList.map(dict_to_feature))
    return fc
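A minimal usage sketch for the function above, not part of the original record. It assumes the Earth Engine Python API (ee) is installed and authenticated, that the helpers ensure_default_properties and dict_to_feature referenced inside the function are defined elsewhere in the same module, and that the asset IDs, the maxPixels value, and the "mean" reducer choice below are hypothetical placeholders.

import ee

ee.Initialize()  # assumes Earth Engine credentials are already configured

# Hypothetical inputs: a single-band image of integer zone labels and a value image.
zones_img = ee.Image("users/example/zone_labels")   # placeholder asset id
value_img = ee.Image("users/example/value_raster")  # placeholder asset id
region = zones_img.geometry()                       # reduce over the zone image footprint

# Groups the value image by zone and reduces each group with the mean reducer;
# the result is an ee.FeatureCollection with one feature per zone (mean + pixel count).
fc = zonalStatsToFeatureCollection(
    image=value_img,
    zonesImage=zones_img,
    geometry=region,
    maxPixels=1e10,
    reducerType="mean",
)
print(fc.getInfo())  # triggers the server-side computation and prints per-zone statistics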
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zonalStatsToRaster(image,zonesImage,geometry,maxPixels,reducerType):\n # reducertype can be mean, max, sum, first. Count is always included for QA\n # the resolution of the zonesimage is used for scale\n\n reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mean\"),ee.Reducer.mean(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"max\"),ee.Reducer.max(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"sum\"),ee.Reducer.sum(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"first\"),ee.Reducer.first(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mode\"),ee.Reducer.mode(),\"error\"))))\n )\n reducer = ee.Reducer(reducer).combine(reducer2= ee.Reducer.count(), sharedInputs= True).group(groupField=1, groupName=\"zones\") \n\n scale = zonesImage.projection().nominalScale().getInfo()\n zonesImage = zonesImage.select(zonesImage.bandNames(),[\"zones\"])\n\n totalImage = ee.Image(image).addBands(zonesImage)\n resultsList = ee.List(totalImage.reduceRegion(\n geometry= geometry, \n reducer= reducer,\n scale= scale,\n maxPixels=maxPixels\n ).get(\"groups\"))\n\n resultsList = resultsList.map(ensure_default_properties); \n zoneList = mapList(resultsList, 'zones');\n countList = mapList(resultsList, 'count');\n valueList = mapList(resultsList, reducerType);\n\n valueImage = zonesImage.remap(zoneList, valueList).select([\"remapped\"],[reducerType])\n countImage = zonesImage.remap(zoneList, countList).select([\"remapped\"],[\"count\"])\n newImage = zonesImage.addBands(countImage).addBands(valueImage)\n return newImage,zoneList,valueList,countList", "def zonal_stats(src_poly, src_raster, operator=['mean'], features=None):\n assert src_raster.geo_transform is not None, \"src_raster.geo_transform should not be None\"\n assert isinstance(operator, list), \"operator should be a list of string. 
ex: ['mean']\"\n features = list(range(src_raster.bands)) if features is None else features\n assert len(features) == src_raster.bands, \"length of features should equals number of bands of the raster\"\n df_shp = src_poly.copy()\n df_shp['poly_idx'] = list(range(len(df_shp)))\n df_shp['poly_idx'] = df_shp['poly_idx'].astype('float')\n poly_rst = tgp.ShapeGrid.rasterize_layer(df_shp, src_raster.rows, src_raster.cols, src_raster.geo_transform, 'poly_idx', all_touched=True, no_data_value=np.nan)\n X_combine = np.concatenate([poly_rst.data, src_raster.data], axis=-1)\n X_combine_df = pd.DataFrame(X_combine.reshape(-1, src_raster.bands))\n X_groupby = X_combine_df.groupby(0, as_index=False)\n for op in operator:\n columns = {0:'poly_idx'}\n for f_idx, f in enumerate(features):\n columns[f_idx+1] = f'zonal_{op}_{f}'\n if op == 'mean':\n df_shp = df_shp.merge(X_groupby.mean().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'max':\n df_shp = df_shp.merge(X_groupby.max().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'min':\n df_shp = df_shp.merge(X_groupby.min().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'median':\n df_shp = df_shp.merge(X_groupby.median().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'sum':\n df_shp = df_shp.merge(X_groupby.sum().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'std':\n df_shp = df_shp.merge(X_groupby.std().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'count':\n df_shp = df_shp.merge(X_groupby.count().rename(columns=columns), on='poly_idx', how='left')\n else:\n assert False, \"no this operator\"\n return df_shp", "def zonal_statistics(wrksppath, timestamp, region, model):\n logging.info('\\nDoing Zonal Statistics on ' + region)\n # Define app workspace and sub-paths\n resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')\n shp_path = os.path.join(wrksppath, region, 'shapefiles', 'ffgs_' + region + '.shp')\n\n stat_file = os.path.join(wrksppath, region, model + 'results.csv')\n\n # check that there are resampled tiffs to do zonal statistics on\n if not os.path.exists(resampleds):\n logging.info('There are no resampled tiffs to do zonal statistics on. 
Skipping Zonal Statistics')\n return\n\n # List all Resampled GeoTIFFs\n files = os.listdir(resampleds)\n files = [tif for tif in files if tif.endswith('.tif')]\n files.sort()\n\n # do zonal statistics for each resampled tiff file and put it in the stats dataframe\n stats_df = pd.DataFrame()\n for i in range(len(files)):\n logging.info('starting zonal statistics for ' + files[i])\n ras_path = os.path.join(resampleds, files[i])\n stats = rasterstats.zonal_stats(\n shp_path,\n ras_path,\n stats=['count', 'max', 'mean'],\n geojson_out=True\n )\n\n timestep = files[i][:10]\n\n # for each stat that you get out, write it to the dataframe\n logging.info('writing the statistics for this file to the dataframe')\n for j in range(len(stats)):\n\n temp_data = stats[j]['properties']\n temp_data.update({'Forecast Timestamp': timestamp})\n temp_data.update({'Timestep': timestep})\n\n temp_df = pd.DataFrame([temp_data])\n stats_df = stats_df.append(temp_df, ignore_index=True)\n\n # write the resulting dataframe to a csv\n logging.info('\\ndone with zonal statistics, rounding values, writing to a csv file')\n stats_df = stats_df.round({'max': 1, 'mean': 1})\n stats_df.to_csv(stat_file, index=False)\n\n # delete the resampled tiffs now that we dont need them\n logging.info('deleting the resampled tiffs directory')\n shutil.rmtree(resampleds)\n\n return", "def ZonalStatsRasterArray(zonegeodf, rasterarr, transaffine, stats, nodatavalue=0):\n zonaloutput = zonal_stats(vectors=zonegeodf.geometry, raster=rasterarr, nodata=nodatavalue, affine=transaffine, stats=stats, all_touched=True)\n indexname = 'index' if zonegeodf.index.name is None else zonegeodf.index.name\n zonegeodf.reset_index(inplace=True)\n output = zonegeodf.join(pd.DataFrame(zonaloutput))\n output.set_index(indexname, inplace=True)\n return output", "def zonal_stats(in_path, raster, grid_id_name='GRIDMET_ID'):\n if not os.path.isfile(in_path):\n raise FileNotFoundError('Input summary CSV file given'+\\\n ' was invalid or not found')\n # look for fishnet created in 'in_path/spatial'\n path_root = os.path.split(in_path)[0]\n file_name = os.path.split(in_path)[1]\n # get variable names from input file prefix\n grid_var = file_name.split('_summ')[0]\n var_name = Path(raster).name.split('.')[0]\n # grid is in the \"spatial\" subdir of in_path\n grid_file = OPJ(path_root, 'spatial', 'grid.shp')\n # save zonal stats to summary CSV in same dir as raster as of version 0.3\n raster_root = os.path.split(raster)[0]\n out_file = OPJ(raster_root, 'zonal_stats.csv')\n\n # this error would only occur when using within Python \n if not os.path.isfile(grid_file):\n raise FileNotFoundError(\n os.path.abspath(grid_file),\n '\\ndoes not exist, create it using spatial.make_grid first'\n )\n print(\n 'Calculating', grid_var, 'zonal means for', var_name\n )\n\n # calc zonal stats and open for grid IDs\n with fiona.open(grid_file, 'r') as source:\n zs = zstats(source, raster, all_touched=True)\n grid_ids = [f['properties'].get(grid_id_name) for f in source]\n\n # get just mean values, zonal_stats can do other stats...\n means = [z['mean'] for z in zs]\n out_df = pd.DataFrame(\n data={\n grid_id_name: grid_ids, \n var_name: means\n }\n )\n out_df[grid_id_name] = out_df[grid_id_name].astype(int)\n # drop rows for cells outside of gridMET master grid\n out_df = out_df.drop(out_df[out_df[grid_id_name] == -999].index)\n\n # save or update existing csv file\n if not os.path.isfile(out_file):\n print(\n os.path.abspath(out_file),\n '\\ndoes not exist, creating file'\n )\n 
out_df.to_csv(out_file, index=False)\n else:\n # overwrite column values if exists, else append\n existing_df = pd.read_csv(out_file)\n existing_df[grid_id_name] = existing_df[grid_id_name].astype(int)\n if var_name in existing_df.columns:\n # may throw error if not same size as original grid\n try:\n existing_df.update(out_df)\n existing_df.to_csv(out_file, index=False) \n except:\n print('Zonal stats for this variable already exist but they',\n 'appear to have been calculated with a different grid',\n 'overwriting existing file at:\\n',\n os.path.abspath(out_file)\n )\n out_df.to_csv(out_file, index=False)\n else:\n existing_df = existing_df.merge(out_df, on=grid_id_name)\n #existing_df = pd.concat([existing_df, out_df], axis=1).drop_duplicates()\n existing_df.to_csv(out_file, index=False)", "def gen_zonal_stats(\n vectors, raster,\n layer=0,\n band=1,\n nodata=None,\n affine=None,\n stats=None,\n all_touched=True,\n percent_cover_selection=None,\n percent_cover_weighting=True,\n percent_cover_scale=20,\n categorical=False,\n category_map=None,\n add_stats=None,\n zone_func=None,\n raster_out=False,\n prefix=None,\n geojson_out=False, **kwargs):\n stats, run_count = check_stats(stats, categorical)\n\n # check inputs related to percent coverage\n percent_cover = False\n if percent_cover_weighting or percent_cover_selection is not None:\n percent_cover = True\n if percent_cover_scale is None:\n warnings.warn('No value for `percent_cover_scale` was given. '\n 'Using default value of 10.')\n percent_cover_scale = 10\n\n try:\n if percent_cover_scale != int(percent_cover_scale):\n warnings.warn('Value for `percent_cover_scale` given ({0}) '\n 'was converted to int ({1}) but does not '\n 'match original value'.format(\n percent_cover_scale, int(percent_cover_scale)))\n\n percent_cover_scale = int(percent_cover_scale)\n\n if percent_cover_scale <= 1:\n raise Exception('Value for `percent_cover_scale` must be '\n 'greater than one ({0})'.format(\n percent_cover_scale))\n\n except:\n raise Exception('Invalid value for `percent_cover_scale` '\n 'provided ({0}). Must be type int.'.format(\n percent_cover_scale))\n\n if percent_cover_selection is not None:\n try:\n percent_cover_selection = float(percent_cover_selection)\n except:\n raise Exception('Invalid value for `percent_cover_selection` '\n 'provided ({0}). Must be able to be converted '\n 'to a float.'.format(percent_cover_selection))\n\n # if not all_touched:\n # warnings.warn('`all_touched` was not enabled but an option requiring '\n # 'percent_cover calculations was selected. 
Automatically '\n # 'enabling `all_touched`.')\n # all_touched = True\n\n with Raster(raster, affine, nodata, band) as rast:\n features_iter = read_features(vectors, layer)\n for _, feat in enumerate(features_iter):\n geom = shape(feat['geometry'])\n\n if 'Point' in geom.type:\n geom = boxify_points(geom, rast)\n percent_cover = False\n\n geom_bounds = tuple(geom.bounds)\n fsrc = rast.read(bounds=geom_bounds)\n\n if percent_cover:\n cover_weights = rasterize_pctcover_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n scale=percent_cover_scale,\n all_touched=all_touched)\n rv_array = cover_weights > (percent_cover_selection or 0)\n else:\n rv_array = rasterize_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n all_touched=all_touched)\n\n # nodata mask\n isnodata = (fsrc.array == fsrc.nodata)\n\n # add nan mask (if necessary)\n if np.issubdtype(fsrc.array.dtype, float) and \\\n np.isnan(fsrc.array.min()):\n isnodata = (isnodata | np.isnan(fsrc.array))\n\n # Mask the source data array\n # mask everything that is not a valid value or not within our geom\n masked = np.ma.MaskedArray(\n fsrc.array,\n mask=(isnodata | ~rv_array))\n\n # execute zone_func on masked zone ndarray\n if zone_func is not None:\n if not callable(zone_func):\n raise TypeError(('zone_func must be a callable '\n 'which accepts function a '\n 'single `zone_array` arg.'))\n zone_func(masked)\n\n if masked.compressed().size == 0:\n # nothing here, fill with None and move on\n feature_stats = dict([(stat, None) for stat in stats])\n if 'count' in stats: # special case, zero makes sense here\n feature_stats['count'] = 0\n else:\n if run_count:\n keys, counts = np.unique(masked.compressed(), return_counts=True)\n pixel_count = dict(zip([np.asscalar(k) for k in keys],\n [np.asscalar(c) for c in counts]))\n\n if categorical:\n feature_stats = dict(pixel_count)\n if category_map:\n feature_stats = remap_categories(category_map, feature_stats)\n else:\n feature_stats = {}\n\n if 'min' in stats:\n feature_stats['min'] = float(masked.min())\n if 'max' in stats:\n feature_stats['max'] = float(masked.max())\n if 'mean' in stats:\n if percent_cover:\n feature_stats['mean'] = float(\n np.sum(masked * cover_weights) /\n np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['mean'] = float(masked.mean())\n if 'count' in stats:\n if percent_cover:\n feature_stats['count'] = float(np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['count'] = int(masked.count())\n # optional\n if 'sum' in stats:\n if percent_cover:\n feature_stats['sum'] = float(np.sum(masked * cover_weights))\n else:\n feature_stats['sum'] = float(masked.sum())\n if 'std' in stats:\n feature_stats['std'] = float(masked.std())\n if 'median' in stats:\n feature_stats['median'] = float(np.median(masked.compressed()))\n if 'majority' in stats:\n feature_stats['majority'] = float(key_assoc_val(pixel_count, max))\n if 'minority' in stats:\n feature_stats['minority'] = float(key_assoc_val(pixel_count, min))\n if 'unique' in stats:\n feature_stats['unique'] = len(list(pixel_count.keys()))\n if 'range' in stats:\n try:\n rmin = feature_stats['min']\n except KeyError:\n rmin = float(masked.min())\n try:\n rmax = feature_stats['max']\n except KeyError:\n rmax = float(masked.max())\n feature_stats['range'] = rmax - rmin\n\n for pctile in [s for s in stats if s.startswith('percentile_')]:\n q = get_percentile(pctile)\n pctarr = masked.compressed()\n feature_stats[pctile] = np.percentile(pctarr, q)\n\n if 'nodata' in stats:\n featmasked = 
np.ma.MaskedArray(fsrc.array, mask=np.logical_not(rv_array))\n feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())\n\n if add_stats is not None:\n for stat_name, stat_func in add_stats.items():\n feature_stats[stat_name] = stat_func(masked)\n\n if raster_out:\n feature_stats['mini_raster_array'] = masked\n feature_stats['mini_raster_affine'] = fsrc.affine\n feature_stats['mini_raster_nodata'] = fsrc.nodata\n\n if prefix is not None:\n prefixed_feature_stats = {}\n for key, val in feature_stats.items():\n newkey = \"{}{}\".format(prefix, key)\n prefixed_feature_stats[newkey] = val\n feature_stats = prefixed_feature_stats\n\n if geojson_out:\n for key, val in feature_stats.items():\n if 'properties' not in feat:\n feat['properties'] = {}\n feat['properties'][key] = val\n yield feat\n else:\n yield feature_stats", "def zonal_stats(self, gdf, stats, all_touched=False):\n _ST = [\"count\", \"min\", \"max\", \"sum\", \"mean\", \"std\", \"median\"]\n\n def rmd(ds, stat):\n return {var: f\"{var}_{stat}\" for var in ds.raster.vars}\n\n def gen_zonal_stat(ds, geoms, stats, all_touched=False):\n dims = (ds.raster.y_dim, ds.raster.x_dim)\n for i, geom in enumerate(geoms):\n # add buffer to work with point geometries\n ds1 = ds.raster.clip_bbox(geom.bounds, buffer=2).raster.mask_nodata()\n if np.any(np.asarray(ds1.raster.shape) < 2):\n continue\n mask = full(ds1.raster.coords, nodata=0, dtype=np.uint8)\n features.rasterize(\n [(geom, 1)],\n out_shape=mask.raster.shape,\n fill=0,\n transform=mask.raster.transform,\n out=mask.data,\n all_touched=all_touched,\n )\n ds1 = ds1.where(mask == 1)\n dss = []\n for stat in stats:\n if stat in _ST:\n ds1_stat = getattr(ds1, stat)(dims)\n dss.append(ds1_stat.rename(rmd(ds1, stat)))\n elif isinstance(stat, str) and stat.startswith(\"q\"):\n qs = np.array([float(q) for q in stat.strip(\"q\").split(\",\")])\n dss.append(\n ds1.quantile(qs / 100, dims).rename(rmd(ds1, \"quantile\"))\n )\n elif callable(stat):\n dss.append(\n ds1.reduce(stat, dims).rename(rmd(ds1, stat.__name__))\n )\n else:\n raise ValueError(f\"Stat {stat} not valid.\")\n yield xr.merge(dss), i\n\n if isinstance(stats, str):\n stats = stats.split()\n elif callable(stats):\n stats = list([stats])\n\n if gdf.crs is not None and self.crs is not None and gdf.crs != self.crs:\n gdf = gdf.to_crs(self.crs)\n geoms = gdf[\"geometry\"].values\n\n ds = self._obj.copy()\n if isinstance(ds, xr.DataArray):\n if ds.name is None:\n ds.name = \"values\"\n ds = ds.to_dataset()\n\n out = list(gen_zonal_stat(ds, geoms, stats, all_touched))\n if len(out) == 0:\n raise IndexError(\"All geometries outside raster domain\")\n\n dss, idx = zip(*out)\n ds_out = xr.concat(dss, \"index\")\n ds_out[\"index\"] = xr.IndexVariable(\"index\", gdf.index.values[np.array(idx)])\n\n return ds_out", "def test_rasters_and_arrays(self):\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # Define array where latitudes are rows and longitude columns\n A1 = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)\n lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to 
east)\n for i in range(numlat):\n for j in range(numlon):\n A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == 105.25\n assert A1[0, 0] == linear_function(lon[0], lat[4])\n\n # Lower left corner\n assert A1[4, 0] == 103.25\n assert A1[4, 0] == linear_function(lon[0], lat[0])\n\n # Upper right corner\n assert A1[0, 7] == 112.25\n assert A1[0, 7] == linear_function(lon[7], lat[4])\n\n # Lower right corner\n assert A1[4, 7] == 110.25\n assert A1[4, 7] == linear_function(lon[7], lat[0])\n\n # Generate raster object and write\n projection = ('GEOGCS[\"WGS 84\",'\n 'DATUM[\"WGS_1984\",'\n 'SPHEROID[\"WGS 84\",6378137,298.2572235630016,'\n 'AUTHORITY[\"EPSG\",\"7030\"]],'\n 'AUTHORITY[\"EPSG\",\"6326\"]],'\n 'PRIMEM[\"Greenwich\",0],'\n 'UNIT[\"degree\",0.0174532925199433],'\n 'AUTHORITY[\"EPSG\",\"4326\"]]')\n geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n # Test conversion between geotransform and\n # geometry (longitudes and latitudes)\n longitudes, latitudes = R1.get_geometry()\n msg = 'Longitudes not as expected: %s' % str(longitudes)\n assert numpy.allclose(longitudes, [100.5, 101.5, 102.5, 103.5, 104.5,\n 105.5, 106.5, 107.5]), msg\n\n msg = 'Latitudes not as expected: %s' % str(latitudes)\n assert numpy.allclose(latitudes, [5.5, 6.5, 7.5, 8.5, 9.5]), msg\n\n gt = raster_geometry2geotransform(longitudes, latitudes)\n msg = ('Conversion from coordinates to geotransform failed: %s'\n % str(gt))\n assert numpy.allclose(gt, geotransform,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster object')\n assert numlat == R1.rows, msg\n assert numlon == R1.columns, msg\n\n # Write back to new (tif) file\n out_filename = unique_filename(suffix='.tif')\n R1.write_to_file(out_filename)\n assert R1.filename == out_filename\n\n # Check nodata in original layer\n assert numpy.isnan(R1.get_nodata_value())\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n assert R2.filename == out_filename\n\n # Check nodata in read layer\n assert numpy.isnan(R2.get_nodata_value())\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, numlat, numlon))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert numlat == R2.rows, msg\n assert numlon == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = 'Array values of written raster array were not as expected'\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n # Exercise projection __eq__ method\n assert R1.projection == R2.projection\n\n # Check that equality raises exception when type is wrong\n try:\n R1.projection == 234\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise 
Exception(msg)\n\n # Check keywords\n assert R1.keywords == R2.keywords\n\n # Check override of ==\n assert R1 == R2", "def output_rasters(self, arr, outdir, outname):\n\n outpath = os.path.join(outdir, outname)\n print('the outpath for file {} is {}'.format(outname, outpath))\n\n # get the geoinfo from sample tiff to output intermediate files\n ds = rasterio.open(self.geoproperties_file)\n band1 = arr\n with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n # TODO - Set an AWS Cloud flag in the config_dict file to activate this function or not...\n # delete files created locally and put in bucket\n # PathManager.s3_delete_local(from_file, bucket, prefix_no_slash)", "def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! 
This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]", "def get_z_ranges(self):\n\n summary = self.get_rasters_summary()\n\n # Convert to dict in format:\n # { 'stat' : { 'z': (min, max), ... } ... }\n\n ranges = summary.groupby(['stat', 'z'], as_index=False)\n ranges = ranges.agg({'min': 'min', 'max': 'max'})\n ranges['vals'] = ranges.apply(\n lambda row: {\n row['z']: (row['min'], row['max'])\n }, axis=1)\n ranges = ranges.groupby('stat')['vals'].apply(\n lambda group: group.values)\n ranges = ranges.apply(\n lambda group: {\n int(k): v for d in group for k,\n v in d.items()})\n\n return ranges.to_dict()", "def _sum_n_rasters(\n raster_path_list, target_raster_path):\n LOGGER.info('Summing %s rasters to %s', len(raster_path_list),\n target_raster_path)\n LOGGER.debug('Attempting to open %s', raster_path_list[0])\n pygeoprocessing.new_raster_from_base(\n raster_path_list[0], target_raster_path, gdal.GDT_Float32,\n [NODATA_FLOAT32_MIN])\n\n target_raster = gdal.OpenEx(\n target_raster_path, gdal.GA_Update | gdal.OF_RASTER)\n target_band = target_raster.GetRasterBand(1)\n\n n_pixels_to_process = (\n (target_raster.RasterXSize * target_raster.RasterYSize) *\n len(raster_path_list))\n n_pixels_processed = 0\n last_log_time = time.time()\n\n raster_tuple_list = []\n for raster_path in raster_path_list:\n raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)\n band = raster.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n raster_tuple_list.append((raster, band, nodata))\n\n for block_info in pygeoprocessing.iterblocks(\n (raster_path_list[0], 1), offset_only=True):\n\n sum_array = numpy.empty(\n (block_info['win_ysize'], block_info['win_xsize']),\n dtype=numpy.float32)\n sum_array[:] = 0.0\n\n # Assume everything is valid until proven otherwise\n pixels_touched = numpy.zeros(sum_array.shape, dtype=bool)\n for (_, band, nodata) in raster_tuple_list:\n if time.time() - last_log_time >= 5.0:\n percent_complete = round(\n n_pixels_processed / n_pixels_to_process, 4)*100\n LOGGER.info(f'Summation {percent_complete:.2f}% complete')\n last_log_time = time.time()\n\n array = band.ReadAsArray(**block_info)\n valid_pixels = slice(None)\n if nodata is not None:\n valid_pixels = ~utils.array_equals_nodata(array, nodata)\n\n sum_array[valid_pixels] += array[valid_pixels]\n pixels_touched[valid_pixels] = 1\n n_pixels_processed += sum_array.size # for logging\n\n sum_array[~pixels_touched] = NODATA_FLOAT32_MIN\n\n target_band.WriteArray(\n sum_array, block_info['xoff'], block_info['yoff'])\n\n LOGGER.info('Summation 
100.00% complete')\n raster_tuple_list = None\n\n target_band.ComputeStatistics(0)\n target_band = None\n target_raster = None", "def zonal_stats_workflow():\n save_as = \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/summary/monthly_quickflow.csv\"\n scenario_dict = {\n 'pre-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/pre_decline\",\n 'post-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/post_decline\",\n }\n df_list = []\n for scenario in scenario_dict.iterkeys():\n results_dict = {\n 'scenario': [],\n 'month': [],\n 'sum_quickflow': [],\n }\n folder = scenario_dict[scenario]\n aoi_shp = os.path.join(folder, 'aggregated_results.shp')\n for month in xrange(1, 13):\n qf_raster = os.path.join(\n folder, 'intermediate_outputs', 'qf_{}.tif'.format(month))\n zonal_stats = pygeoprocessing.zonal_statistics(\n (qf_raster, 1), aoi_shp)\n sum_QF = zonal_stats[0]['sum']\n results_dict['scenario'].append(scenario)\n results_dict['month'].append(month)\n results_dict['sum_quickflow'].append(sum_QF)\n results_df = pandas.DataFrame(data=results_dict)\n df_list.append(results_df)\n combined_list = pandas.concat(df_list)\n combined_list.to_csv(save_as, index=False)", "def zonal_grid_statistics(stats, zones, categories=None, grids=None,\n aspect=None, shortnames=True):\n # Check inputs\n zones = _validation.input_file(zones, 'grid', False)\n\n if not (stats.endswith('.txt') or stats.endswith('.csv')):\n stats += '.csv'\n\n if categories is None:\n category_list = 'NULL'\n elif type(categories) is str:\n categories = [_validation.input_file(categories, 'grid', False)]\n category_list = categories[0]\n elif type(categories) in (list, tuple):\n categories = _validation.input_file(categories, 'grid', False)\n category_list = ';'.join(categories)\n else:\n raise TypeError('Wrong argument type to categories!')\n\n if grids is None:\n grids_list = 'NULL'\n elif type(grids) is str:\n grids = [_validation.input_file(grids, 'grid', False)]\n grids_list = grids[0]\n elif type(grids) in (list, tuple):\n grids = _validation.input_file(grids, 'grid', False)\n grids_list = ';'.join(grids)\n else:\n raise TypeError('Wrong argument type to grids!')\n\n if aspect is None:\n aspect = 'NULL'\n elif type(aspect) is str:\n aspect = _validation.input_file(zones, 'grid', False)\n else:\n raise TypeError('Wrong argument type to grids!')\n\n # Check inputs\n shortnames = str(int(shortnames))\n # Create cmd\n cmd = ['saga_cmd', '-f=q', 'statistics_grid', '5', '-ZONES', zones,\n '-CATLIST', category_list, '-STATLIST', grids_list, '-ASPECT',\n aspect, '-OUTTAB', stats, '-SHORTNAMES', shortnames]\n # Run command\n flag = _env.run_command_logged(cmd)\n if not flag:\n raise EnvironmentError(_ERROR_TEXT.format(_sys._getframe().f_code.co_name, _env.errlog))", "def zonal_statistics(self, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n process_id = 'zonal_statistics'\n args = {\n 'imagery': self.graph,\n 'regions': regions_geojson,\n 'func': func,\n 'scale': scale,\n 'interval': interval\n }\n\n return self.graph_add_process(process_id, args)", "def exact_zonalstats(self, ras_path, vec_path, fid, col, stats, output_csv):\n cmd = \"exactextract --raster grid:%s --polygons %s --fid %s --stat %s=%s\\(grid\\) --output %s\" %(ras_path, vec_path, fid, col, stats, output_csv)\n # Apply 
zonalstatistics\n os.system(cmd)", "def comparing_urban_zonal_stats(self, zonal_path = '../data/zonal/', fid = 'uid', stats = 'sum', gpd_ls = ['gpw', 'ghs_pop', 'worldpop'], \n schema = 'urban_pop', table = 'global_grid'):\n \n # Create folder if does not already exist\n if not os.path.exists(zonal_path): \n os.makedirs(zonal_path)\n \n for iso in self.country_iso3:\n \n # Define name of temp shp\n file_name = 'temp.gpkg'\n # And file path\n file_path = '../data/gpkg/'\n # Define full path \n vec_path = ''.join([file_path + file_name])\n \n if os.path.exists(vec_path):\n os.remove(vec_path)\n \n # Join schema and table together\n layer = '.'.join([schema, table])\n # Define sql statement to extract from table e.g. urban_pop.global_grid \n sql = \"SELECT * FROM %s WHERE gid_0 LIKE '%s'\" %(layer, iso)\n # Define column name of output zonal stats\n\n # Define db connection class \n db_conn = postgres_conn(section = 'postgresql', config_path = '../src/config/', config_file = 'database.ini', country_iso3 = iso)\n # Get vector geometries from postgres and store as temp shp\n #db_conn.psql_to_shp(file_name, file_path, schema, table, sql)\n db_conn.psql_to_gpkg(file_name, file_path, schema, table, sql)\n \n # Define full vector path including layer name\n vec_path = vec_path + '[gridded]'\n \n for gpd in gpd_ls:\n \n col = gpd + '_' + stats\n output_path = '../data/zonal/' + iso + '_' + gpd + '.csv'\n\n if 'gpw' == gpd:\n \n # Define input raster path\n ras_path = '../data/gpw/cropped/gpw_' + iso + '.tif'\n \n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)\n \n # Apply zonal statistics if db is ghs_pop\n elif 'ghs_pop' == gpd:\n \n # Define input raster path\n ras_path = '../data/ghs_pop/cropped/ghs_pop_' + iso + '.tif'\n\n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)\n \n # Apply zonal statistics if db is worldpop\n elif 'worldpop' == gpd:\n \n # Define input raster path\n ras_path = '../data/worldpop/MOSAIC_ppp_prj_2015/ppp_prj_2015_' + iso + '.tif'\n\n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = 
asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def useZstat(zstat, file_path_name_save, file_path_conte, file_path_name_resting_atlas):\n\n import matplotlib.pyplot as plt\n import os\n from glob import glob\n import numpy as np\n import nibabel as nb\n import nibabel.gifti as gifti\n\n # Crucial: xvfb must be imported and started before importing mayavi\n from xvfbwrapper import Xvfb\n print('XVb pre')\n vdisplay = Xvfb()\n vdisplay.start()\n\n print('pre maya')\n # Crashes on this line if run with plain python (not xvfb-run ... python) and if xvfbwrapper is after it.\n from mayavi import mlab\n print('post maya')\n from tvtk.api import tvtk\n print('post tvtk')\n import math\n\n print('display')\n mlab.options.offscreen = True #offscreen window for rendering\n\n img = nb.load(file_path_name_resting_atlas)\n #img = nb.load('/Users/MathiasMacbook/Desktop/rfMRI_REST1_LR_Atlas.dtseries.nii')\n mim = img.header.matrix.mims[1]\n #for idx, bm in enumerate(mim.brainModels):\n # print((idx, bm.indexOffset, bm.brainStructure))\n bm1 = mim.brainModels[0]\n lidx = bm1.vertexIndices.indices\n bm2 = mim.brainModels[1]\n ridx = bm1.surfaceNumberOfVertices + bm2.vertexIndices.indices\n bidx = np.concatenate((lidx, ridx))\n\n axis = [0, 0, 1]\n theta = np.pi\n\n inflated = True\n split_brain = True\n\n surf = gifti.read(file_path_conte + '/Conte69.L.midthickness.32k_fs_LR.surf.gii') \n verts_L_data = surf.darrays[0].data\n faces_L_data = surf.darrays[1].data\n\n surf = gifti.read(file_path_conte + '/Conte69.R.midthickness.32k_fs_LR.surf.gii') \n verts_R_data = surf.darrays[0].data\n faces_R_data = surf.darrays[1].data\n\n if inflated:\n surf = gifti.read(file_path_conte + '/Conte69.L.inflated.32k_fs_LR.surf.gii')\n verts_L_display = surf.darrays[0].data\n faces_L_display = surf.darrays[1].data\n surf = gifti.read(file_path_conte + '/Conte69.R.inflated.32k_fs_LR.surf.gii')\n verts_R_display = surf.darrays[0].data\n faces_R_display = surf.darrays[1].data\n else:\n verts_L_display = verts_L_data.copy()\n verts_R_display = verts_R_data.copy()\n faces_L_display = faces_L_data.copy()\n faces_R_display = faces_R_data.copy()\n\n verts_L_display[:, 0] -= max(verts_L_display[:, 0])\n verts_R_display[:, 0] -= min(verts_R_display[:, 0])\n verts_L_display[:, 1] -= (max(verts_L_display[:, 1]) + 1)\n verts_R_display[:, 1] -= (max(verts_R_display[:, 1]) + 1)\n\n faces = np.vstack((faces_L_display, verts_L_display.shape[0] + faces_R_display))\n\n if split_brain:\n verts2 = rotation_matrix(axis, theta).dot(verts_R_display.T).T\n else:\n verts_L_display[:, 1] -= np.mean(verts_L_display[:, 1])\n verts_R_display[:, 1] -= np.mean(verts_R_display[:, 1])\n verts2 = verts_R_display\n\n verts_rot = np.vstack((verts_L_display, verts2))\n verts = np.vstack((verts_L_data, verts_R_data))\n #print verts.shape\n #print faces.shape\n\n if not os.path.exists(os.path.split(file_path_name_save)[0]):\n os.makedirs(os.path.split(file_path_name_save)[0]) \n\n print('use zstat')\n img = nb.load(zstat)\n print('loaded img')\n \n threshold = 2.3 # 1000, lower limit\n display_threshold = 6 #8000, upper limit\n\n data = img.get_data()\n aff = img.affine\n indices = np.round((np.linalg.pinv(aff).dot(np.hstack((verts, \n np.ones((verts.shape[0], 1)))).T))[:3, 
:].T).astype(int)\n scalars2 = data[indices[:, 0], indices[:, 1], indices[:, 2]]\n scalars2[np.abs(scalars2) < threshold] = 0.\n scalars = np.zeros(verts.shape[0])\n scalars[bidx] = scalars2[bidx]\n\n negative = positive = False\n if np.any(scalars < 0):\n negative = True\n if np.any(scalars > 0):\n positive = True\n\n nlabels = 2\n vmin = 0\n vmax = 0\n if negative and positive:\n maxval = max(-scalars.min(), scalars.max())\n if maxval > display_threshold:\n maxval = display_threshold\n vmin = -maxval\n vmax = maxval\n nlabels = 3\n vmin = -display_threshold ######\n vmax = display_threshold ######\n elif negative:\n vmin = scalars.min()\n if vmin < -display_threshold:\n vmin = -display_threshold\n vmax = 0\n vmin = -display_threshold ######\n elif positive:\n vmax = scalars.max()\n if vmax > display_threshold:\n vmax = display_threshold\n vmin = 0\n vmax = display_threshold ######\n #print zstat\n \n dual_split = True\n\n fig1 = mlab.figure(1, bgcolor=(0, 0, 0))\n mlab.clf()\n mesh = tvtk.PolyData(points=verts_rot, polys=faces)\n mesh.point_data.scalars = scalars\n mesh.point_data.scalars.name = 'scalars'\n surf = mlab.pipeline.surface(mesh, colormap='autumn', vmin=vmin, vmax=vmax)\n if dual_split:\n verts_rot_shifted = verts_rot.copy()\n verts_rot_shifted = rotation_matrix(axis, theta).dot(verts_rot_shifted.T).T\n verts_rot_shifted[:, 2] -= (np.max(verts_rot_shifted[:, 2]) - np.min(verts_rot_shifted[:, 2]))\n verts_rot_shifted[:, 0] -= np.max(verts_rot_shifted[:, 0])\n mesh2 = tvtk.PolyData(points=verts_rot_shifted, polys=faces)\n mesh2.point_data.scalars = scalars\n mesh2.point_data.scalars.name = 'scalars'\n surf2 = mlab.pipeline.surface(mesh2, colormap='autumn', vmin=vmin, vmax=vmax)\n colorbar = mlab.colorbar(surf, nb_labels=nlabels) #, orientation='vertical')\n lut = surf.module_manager.scalar_lut_manager.lut.table.to_array()\n\n if negative and positive:\n half_index = lut.shape[0] / 2\n index = int(half_index * threshold / vmax)\n lut[(half_index - index + 1):(half_index + index), :] = 192\n lut[(half_index + index):, :] = 255 * plt.cm.autumn(np.linspace(0, 255, half_index - index).astype(int))\n lut[:(half_index - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, half_index - index).astype(int))\n elif negative:\n index = int(lut.shape[0] * threshold / abs(vmin))\n lut[(lut.shape[0] - index):, :] = 192\n lut[:(lut.shape[0] - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n elif positive:\n index = int(lut.shape[0] * threshold / vmax)\n lut[:index, :] = 192\n lut[index:, :] = 255 * plt.cm.autumn(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n lut[:, -1] = 255\n\n surf.module_manager.scalar_lut_manager.lut.table = lut\n if dual_split:\n surf2.module_manager.scalar_lut_manager.lut.table = lut\n surf.module_manager.scalar_lut_manager.show_scalar_bar = False\n surf.module_manager.scalar_lut_manager.show_legend = False\n surf.module_manager.scalar_lut_manager.label_text_property.font_size = 10\n surf.module_manager.scalar_lut_manager.show_scalar_bar = True\n surf.module_manager.scalar_lut_manager.show_legend = True\n mlab.draw()\n\n translate = [0, 0, 0]\n if inflated:\n zoom = -700\n else:\n zoom = -600\n if dual_split:\n if inflated:\n translate = [0, 0, -104.01467148]\n else:\n translate = [0, 0, -54.76305802] \n if inflated:\n zoom = -750\n else:\n zoom = -570\n \n #mlab.view(0, 90.0, zoom, translate)\n mlab.view(9, 90.0)\n\n print(file_path_name_save)\n \n mlab.savefig(file_path_name_save, figure=fig1, magnification=5)\n\n 
vdisplay.stop()", "def zonal_statistics(self, imagery, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n\n graph = {\n 'process_id': 'zonal_statistics',\n 'imagery': imagery.graph,\n 'regions': regions_geojson,\n 'func': func,\n 'scale': scale,\n 'interval': interval\n }\n\n imagery.graph = graph\n\n return imagery", "def getCompStats(self, photoz = \"z_peak\", verbose = True):\n\n specz = self.zout[\"z_spec\"]\n photoz = self.zout[photoz]\n\n dz = (photoz - specz)\n diff = (dz / (1.+specz))\n\n nmad = 1.4826 * np.median( np.abs( dz - np.median(dz) ) )\n mean_offset = np.mean(diff)\n median_offset = np.median(diff)\n dz1s = np.mean(np.abs(diff))\n\n outlier1 = ((np.abs(diff) > 0.15).sum(dtype = float) / self.NOBJ)\n outlier2 = ((np.abs(diff) > 3.*nmad).sum(dtype = float) / self.NOBJ)\n\n # print np.mean(np.abs(diff))\n\n # print nmad, outlier1, outlier2, mean_offset, median_offset\n\n if verbose:\n print \"#\"*35\n print \"NMAD: \\t\\t\\t{0:1.3f}\".format(nmad)\n print \"dz/1+z:\\t\\t\\t{0:1.3f}\".format(dz1s)\n print \"nu 1: \\t\\t\\t{0:1.1f}%\".format(outlier1*100.)\n print \"nu 2: \\t\\t\\t{0:1.1f}%\".format(outlier2*100.)\n print \"mean offset: \\t\\t{0:1.3f}\".format(mean_offset)\n print \"median offset: \\t\\t{0:1.3f}\".format(median_offset)\n print \"#\"*35\n\n keys = [\"nmad\", \"nu1\", \"nu2\", \"mean_offset\", \"median_offset\"]\n values = [nmad, outlier1, outlier2, mean_offset, median_offset]\n\n return dict(zip(keys, values))", "def zonal_avg2(data,Log=False):\n print 'setting up the destination grid'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n print 'computing weights for grid cell'\n ilist = []\n jlist = []\n wghts2D = []\n wghts3D = []\n for i in range(nlat):\n for j in range(nlon):\n i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)\n ilist.append(i_inds)\n jlist.append(j_inds)\n dist = gc_dist(lon_t[i], lat_t[i], tlon, tlat)\n # make weights=0 on land\n work2D = 1./MA.array(dist,mask=data[0,...].mask)\n wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)\n ,0))\n\n work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)\n wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)\n ,0))\n\n #print 'computing zonal average'\n return lon_t, lat_t, ilist, jlist, wghts2D, wghts3D", "def azmap (scores, compare, dimension=0):\r\n mns = amean(compare,dimension)\r\n sstd = asamplestdev(compare,0)\r\n return (scores - mns) / sstd", "def reduce_rasters(stack, statistic, no_data_value=None, dtype=None):\n if statistic not in STATISTICS:\n percentile = parse_percentile_statistic(statistic)\n if percentile is 
None:\n raise KeyError('Unknown statistic \"{}\"'.format(statistic))\n else:\n statistic = \"percentile\"\n\n if len(stack) == 0:\n raise ValueError(\"Cannot reduce a zero-length stack\")\n\n # get the output array properties (dtype, no_data_value, shape)\n if dtype is None:\n dtype = stack[0][\"values\"].dtype\n if no_data_value is None:\n no_data_value = stack[0][\"no_data_value\"]\n shape = stack[0][\"values\"].shape\n\n # sum, count and nans output do not contain no data: fill zeroes right away\n if statistic in {\"sum\", \"count\", \"nans\"}:\n fill_value = 0\n else:\n fill_value = no_data_value\n\n # create the output array\n out = np.full(shape, fill_value, dtype)\n\n if statistic == \"last\":\n # populate 'out' with the last value that is not 'no data'\n for data in stack:\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n out[index] = data[\"values\"][index]\n elif statistic == \"first\":\n # populate 'out' with the first value that is not 'no data'\n for data in stack[::-1]:\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n out[index] = data[\"values\"][index]\n elif statistic == \"count\":\n # count the number of values that are not 'no data'\n for data in stack:\n out += get_index(data[\"values\"], data[\"no_data_value\"])\n else:\n if statistic == \"percentile\":\n func = partial(np.nanpercentile, q=percentile)\n else:\n func = STATISTICS[statistic]\n # transform 'no data' into 'nan' to be able to use numpy functions\n # NB: the dtype is at least float16 to accomodate NaN\n stack_array = np.full(\n (len(stack),) + shape, np.nan, np.result_type(dtype, np.float16)\n )\n for i, data in enumerate(stack):\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n stack_array[i, index] = data[\"values\"][index]\n\n # protect against all-NaN slice warnings and errors\n not_all_nan = ~np.all(np.isnan(stack_array), axis=0)\n\n # perform the math\n out[not_all_nan] = func(stack_array[:, not_all_nan], axis=0)\n\n return {\"values\": out, \"no_data_value\": no_data_value}", "def obj_s2n_z(s2n_dict, z_bins, flux_bins, otype, outfile=None, ax=None):\n logs = get_logger()\n nz = z_bins.size\n nfx = flux_bins.size\n s2n_sum = np.zeros((nz-1,nfx-1))\n s2n_N = np.zeros((nz-1,nfx-1)).astype(int)\n # Loop on exposures+wedges (can do just once if these are identical for each)\n for jj, wave in enumerate(s2n_dict['waves']):\n # Turn wave into z\n zELG = wave / 3728. 
- 1.\n z_i = np.digitize(zELG, z_bins) - 1\n m_i = np.digitize(s2n_dict['OII'][jj]*1e17, flux_bins) - 1\n mmm = []\n for ll in range(nfx-1): # Only need to do once\n mmm.append(m_i == ll)\n #\n for kk in range(nz-1):\n all_s2n = s2n_dict['s2n'][jj][:,z_i==kk]\n for ll in range(nfx-1):\n if np.any(mmm[ll]):\n s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])\n s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]\n\n sty_otype = get_sty_otype()\n\n # Plot\n if ax is None:\n fig = plt.figure(figsize=(6, 6.0))\n ax= plt.gca()\n # Title\n fig.suptitle('{:s}: Redshift Summary'.format(sty_otype[otype]['lbl']),\n fontsize='large')\n\n # Plot em up\n z_cen = (z_bins + np.roll(z_bins,-1))/2.\n lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]\n mxy = 1e-9\n for ss in range(nfx-1):\n if np.sum(s2n_N[:,ss]) == 0:\n continue\n lbl = 'OII(1e-17) = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])\n ax.plot(z_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],\n label=lbl, color=sty_otype[otype]['color'])\n mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))\n\n ax.set_xlabel('Redshift')\n ax.set_xlim(z_bins[0], z_bins[-1])\n ax.set_ylabel('Mean S/N per Ang in dz bins')\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_ylim(0.1, mxy*1.1)\n\n legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.3,\n handletextpad=0.3, fontsize='medium', numpoints=1)\n\n # Finish\n plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)\n plt.subplots_adjust(top=0.92)\n if outfile is not None:\n plt.savefig(outfile, dpi=600)\n print(\"Wrote: {:s}\".format(outfile))", "def raw2outputs(raw, z_vals, rays_d, render_mask=False):\n raw2alpha = lambda x, y: 1. - torch.exp(-x * y)\n device = raw.device\n\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n dists = torch.cat([dists, torch.tensor([1e-2], device=device).expand(dists[..., :1].shape)], -1) # [N_rays, N_samples]\n\n dists = dists * torch.norm(rays_d[..., None, :], dim=-1)\n\n rgb = raw[..., :3]\n\n alpha = raw2alpha(raw[..., 3], dists) # [N_rays, N_samples]\n weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1), device=device), 1. 
- alpha + 1e-10], -1), -1)[:,:-1]\n\n rgb_map = torch.sum(weights[..., None] * rgb, -2) # [N_rays, 3]\n\n weights_norm = weights.detach() + 1e-5\n weights_norm /= weights_norm.sum(dim=-1, keepdim=True)\n depth_map = torch.sum(weights_norm * z_vals, -1)\n\n if render_mask:\n density = raw[..., 3] # [N_rays, N_samples]\n mask_map = torch.sum(weights * density, dim=1) # [N_rays,]\n return rgb_map, depth_map, weights_norm, mask_map\n\n return rgb_map, depth_map, weights_norm", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def test_read_raster(self):\n\n # FIXME (Ole): Some datasets show very large differences between extrema in the array and \n # those computed by GDAL. 
This may warrant another bug report to GEOS\n \n for coverage_name in ['test_grid', \n 'shakemap_padang_20090930',\n 'population_padang_1',\n 'population_padang_2',\n #'fatality_padang_1',\n #'fatality_padang_2'\n ]:\n \n \n filename = 'data/%s.asc' % coverage_name\n \n for R in [Raster(filename), read_coverage(filename)]:\n \n min, max = R.get_extrema()\n \n A = R.get_data(nan=True)\n B = R.get_data(nan=True)\n\n assert numpy.nanmax(A - B) == 0\n\n \n # FIXME (Ole): These tolerances are not really acceptable. Report to GEOS.\n assert numpy.allclose(min, numpy.nanmin(A[:]), rtol=1e-2)\n \n if coverage_name != 'population_padang_2':\n assert numpy.allclose(max, numpy.nanmax(A[:]), rtol=1e-2)", "def merge_rasters(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n rasters = [str(x) for x in i.joinpath('subnational').iterdir() if not x.name.endswith('txt') if x.name.endswith('norm.tif')]\n outfile = i.joinpath(f'{self.country}_{month}_normalised.tif')\n tiffs = \" \".join(rasters)\n gdal_cmd = f\"gdal_merge.py -o {outfile} -a_nodata -99999.0 -of gtiff {tiffs}\"\n subprocess.call(gdal_cmd, shell=True)", "def azs (a):\r\n zscores = []\r\n for item in a:\r\n zscores.append(z(a,item))\r\n return N.array(zscores)", "def merge_hpx_counts_cubes(filelist):\n out_prim = None\n out_skymap = None\n out_ebounds = None\n\n datalist_gti = []\n exposure_sum = 0.\n nfiles = len(filelist)\n ngti = np.zeros(nfiles, int)\n\n out_name = None\n\n for i, filename in enumerate(filelist):\n fin = fits.open(filename)\n sys.stdout.write('.')\n sys.stdout.flush()\n if i == 0:\n out_prim = update_null_primary(fin[0], out_prim)\n out_name = fin[1].name\n\n map_in = HpxMap.create_from_hdulist(fin)\n out_skymap = update_hpx_skymap_allsky(map_in, out_skymap)\n if i == 0:\n try:\n out_ebounds = update_ebounds(fin[\"EBOUNDS\"], out_ebounds)\n except KeyError:\n out_ebounds = update_energies(fin[\"ENERGIES\"], out_ebounds)\n try:\n (gti_data, exposure, tstop) = extract_gti_data(fin[\"GTI\"])\n datalist_gti.append(gti_data)\n exposure_sum += exposure\n ngti[i] = len(gti_data)\n except KeyError:\n pass\n\n if i == 0:\n first = fin\n elif i == nfiles - 1:\n try:\n date_end = fin[0].header['DATE-END']\n except KeyError:\n date_end = None\n else:\n fin.close()\n\n out_skymap_hdu = out_skymap.create_image_hdu(\"SKYMAP\")\n\n hdulist = [out_prim, out_skymap_hdu, out_ebounds]\n\n if len(datalist_gti) > 0:\n out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])\n out_gti.header['EXPOSURE'] = exposure_sum\n out_gti.header['TSTOP'] = tstop\n hdulist.append(out_gti)\n\n for hdu in hdulist:\n if date_end:\n hdu.header['DATE-END'] = date_end\n\n out_prim.update_header()\n sys.stdout.write(\"!\\n\")\n\n return fits.HDUList(hdulist)", "def GeneralProfile(binsz, filename):\n data_image = fits.open(filename)\n data_image= data_image[1]\n glons = np.arange(LonLow, LonHigh+binsz, binsz) \n glon_bounds = Table()\n glon_bounds['CHANNEL'] = np.arange(len(glons) - 1)\n glon_bounds['GLON_MIN'] = np.float64(glons[:-1])\n glon_bounds['GLON_MAX'] = np.float64(glons[1:])\n data = compute_longitude_profile(glon_bounds = glon_bounds, binsz=binsz, image=data_image, datatype=2, emission=4, tev=0)\n return data", "def astrometry_script(filename, catalog=\"PS\", rotation_scaling=True, xy_transformation=True, fine_transformation=True, images=False, vignette=3,vignette_rectangular=1., cutouts=None, ra=None, dec=None, projection_ra=None, projection_dec=None, verbose=False, 
save_images=False, ignore_header_rot=False, radius=-1., save_bad_result=False, silent=False, sigma_threshold_for_source_detection=5, high_res = False, hdul_idx=0, filename_for_sources=None, FWHM=4):\n #print(\"Program version: 1.2\")\n\n report = {}\n if(images):\n plt.ioff()\n warnings.simplefilter('ignore', UserWarning)\n fits_image_filename = filename\n\n print(\"> Astrometry for {} \".format(fits_image_filename))\n\n with fits.open(fits_image_filename) as hdul:\n #print(hdul.info())\n #print(hdul[0].header)\n\n hdu = hdul[hdul_idx]\n #hdu.verify('fix')\n hdr = hdu.header\n\n\n image_or = hdul[hdul_idx].data.astype(float)\n median = np.nanmedian(image_or)\n image_or[np.isnan(image_or)]=median\n image = image_or - median\n\n observation = find_sources(image, vignette,vignette_rectangular,cutouts, sigma_threshold_for_source_detection, FWHM=FWHM)\n #print(observation)\n\n #changed order of positions to [(x,y), (x,y),...] for compatibility with photutils 1.4\n xcenters = np.array(observation['xcenter'])\n ycenters = np.array(observation['ycenter'])\n positions = [(xcenters[i], ycenters[i]) for i in range(len(xcenters))]\n apertures = CircularAperture(positions, r=4.)\n\n\n #world coordinates\n if(not silent):\n print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n print(WCS(hdr))\n\n hdr[\"NAXIS1\"] = image.shape[0]\n hdr[\"NAXIS2\"] = image.shape[1]\n\n #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n wcsprm = WCS(hdr).wcs\n wcsprm_original = WCS(hdr).wcs\n wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, ra, dec,projection_ra, projection_dec, ignore_header_rot, radius)\n if(verbose):\n print(WCS(wcsprm.to_header()))\n coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n if(not PIXSCALE_UNCLEAR):\n if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n if(not silent):\n print(\"central value outside of the image, moving it to the center\")\n coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n #print(wcsprm)\n\n\n\n #better: put in nice wrapper! with repeated tries and maybe try synchron!\n if(not silent):\n print(\">Dowloading catalog data\")\n radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n catalog_data = query.get_data(coord, radius, catalog)\n report[\"catalog\"] = catalog\n #reference = reference.query(\"mag <20\")\n \n\n if(catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n if(not silent):\n print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n catalog_data2 = query.get_data(coord, radius, \"PS\")\n report[\"catalog\"] = \"PS\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n elif(catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n if(not silent):\n print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n report[\"catalog\"] = \"GAIA\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n\n max_sources = 400\n if(INCREASE_FOV_FLAG):\n max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n if(catalog_data.shape[0]>max_sources):\n catalog_data = catalog_data.nsmallest(400, \"mag\")\n #remove duplicates in catalog?\n\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Input for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n\n plt.xlim(-200,image.shape[0]+200)\n plt.ylim(-200,image.shape[1]+200)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_before.pdf\")\n\n ###tranforming to match the sources\n if(not silent):\n print(\"---------------------------------\")\n print(\">Finding the transformation\")\n if(rotation_scaling):\n if(not silent):\n print(\"Finding scaling and rotation\")\n wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=verbose)\n if(xy_transformation):\n if(not silent):\n print(\"Finding offset\")\n wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= verbose, silent=silent)\n\n #correct subpixel error\n compare_threshold = 3\n if(high_res):\n compare_threshold = 100\n obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=compare_threshold)#3\n if (len(distances) == 0): #meaning the list is empty\n best_score = 0\n else:\n rms = np.sqrt(np.mean(np.square(distances)))\n best_score = len(obs_x)/(rms+10) #start with current best score\n fine_transformation_success = False\n if(fine_transformation):\n print(\"Finding scaling and rotation\")\n lis = [2,3,5,8,10,6,4, 20,2,1,0.5]\n if(high_res):\n lis = [200,300,100,150,80,40,70, 20, 100, 30,9,5]\n skip_rot_scale = True\n for i in lis:\n wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i, compare_threshold=compare_threshold, skip_rot_scale=skip_rot_scale)\n if(i == 20):\n #only allow rot and scaling for the last few tries\n skip_rot_scale = False\n if(score> best_score):\n wcsprm = wcsprm_new\n best_score = score\n fine_transformation_success = True\n if not fine_transformation_success:\n if(not silent):\n print(\"Fine transformation did not improve result so will be 
discarded.\")\n else:\n if(not silent):\n print(\"Fine transformation applied to improve result\")\n #register.calculate_rms(observation, catalog_data,wcs)\n\n #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n wcs =WCS(wcsprm.to_header())\n if(verbose):\n print(wcs)\n from astropy.wcs import utils\n scales = utils.proj_plane_pixel_scales(wcs)\n #print(scales)\n cdelt = wcsprm.get_cdelt()\n #print(cdelt)\n scale_ratio = scales/cdelt\n #print(scale_ratio)\n pc = np.array(wcsprm.get_pc())\n pc[0,0] = pc[0,0]/scale_ratio[0]\n pc[1,0] = pc[1,0]/scale_ratio[1]\n pc[0,1] = pc[0,1]/scale_ratio[0]\n pc[1,1] = pc[1,1]/scale_ratio[1]\n wcsprm.pc = pc\n wcsprm.cdelt = scales\n\n #WCS difference before and after\n if(not silent):\n print(\"> Compared to the input the Wcs was changed by: \")\n scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n if(not silent):\n print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n #sources:\n #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n def unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. \"\"\"\n return vector / max(np.linalg.norm(vector), 1e-10)\n def matrix_angle( B, A ):\n \"\"\" comment cos between vectors or matrices \"\"\"\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n #bugfix: multiplying by cdelt otherwise the calculated angle is off by a tiny bit\n rotation_angle = matrix_angle(wcsprm.get_pc()@wcsprm.get_cdelt(), wcsprm_original.get_pc()@wcsprm_original.get_cdelt()) /2./np.pi*360.\n if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n text = \"counterclockwise\"\n else:\n text = \"clockwise\"\n if(not silent):\n print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n if(not silent):\n print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n\n\n #check final figure\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Result for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_after.pdf\")\n if(not silent):\n print(\"--- Evaluate how good the transformation is ----\")\n dic_rms = register.calculate_rms(observation, catalog_data,wcsprm)\n #updating file\n converged = determine_if_fit_converged(dic_rms, catalog_data, observation, wcsprm, image.shape[0], 
image.shape[1], silent)\n report[\"converged\"] = converged\n report[\"matches\"] = dic_rms[\"matches\"]\n report[\"match_radius\"] = dic_rms[\"radius_px\"]\n if(converged or save_bad_result):\n write_wcs_to_hdr(fits_image_filename, wcsprm, report, hdul_idx=hdul_idx)\n if(filename_for_sources != None):\n wcs =WCS(wcsprm.to_header())\n observation_on_sky = wcs.wcs_pix2world(observation[[\"xcenter\",\"ycenter\"]], 1)\n #catalog_from_obs = np.zeros(observation_on_sky.shape[0], dtype={'names':('ra', 'dec', 'aperture_sum'),'formats':('f8', 'f8', 'f8')})\n catalog_from_obs = pd.DataFrame()\n catalog_from_obs[\"ra\"]= observation_on_sky[:,0]\n catalog_from_obs[\"dec\"]= observation_on_sky[:,1]\n catalog_from_obs[\"aperture_sum\"]= observation[\"aperture_sum\"]\n catalog_from_obs[\"mag\"]= -1.* observation[\"aperture_sum\"]#this is fine since we only use the mag to order the sources!\n catalog_from_obs.to_csv(filename_for_sources+\".csv\")\n if(images):\n plt.show()\n\n return converged, dic_rms #dictionary with short info about fit, \"matches\" gives a number of objects matched within certain radius", "def zonal_avg(data,Log=False):\n print 'computing zonal average'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n if data.ndim == 3:\n new_data = MA.zeros((data.shape[0],nlat,nlon),dtype=float)\n elif data.ndim == 2:\n new_data = MA.zeros((nlat,nlon),dtype=float)\n else:\n print 'Check field dimensions'\n sys.exit()\n\n # geometric mean?\n if Log:\n work = MA.log(data)\n else:\n work = data\n\n # remap data to new regular grid\n for i in range(nlat):\n #print 'lat = %.2f'%(lat_t[i])\n for j in range(nlon):\n new_data[:,i,j] = extract_loc(lon_t[j],lat_t[i],tlon,tlat,work)\n\n # compute zonal average\n if Log:\n za_data = (MA.exp(MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape))))\n else:\n za_data = (MA.average(new_data,axis=-1,\n weights=N.resize(area,new_data.shape)))\n\n return za_data, lat_t", "def average_rasters(*raster_list, clamp=None):\r\n nodata_list = [\r\n pygeoprocessing.get_raster_info(path)['nodata'][0]\r\n for path in raster_list[:-1]]\r\n target_nodata = -1.\r\n\r\n def average_op(*array_list):\r\n result = numpy.empty_like(array_list[0])\r\n result[:] = target_nodata\r\n valid_mask = numpy.ones(result.shape, dtype=numpy.bool)\r\n clamped_list = []\r\n for array, nodata in zip(array_list, nodata_list):\r\n valid_mask &= array != nodata\r\n if clamp:\r\n clamped_list.append(\r\n numpy.where(array > clamp, clamp, array))\r\n else:\r\n clamped_list.append(array)\r\n\r\n if valid_mask.any():\r\n array_stack = numpy.stack(clamped_list)\r\n result[valid_mask] = numpy.average(\r\n array_stack[numpy.broadcast_to(\r\n valid_mask, array_stack.shape)].reshape(\r\n len(array_list), -1), axis=0)\r\n 
return result\r\n\r\n pygeoprocessing.raster_calculator(\r\n [(path, 1) for path in raster_list[:-1]], average_op,\r\n raster_list[-1], gdal.GDT_Float32, target_nodata)", "def lzs (inlist):\r\n zscores = []\r\n for item in inlist:\r\n zscores.append(z(inlist,item))\r\n return zscores", "def testStatisticsRamp(self):\n\n \n nx = 101\n ny = 64\n img = afwImage.ImageF(afwGeom.Extent2I(nx, ny))\n \n z0 = 10.0\n dzdx = 1.0\n mean = z0 + (nx/2)*dzdx\n stdev = 0.0\n for y in range(ny):\n for x in range(nx):\n z = z0 + dzdx*x\n img.set(x, y, z)\n stdev += (z - mean)*(z - mean)\n\n stdev = math.sqrt(stdev/(nx*ny - 1))\n \n stats = afwMath.makeStatistics(img, afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN)\n testmean = stats.getValue(afwMath.MEAN)\n teststdev = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(stats.getValue(afwMath.NPOINT), nx*ny)\n self.assertEqual(testmean, mean)\n self.assertEqual(teststdev, stdev )\n \n stats = afwMath.makeStatistics(img, afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean, meanErr = stats.getResult(afwMath.MEAN)\n sd = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(mean, img.get(nx/2, ny/2))\n self.assertEqual(meanErr, sd/math.sqrt(img.getWidth()*img.getHeight()))\n \n # ===============================================================================\n # sjb code for percentiles and clipped stats\n \n stats = afwMath.makeStatistics(img, afwMath.MEDIAN)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEDIAN))\n \n stats = afwMath.makeStatistics(img, afwMath.IQRANGE)\n self.assertEqual(dzdx*(nx - 1)/2.0, stats.getValue(afwMath.IQRANGE))\n \n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEANCLIP))", "def get_resample(name: str) -> str:\n\n methods = {\n \"first\":\n \"\"\"\nimport numpy as np\n\ndef first(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in reversed(range(len(in_ar))):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"last\":\n \"\"\"\nimport numpy as np\n\ndef last(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in range(len(in_ar)):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"max\":\n \"\"\"\nimport numpy as np\n\ndef max(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.max(in_ar, axis=0)\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"average\":\n \"\"\"\nimport numpy as np\n\ndef average(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n div = np.zeros(in_ar[0].shape)\n for i in range(len(in_ar)):\n div += (in_ar[i] != 0)\n div[div == 0] = 1\n \n y = np.sum(in_ar, axis = 0, dtype = 'uint16')\n y = y / div\n \n np.clip(y,0,255, out = out_ar)\n\"\"\"}\n\n if name not in methods:\n raise ValueError(\n \"ERROR: Unrecognized resampling method (see documentation): '{}'.\".\n format(name))\n\n return methods[name]", "def run_zrtest(self): # Unweighted z-test\r\n n = reduce(lambda x, y: x+(y.bw_ratio() > 0), self.sorted_r, 0)\r\n if n == 0: return (0, 0)\r\n avg = reduce(lambda x, y: x+y.bw_ratio(), self.sorted_r, 0)/float(n)\r\n def notlambda(x, y):\r\n if y.bw_ratio() <= 0: return x+0\r\n else: return x+(y.bw_ratio()-avg)*(y.bw_ratio()-avg)\r\n stddev = math.sqrt(reduce(notlambda, 
self.sorted_r, 0)/float(n))\r\n if not stddev: return (avg, stddev)\r\n for r in self.sorted_r:\r\n if r.bw_ratio() > 0:\r\n r.z_ratio = abs((r.bw_ratio()-avg)/stddev)\r\n r.prob_zr = TorUtil.zprob(-r.z_ratio)\r\n return (avg, stddev)", "def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d", "def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. 
#else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl", "def proc_modscag(fn_list, extent=None, t_srs=None):\n #Use cubic spline here for improve upsampling \n ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')\n stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list) \n #Create stack here - no need for most of mastack machinery, just make 3D array\n #Mask values greater than 100% (clouds, bad pixels, etc)\n ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)\n\n stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)\n stack_count.set_fill_value(0)\n stack_min = ma_stack.min(axis=0).astype(np.uint8)\n stack_min.set_fill_value(0)\n stack_max = ma_stack.max(axis=0).astype(np.uint8)\n stack_max.set_fill_value(0)\n stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)\n stack_med.set_fill_value(0)\n\n out_fn = stack_fn + '_count.tif'\n iolib.writeGTiff(stack_count, out_fn, ds_list[0])\n out_fn = stack_fn + '_max.tif'\n iolib.writeGTiff(stack_max, out_fn, ds_list[0])\n out_fn = stack_fn + '_min.tif'\n iolib.writeGTiff(stack_min, out_fn, ds_list[0])\n out_fn = stack_fn + '_med.tif'\n iolib.writeGTiff(stack_med, out_fn, ds_list[0])\n\n ds = gdal.Open(out_fn)\n return ds", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n # print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # TODO - make the default configurable.\n# if src.crs == None:\n# src.crs = CRS.from_epsg(4326)\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n # print(type(vrt))\n # save the file as an enumerated tiff. 
reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def calc_shape_statistics(self, stat_names):\n stats = {}\n try:\n all_props = [regionprops(m) for m in self.masks]\n except TypeError:\n raise TypeError(\"masks not the right type\")\n for stat in stat_names:\n stats[stat] = np.mean([p[0][stat] for p in all_props])\n return stats", "def desforestation_base(ras, threshold=25):\n \"\"\"input raster path -> return stats\"\"\"\n\n # get area grid\n area_grid = raster_area_lat(ras) # true WGS84 spheroid\n\n # getting numpy object\n ras_np_raw = gdal_tif_to_numpy(ras)\n # masking data not need as further masked below\n\n # create mask greater than 25, the same used by Hansen\n # ras_sub_mask = numpy.ma.masked_greater_equal(ras_np_raw, 10)\n ras_sub_mask = numpy.ma.masked_greater_equal(ras_np_raw, threshold)\n\n # use count (no mask) NOT size (including mask)\n # count_pixel = ras_sub.count()\n count_pixel = ras_sub_mask.mask.sum()\n\n # True is treated as 1\n total_area = (ras_sub_mask.mask * area_grid).sum(dtype ='float64')\n\n result = [count_pixel, total_area]\n\n return result", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n print(type(vrt))\n # save the file as an enumerated tiff. 
reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all", "def analyze_results(results): #, result_nonprivate):\n res_dimensions = zip(*results)\n mean, std = [], []\n \n for resdim in res_dimensions:\n mean.append ( numpy.average(resdim) )\n std.append ( numpy.std(resdim) )\n\n return mean, std", "def output_rasters(self, arr, outdir, outname):\n\n # make the subdirectories if we need 'em\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n if self.config_dict['path_mode'] == 'local':\n outpath = os.path.join(outdir, outname)\n print('the outpath for file {} is {}'.format(outname, outpath))\n\n band1 = arr\n with rasterio.open(outpath, 'w', 
driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n else:\n print('PATH MODE in config is not set properly for the local implementation of output_Rasters')\n sys.exit(0)", "def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True", "def drizzle_array_groups(sci_list, wht_list, wcs_list, scale=0.1, kernel='point', pixfrac=1., verbose=True):\n from drizzlepac.astrodrizzle import adrizzle\n from stsci.tools import logutil\n log = logutil.create_logger(__name__)\n \n # Output header / WCS \n header, outputwcs = compute_output_wcs(wcs_list, pixel_scale=scale)\n shape = (header['NAXIS2'], header['NAXIS1'])\n \n # Output arrays\n outsci = np.zeros(shape, dtype=np.float32)\n outwht = np.zeros(shape, dtype=np.float32)\n outctx = np.zeros(shape, dtype=np.int32)\n \n # Do drizzle\n N = len(sci_list)\n for i in range(N):\n if verbose:\n log.info('Drizzle array {0}/{1}'.format(i+1, N))\n \n adrizzle.do_driz(sci_list[i].astype(np.float32, copy=False), \n wcs_list[i], \n wht_list[i].astype(np.float32, copy=False),\n outputwcs, outsci, outwht, outctx, 1., 'cps', 1,\n wcslin_pscale=wcs_list[i].pscale, uniqid=1, \n pixfrac=pixfrac, kernel=kernel, fillval=0, \n stepsize=10, wcsmap=None)\n \n return outsci, outwht, outctx, header, outputwcs", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n 
type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n 
flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n 
input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def calculate_maps(self,statistic=None,titles=None,filenames=\"auto\") :\n\n if statistic is not None :\n self.statistic = statistic\n\n if isinstance(self.statistic,str) :\n self.statistic = [self.statistic]\n \n # declare array of nans to fill with maps\n self.maps = np.full([self.num_hists * len(self.statistic)] + \n list(np.shape(self.hists[0])[1:]),np.nan)\n\n if titles is not None :\n self.titles = titles\n else :\n self.titles = [str(x) for x in range(self.num_hists * len(self.statistic))]\n\n if isinstance(filenames,str) and filenames == \"auto\" :\n self.filenames = [str(x) for x in range(self.num_hists * len(self.statistic))]\n else :\n self.filenames = filenames\n\n\n mapnum = 0\n hist_inds = []\n stat_inds = []\n for i in range(len(self.statistic)) :\n for j in range(self.num_hists) :\n \n self.maps[mapnum,:,:] = calculate_map_from_hists(\n self.hists[j],self.statistic[i],self.hist_specs[j]['bin_centers'])\n\n if titles is None :\n if filenames == \"auto\" :\n self.titles[mapnum], self.filenames[mapnum] = gen_map_title(**{\n **self.hist_specs[j],\n 'statistic':self.statistic[i]},filename=True)\n else :\n self.titles[mapnum] = gen_map_title(**{\n **self.hist_specs[j],\n 'statistic':self.statistic[i]},filename=False)\n\n hist_inds = hist_inds + [j]\n stat_inds = stat_inds + [i]\n\n mapnum += 1\n\n self.num_maps = mapnum\n\n self.map_specs = {'hist' : hist_inds, 'statistic' : stat_inds}\n\n return self", "def get_area_stats(\n src,\n bounds,\n max_img_size=512,\n indexes=None,\n nodata=None,\n resampling_method=\"bilinear\",\n bbox_crs=\"epsg:4326\",\n histogram_bins=20,\n histogram_range=None,\n):\n if isinstance(indexes, int):\n indexes = [indexes]\n elif isinstance(indexes, tuple):\n indexes = list(indexes)\n\n with rasterio.open(src) as src_dst:\n bounds = transform_bounds(bbox_crs, src_dst.crs, *bounds, densify_pts=21)\n\n vrt_params = dict(add_alpha=True, resampling=Resampling[resampling_method])\n\n indexes = indexes if indexes is not None else src_dst.indexes\n nodata = nodata if nodata is not None else src_dst.nodata\n\n def _get_descr(ix):\n \"\"\"Return band description.\"\"\"\n name = src_dst.descriptions[ix - 1]\n if not name:\n name = \"band{}\".format(ix)\n return name\n\n band_descriptions = [(ix, _get_descr(ix)) for ix in indexes]\n\n vrt_transform, vrt_width, vrt_height = get_vrt_transform(\n src_dst, bounds, bounds_crs=src_dst.crs\n )\n vrt_params.update(\n dict(transform=vrt_transform, width=vrt_width, height=vrt_height)\n )\n\n width = round(vrt_width) if vrt_width < max_img_size else max_img_size\n height = round(vrt_height) if vrt_height < max_img_size else max_img_size\n out_shape = (len(indexes), width, height)\n if nodata is not None:\n vrt_params.update(dict(nodata=nodata, add_alpha=False, src_nodata=nodata))\n\n if has_alpha_band(src_dst):\n vrt_params.update(dict(add_alpha=False))\n\n with WarpedVRT(src_dst, **vrt_params) as vrt:\n arr = 
vrt.read(out_shape=out_shape, indexes=indexes, masked=True)\n if not arr.any():\n return None, band_descriptions\n\n params = {}\n if histogram_bins:\n params.update(dict(bins=histogram_bins))\n if histogram_range:\n params.update(dict(range=histogram_range))\n\n stats = {\n indexes[b]: _stats(arr[b], **params)\n for b in range(arr.shape[0])\n if vrt.colorinterp[b] != ColorInterp.alpha\n }\n\n return stats, band_descriptions", "def test_reading_and_writing_of_real_rasters(self):\n\n for rastername in ['Earthquake_Ground_Shaking_clip.tif',\n 'Population_2010_clip.tif',\n 'shakemap_padang_20090930.asc',\n 'population_padang_1.asc',\n 'population_padang_2.asc']:\n\n filename = '%s/%s' % (TESTDATA, rastername)\n R1 = read_layer(filename)\n assert R1.filename == filename\n\n # Check consistency of raster\n A1 = R1.get_data()\n M, N = A1.shape\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster file %s' % R1.filename)\n assert M == R1.rows, msg\n assert N == R1.columns, msg\n\n # Test conversion between geotransform and\n # geometry (longitudes and latitudes)\n longitudes, latitudes = R1.get_geometry()\n gt = raster_geometry2geotransform(longitudes, latitudes)\n msg = ('Conversion from coordinates to geotransform failed: %s'\n % str(gt))\n assert numpy.allclose(gt, R1.get_geotransform(),\n rtol=1.0e-12, atol=1.0e-12), msg\n\n # Write back to new file\n for ext in ['.tif']: # Would like to also have , '.asc']:\n out_filename = unique_filename(suffix=ext)\n write_raster_data(A1,\n R1.get_projection(),\n R1.get_geotransform(),\n out_filename,\n keywords=R1.keywords)\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n assert R2.filename == out_filename\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, M, N))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert M == R2.rows, msg\n assert N == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = ('Array values of written raster array were not as '\n 'expected')\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n msg = 'Keywords were different: %s != %s' % (R1.keywords,\n R2.keywords)\n assert R1.keywords == R2.keywords, msg\n\n # Use overridden == and != to verify\n assert R1 == R2\n assert not R1 != R2\n\n # Check equality within tolerance\n R3 = R1.copy()\n\n R3.data[-1, -1] += 1.0e-5 # This is within tolerance\n assert R1 == R3\n\n R3.data[-1, -1] += 1.0e-2 # This is outside tolerance\n assert R1 != R3\n\n # Check that equality raises exception when type is wrong\n try:\n R1 == Vector()\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)", "def testStatsZebra(self):\n image2 = self.image.Factory(self.image, True)\n #\n # Add 1 to every other row, so the variance is 1/4\n #\n self.assertEqual(image2.getHeight()%2, 0)\n width = image2.getWidth()\n for y in range(1, image2.getHeight(), 2):\n sim = image2.Factory(image2, afwGeom.Box2I(afwGeom.Point2I(0, y), afwGeom.Extent2I(width, 1)),\n afwImage.LOCAL)\n sim += 1\n\n if display:\n 
ds9.mtv(self.image, frame = 0)\n ds9.mtv(image2, frame = 1)\n\n stats = afwMath.makeStatistics(image2,\n afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean = stats.getResult(afwMath.MEAN)\n n = stats.getValue(afwMath.NPOINT)\n sd = stats.getValue(afwMath.STDEV)\n\n self.assertEqual(mean[0], image2.get(0, 0) + 0.5)\n self.assertEqual(sd, 1/math.sqrt(4.0)*math.sqrt(n/(n - 1)))\n self.assertAlmostEqual(mean[1], sd/math.sqrt(image2.getWidth()*image2.getHeight()), 10)\n\n meanSquare = afwMath.makeStatistics(image2, afwMath.MEANSQUARE).getValue()\n self.assertEqual(meanSquare, 0.5*(image2.get(0, 0)**2 + image2.get(0, 1)**2))", "def run(layers):\n\n # Value above which people are regarded affected\n # For this dataset, 0 is no data, 1 is cloud, 2 is normal water level\n # and 3 is overflow.\n threshold = 0\n\n # Identify hazard and exposure layers\n inundation = get_hazard_layer(layers)\n\n [population] = get_exposure_layers(layers)\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n\n # Scale the population layer\n P = population.get_data(nan=0.0, scaling=True)\n I = numpy.where(D > threshold, P, 0)\n\n # Assume an evenly distributed population for Gender\n G = 0.5\n pregnant_ratio = 0.024 # 2.4% of women are estimated to be pregnant\n\n # Calculate breakdown\n P_female = P * G\n P_male = P - P_female\n P_pregnant = P_female * pregnant_ratio\n\n I_female = I * G\n I_male = I - I_female\n I_pregnant = I_female * pregnant_ratio\n\n # Generate text with result for this study\n total = str(int(sum(P.flat) / 1000))\n count = str(int(sum(I.flat) / 1000))\n\n total_female = str(int(sum(P_female.flat) / 1000))\n total_male = str(int(sum(P_male.flat) / 1000))\n total_pregnant = str(int(sum(P_pregnant.flat) / 1000))\n\n affected_female = str(int(sum(I_female.flat) / 1000))\n affected_male = str(int(sum(I_male.flat) / 1000))\n affected_pregnant = str(int(sum(I_pregnant.flat) / 1000))\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'total': total, 'count': count,\n 'total_female': total_female, 'affected_female': affected_female,\n 'total_male': total_male, 'affected_male': affected_male,\n 'total_pregnant': total_pregnant, 'affected_pregnant': affected_pregnant,\n })\n return R", "def remove_all_rasters():\n for fname in g.list_grouped(['raster'])['PERMANENT']:\n g.run_command('g.remove', flags='f', type='raster',\n name=fname)", "def getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,\n upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,\n image_dates, imageSummary):\n\n # list containing median depths for each image\n median_depths = list()\n median_depths_est = list()\n\n # contains output data for JSON file\n depth_output = {}\n\n # num of images\n num_images = len(imgs)\n\n # create output dictionary for images\n depths = dict()\n\n # create excel workbook and add worksheet\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n\n # create format\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n\n # add titles\n worksheet.write(0, 0, \"Image\", cell_format)\n worksheet.write(0, 1, \"Date\", cell_format)\n worksheet.write(0, len(tensors) + 2, \"Median Depth (mm)\", cell_format)\n worksheet.write(0, 
len(tensors) + 3, \"Median Estimate (mm)\", cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i+2, (\"Stake %s\" % str(i)), cell_format)\n\n # start from the first cell\n row = 1\n col = 0\n\n # image iterator\n iterator = 0\n\n # iterate through images\n for img_ in tqdm.tqdm(imgs):\n # create an image to overlay points on if debugging\n if(debug):\n img_overlay = img_.copy()\n\n # list to hold calculated depths\n depths_stake = list()\n estimate_stake = list()\n\n # get image name\n img_name = img_names[iterator]\n\n # reset column\n col = 0\n\n # write to excel file\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)\n col = 2\n\n # get intersection coordiantes\n coords_stake = intersectionCoords[img_name]\n\n # get blob intersection distances\n intersection_dist_stake = intersectionDist[img_name]\n\n # iterate through stakes in image\n for i, stake in enumerate(coords_stake):\n # if stake is valid and intersection point was found\n if stakeValidity[img_name][i] and stake[\"average\"][1] != False:\n # add reference circles to output image if debugging\n # shows intersection point of image with reference to template\n if(debug):\n cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)\n cv2.circle(img_overlay, (int(stake[\"average\"][0]), int(stake[\"average\"][1])), 5, (0,255,0), 2)\n\n # calculate change in snow depth in mm\n tensor = actualTensors[img_name][i] if actualTensors[img_name][i] != True else tensors[i]\n depth_change = ((templateIntersections[i][1] - upperBorder) - stake[\"average\"][1]) * tensor\n\n # calculate change in snow depth using blob distances\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0\n\n # write to excel file\n worksheet.write(row, col + i, \"%.2f (%.2f)\" % (depth_change, distance_estimate), cell_format)\n\n # add to list\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n\n # if stake wasn't valid or intersection point not found\n else:\n # if stake was valid\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, \"Not Found\", cell_format)\n # invalid stake\n else:\n worksheet.write(row, col + i, \"Invalid Stake\", cell_format)\n\n # append false to array\n depths_stake.append(False)\n estimate_stake.append(False)\n\n # output debug image\n if(debug):\n cv2.imwrite(debug_directory + img_name, img_overlay)\n\n # add list to dictionary\n depths[img_name] = depths_stake\n\n # determine median depth\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n\n if(len(valid_depths) > 0):\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n\n # add to median depth list\n median_depths.append(median)\n median_depths_est.append(median_est)\n\n # write median to excel file\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, \"%.2f\" % median, cell_format)\n worksheet.write(row, len(tensors) + 3, \"%.2f\" % median_est, cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, \"0.0\", 
cell_format)\n worksheet.write(row, len(tensors) + 3, \"0.0\", cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, \"n/a\", cell_format)\n worksheet.write(row, len(tensors) + 3, \"n/a\", cell_format)\n\n # increment row\n row += 1\n\n # increment iterator\n iterator += 1\n\n # update image summary\n imageSummary[img_name][\" \"] = \"\"\n imageSummary[img_name][\"Stake (Depth Calculation)\"] = \"Depth (mm) Estimate (mm)\"\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][\" %d \" % (e+1)] = \"%0.2f %0.2f \" % \\\n (depth, estimate_stake[e])\n else:\n imageSummary[img_name][\" %d \" % (e+1)] = \"%s %s \" % \\\n (\"n/a\", \"n/a\")\n\n # close workbook\n workbook.close()\n\n # remove negative values\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n\n # generate plot\n fig,ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Snow Depth (mm)\")\n plt.xticks(rotation=75)\n plt.tight_layout()\n\n # save figure\n plt.savefig(debug_directory + \"depth-graph.jpg\")\n plt.close()\n\n # return dictionary containing snow depth changes\n return depths, imageSummary", "def test_rasters_created_with_projected_srs(self):\n\n # Create test data\n x_ul = 220534 # x value of upper left corner\n y_ul = 827790 # y_value of upper left corner\n numx = 8 # Number of xs\n numy = 5 # Number of ys\n dx = 200\n dy = -200\n\n # Define array where ys are rows and xs columns\n A1 = numpy.zeros((numy, numx))\n\n # Establish coordinates for lower left corner\n y_ll = y_ul - numy * dy\n x_ll = x_ul\n\n # Define pixel centers along each direction\n x = numpy.linspace(x_ll + 0.5, x_ll + numx - 0.5, numx)\n y = numpy.linspace(y_ll + 0.5, y_ll + numy - 0.5, numy)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numy):\n for j in range(numx):\n A1[numy - 1 - i, j] = linear_function(x[j], y[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == linear_function(x[0], y[4])\n\n # Lower left corner\n assert A1[4, 0] == linear_function(x[0], y[0])\n\n # Upper right corner\n assert A1[0, 7] == linear_function(x[7], y[4])\n\n # Lower right corner\n assert A1[4, 7] == linear_function(x[7], y[0])\n\n # Generate raster object and write\n projection = \"\"\"PROJCS[\"DGN95 / Indonesia TM-3 zone 48.2\",\n GEOGCS[\"DGN95\",\n DATUM[\"Datum_Geodesi_Nasional_1995\",\n SPHEROID[\"WGS 84\",6378137,298.257223563,\n AUTHORITY[\"EPSG\",\"7030\"]],\n TOWGS84[0,0,0,0,0,0,0],\n AUTHORITY[\"EPSG\",\"6755\"]],\n PRIMEM[\"Greenwich\",0,\n AUTHORITY[\"EPSG\",\"8901\"]],\n UNIT[\"degree\",0.01745329251994328,\n AUTHORITY[\"EPSG\",\"9122\"]],\n AUTHORITY[\"EPSG\",\"4755\"]],\n UNIT[\"metre\",1,\n AUTHORITY[\"EPSG\",\"9001\"]],\n PROJECTION[\"Transverse_Mercator\"],\n PARAMETER[\"latitude_of_origin\",0],\n PARAMETER[\"central_meridian\",106.5],\n PARAMETER[\"scale_factor\",0.9999],\n PARAMETER[\"false_easting\",200000],\n PARAMETER[\"false_northing\",1500000],\n AUTHORITY[\"EPSG\",\"23834\"],\n AXIS[\"X\",EAST],\n 
AXIS[\"Y\",NORTH]]\"\"\"\n\n geotransform = (x_ul, dx, 0, y_ul, 0, dy)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n assert nanallclose(R1.get_data(), A1, rtol=1.0e-12)\n assert nanallclose(R1.get_geotransform(), geotransform,\n rtol=1.0e-12)\n assert 'DGN95' in R1.get_projection()", "def heatmap(island_results):\n kart_herb = []\n kart_carn = []\n for row in island_results:\n h_row = []\n c_row = []\n for cell in row:\n h_row.append(cell[\"herbivores\"])\n c_row.append(cell[\"carnivores\"])\n kart_herb.append(h_row)\n kart_carn.append(c_row)\n return kart_herb, kart_carn", "def py_SurfStatAvSurf(filenames, fun = np.add, output_surfstat=False):\n \n if filenames.ndim is not 2:\n raise ValueError('Filenames must be a 2-dimensional array.')\n \n for i in range(0, filenames.shape[0]):\n surfaces = np.empty(filenames.shape[1], dtype=np.object)\n for j in range(0, filenames.shape[1]):\n \n # Check whether input is BSPolyData or a filename. \n if isinstance(filenames[i,j], BSPolyData):\n surfaces[j] = filenames[i,j] \n else:\n surfaces[j] = read_surface(filenames[i,j])\n \n # Concatenate second dimension of filenames. \n if j is 0:\n tri = get_cells(surfaces[j]) \n coord = get_points(surfaces[j])\n else:\n tri = np.concatenate((tri, get_cells(surfaces[j]) + coord.shape[0]), axis=0)\n coord = np.concatenate((coord, get_points(surfaces[j])), axis=0)\n \n if i is 0:\n m = 1\n coord_all = coord\n else:\n coord_all = fun(coord_all,coord)\n m = fun(m,1)\n \n coord_all = coord_all / m \n \n if output_surfstat:\n surface = {'tri': np.array(tri) + 1, 'coord': np.array(coord_all).T}\n else:\n surface = build_polydata(coord_all, tri)\n \n return surface", "def av_cmaps(cmaps,nres,resnames,outdir,name_mod,mtype=\"NULL\"):\n\tplt.clf()\n\tnframes = len(cmaps) \n\tav = np.zeros((cmaps[0].shape))\n\n\t# save cmaps to npy file. 
Data must first be reshaped.\n\tif mtype == \"NULL\":\n\t\tcmaps = np.array(cmaps)\n\t\tresh = cmaps.reshape(cmaps.shape[0],cmaps.shape[1]*cmaps.shape[2])\n\t\tnp.savetxt(outdir+\"CMAPS\" + name_mod + \"_raw.npy\",resh)\n\n\t\tfor i in range(cmaps[0].shape[0]):\n\t\t\tfor j in range(cmaps[0].shape[1]):\n\t\t\t\t# for each element of the matrix\n\t\t\t\tif j > i: # don't compute things twice\n\t\t\t\t\tl = []\n\t\t\t\t\tfor c in cmaps:\n\t\t\t\t\t\t# for each map, determine if there was a contact at that position\n\t\t\t\t\t\tif c[i][j] < 0.7: # nm\n\t\t\t\t\t\t\tl.append(1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tl.append(0)\n\t\t\t\t\tav[i][j] = np.std(l)/(np.sqrt(nframes)-1)\n\t\t\t\t\tav[j][i] = np.mean(l)\n\t\t\t\t# dont consider contacts from neighbors\n\t\t\t\tif i == j or abs(i-j) <= 2:\n\t\t\t\t\tav[i][j] = 0\n\t\t\t\t\tav[j][i] = 0\n\telse:\n\t\tfor m in range(nres):\n\t\t\tfor n in range(nres):\n\t\t\t\tfor fr in range(nframes):\n\t\t\t\t\tav[n][m] += cmaps[fr][m][n]\n\t\tav/=nframes\n\tfig, ax = plt.subplots()\n\tplt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\tif mtype == \"gremlin\":\n\t\tim = ax.imshow(av, cmap='PuBu')\n\t\tcbar = fig.colorbar(im)\n\t\tax.set_title(\"Average Contact Maps from Rosetta+Gremlin Output\")\n\t\tplt.savefig(outdir+\"gremlin_compare_CMAPS\" + name_mod + \".png\")\n\t\tnp.savetxt(outdir+\"gremlin_CMAPS\" + name_mod + \"_av.npy\",av)\n\telif mtype == \"surface\":\n\t\thydrophobic = ['GLY', 'ALA', 'VAL', 'ILE', 'LEU', 'MET', 'PHE', 'TYR', 'TRP', 'PRO', 'CYS']\n\t\thydrophilic = ['SER', 'THR', 'ASN', 'GLN', 'HIS']\n\t\tposcharge = ['ARG', 'LYS']\n\t\tnegcharge = ['ASP', 'GLU']\n\n\t\tfor it,rn in enumerate(resnames):\n\t\t\tif rn in hydrophobic:\n\t\t\t\tplt.axhline(y=it,c='yellow',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='yellow',linewidth=1.5)\n\t\t\telif rn in hydrophilic:\n\t\t\t\tplt.axhline(y=it,c='g',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='g',linewidth=1.5)\n\t\t\telif rn in poscharge:\n\t\t\t\tplt.axhline(y=it,c='b',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='b',linewidth=1.5)\n\t\t\telif rn in negcharge:\n\t\t\t\tplt.axhline(y=it,c='r',linewidth=1.5)\n\t\t\t\tplt.axvline(x=it,c='r',linewidth=1.5)\n\t\t\telse:\n\t\t\t\tprint \"unknown restype:\", rn\n\t\tax.set_title(\"Average Contact Maps of Surface Residues\")\n\t\tim = ax.imshow(av, cmap='Greys')\n\t\tcbar = fig.colorbar(im)\n\t\tplt.savefig(outdir+\"surface_CMAPS\" + name_mod + \".png\")\n\t\tnp.savetxt(outdir+\"surface_CMAPS\" + name_mod + \"_av.npy\",av)\n\telse:\n\t\tim = ax.imshow(av)\n\t\tcbar = fig.colorbar(im)\n\t\tax.set_title(\"Average Contact Maps\")\n\t\tplt.savefig(outdir+\"CMAPS\" + name_mod + \".png\")\n\t\tnp.savetxt(outdir+\"CMAPS\" + name_mod + \"_av.npy\",av)\n\treturn av", "def test_calc_res():\n with xr.open_rasterio(TEST_RASTER_PATH) as src:\n xr_res = ds.utils.calc_res(src)\n with rasterio.open(TEST_RASTER_PATH) as src:\n rio_res = src.res\n assert np.allclose(xr_res, rio_res)", "def radialAvg(data,xi,zi,rad):\r\n #Remove all nan values from data\r\n data=np.nan_to_num(data)\r\n \r\n #Array with x values\r\n xArr=xi[0]\r\n \r\n #Array with z values\r\n zArr=np.transpose(zi)[0]\r\n \r\n #Create the array to store the values\r\n avgData=[]\r\n \r\n #Go over each z position\r\n for i in range(len(zArr)):\r\n \r\n #Store the total at each z position\r\n zPosTot=0\r\n \r\n #Counter to help average\r\n count=0\r\n \r\n #Go over each x position\r\n for j in range(len(xArr)):\r\n \r\n #Check if we are within the radius\r\n if 
np.abs(xArr[j])<=rad:\r\n \r\n #Add the data to the position total\r\n zPosTot+=data[i][j]\r\n \r\n #Increment the counter\r\n count+=1\r\n \r\n #Calculate the radial average\r\n zPosAvg=zPosTot/count\r\n \r\n #Add to the array\r\n avgData.append(zPosAvg)\r\n \r\n return avgData", "def applyPhotoZ (self,arr):\n print \"Applying Template SED PZs\"\n\n ztrue = arr['z']\n\n #select a template\n templates = ['El_B2004a.sed']+['Sbc_B2004a.sed','Scd_B2004a.sed']\n templates = templates +['Im_B2004a.sed','SB3_B2004a.sed','SB2_B2004a.sed','ssp_25Myr_z008.sed','ssp_5Myr_z008.sed']\n\n #read in f_mod files, interpolate, get values of f_mod_b\n ngals = len(ztrue)\n\n f_mod_o = np.zeros((self.nb, ngals))\n for z in range(ngals):\n #currently templates are randomly chosen but probably should be an input with true z\n templateno = np.random.choice(range(self.nt))\n for b in range(self.nb):\n spl = InterpolatedUnivariateSpline(self.z_grid, self.f_mod[:,templateno,b])\n f_mod_o[b][z] = spl(ztrue[z])\n\n #select sigma_b - 10% for now\n sigma = 0.1*f_mod_o\n #select observed fluxes f_obs_b = f_mod_b + sigma_b*rando\n f_obs = f_mod_o+ sigma * (np.random.normal(0.,1.,self.nb*ngals).reshape((self.nb,ngals)))\n # I don't seem to be able to find a more efficient way\n arrx=np.zeros(ngals,dtype=[('pz_f_obs',float,(self.nb,)),('pz_flux_sigma',float,(self.nb,))])\n arrx['pz_f_obs']=f_obs.T\n arrx['pz_flux_sigma']=sigma.T\n arr = recfunctions.merge_arrays((arr,arrx),flatten=True,usemask=False)\n return arr", "def spectra_stacker(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n data_shape = np.shape(image_data)\n ra_axis = data_shape[2]\n dec_axis = data_shape[1]\n wl_axis = data_shape[0]\n\n pxl_total = ra_axis * dec_axis\n \n data_unwrap = [] \n for i_ra in range(ra_axis):\n for i_dec in range(dec_axis):\n pixel_data = image_data[:][:,i_dec][:,i_ra]\n \n data_unwrap.append(pixel_data)\n\n data_stacked = np.zeros((pxl_total, wl_axis))\n for i_row in range(np.shape(data_unwrap)[0]):\n data_row = data_unwrap[i_row]\n for i_pixel in range(len(data_row)):\n data_stacked[i_row][i_pixel] = data_row[i_pixel]\n\n # writing data to a fits file\n hdr = fits.Header()\n hdr['CTYPE1'] = 'pixel'\n hdr['CRPIX1'] = 1\n hdr['CRVAL1'] = data_stacked[0][0]\n hdr['CDELT1'] = data_stacked[0][1] - data_stacked[0][0]\n\n primary_hdu = fits.PrimaryHDU(header=hdr)\n hdu = fits.ImageHDU(data_stacked)\n\n hdul = fits.HDUList([primary_hdu, hdu])\n\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n hdul.writeto(data_dir + '/stacked.fits')\n return data_unwrap", "def snow_summary(code, scalingFactor, statistics=\"SUM\", outcellsize='1000', monthRange='', yearRange='',\n path=\"H:/GIS/SNODAS/SNWDS/\", outpath=\"H:/GIS/SNODAS.gdb/\", area=''):\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n # arcpy.env.mask = area\n\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',\n 'MINORITY': 'MNR',\n 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n\n for y in range(yearRange[0], yearRange[1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[1] + 1): # set months converted here\n g[code + str(y) + 
str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year\n for name in sorted(\n glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type\n rast = os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n # print(g[code+str(y)+str(m).zfill(2)])\n # ifnull = 'in_memory/ifnull'\n # arcpy sa functions that summarize the daily data to monthly data\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,\n ignore_nodata=\"DATA\")\n div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001\n calc = Con(div < 0.0, 0.0, div) # remove negative and null values\n ifnull = Con(IsNull(calc), 0, calc) # remove null\n # WKID 102039\n outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis\n # define save path for file\n outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]\n memoryFeature = \"in_memory/myMemoryFeature\"\n # memoryFeature = outnm\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n # Execute ExtractByMask to clip snodas data to Utah watersheds\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management(\"in_memory\")", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n 
pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def clip_multiple_raster(raster_name_list, output_suffix='clip', overwrite=False, resample=False, n_jobs=2):\n\n global suffix, o, r\n o = overwrite\n r = resample\n suffix = output_suffix\n\n # Check if r.clip is well installed\n if not gscript.find_program('r.clip', '--help'):\n message = _(\"You first need to install the addon r.clip.\\n\")\n message += _(\" You can install the addon with 'g.extension r.clip'\")\n gscript.fatal(message)\n\n # Clip the rasters in multiprocessing pool of jobs\n p = Pool(n_jobs)\n output = p.map(clip, raster_name_list) # Launch the processes for as many items in the list (if function with a return, the returned results are ordered thanks to 'map' function)\n p.close()\n p.join()\n print \"\\n\".join(output)", "def retrieveDensity_reg(slidedir:str, filename : str, resultsdir : str, suffix : str = '_results_dirreg.npz'):\n TILESIZE_X = 512\n TILESIZE_Y = 512\n sl = openslide.open_slide(slidedir+os.sep+filename)\n\n tiles_total_x = int(np.floor(sl.dimensions[0] / TILESIZE_X))\n tiles_total_y = int(np.floor(sl.dimensions[1] / TILESIZE_Y))\n\n # calculate 10 HPFs with highest mitotic activity\n # 1 HPF = 0.237 mm^2 \n A = 2.37 # mm^2 \n W_hpf_microns = np.sqrt(A*4/3) * 1000 # in microns\n H_hpf_microns = np.sqrt(A*3/4) * 1000 # in microns\n\n micronsPerPixel = sl.properties[openslide.PROPERTY_NAME_MPP_X]\n\n W_hpf = int(W_hpf_microns / float(micronsPerPixel)) \n H_hpf = int(H_hpf_microns / float(micronsPerPixel))\n\n W_x = int(W_hpf / TILESIZE_X)\n W_y = int(H_hpf / TILESIZE_Y)\n\n f = np.load(bz2.BZ2File(resultsdir + os.sep + filename + suffix+'.bz2','rb'))\n \n\n scorefield=np.zeros((np.max(f['tilesProcessed'][:,1])+1,1+np.max(f['tilesProcessed'][:,0])))\n scorefield[f['tilesProcessed'][:,1],f['tilesProcessed'][:,0]] = np.reshape(f['scores'],-1)\n\n completeMap = scorefield\n\n kernel = np.ones((W_y,W_x),np.float32)\n ma = cv2.filter2D(completeMap, -1, kernel )\n\n return ma, completeMap", "def output_rasters_cloud(self, arr, outname):\n\n if self.config_dict['path_mode'] == 'aws':\n # later on deleted by s3_delete_local()\n # local_outpath = os.path.join(self.config_dict['temp_folder'], outname)\n local_outname = outname.split('/')[-1]\n local_outpath = os.path.join(self.temp_folder, local_outname)\n self.log.debug('local_outpath {}'.format(local_outpath))\n\n t0 = t_now()\n\n band1 = arr\n # write to a temp folder\n with rasterio.open(local_outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n # Buckets are not directories but you can 
treat them like they are\n # bucket_name = os.path.split(self.config_dict['out_root'])[0] # dev-et-data\n # bucket_prefix = os.path.split(self.config_dict['out_root'])[-1] # tile_modelrun1\n bucket_name = self.config_dict['out_root'].split('/')[0]\n bucket_prefix_list = self.config_dict['out_root'].split('/')[1:]\n print(bucket_prefix_list)\n bucket_prefix = '/'.join(bucket_prefix_list)\n print(\"bucket prefix =\", bucket_prefix)\n bucket_filepath = os.path.join(bucket_prefix, outname) # os.path.join(dev-et-data/tile_modelrun1, outname)\n\n # uploads to aws bucket with filepath\n self.s3_delete_local(local_file=local_outpath, bucket=bucket_name, bucket_filepath=bucket_filepath)\n t_total = t_now() - t0\n self.log.info(\"OUTPUT - TIME - {} - {}\".format(t_total, bucket_filepath))\n\n elif self.config_dict['path_mode'] == 'google':\n print('google path mode not yet implemented')\n sys.exit(0)\n\n else:\n print('PATH MODE in config is not set properly for the cloud implementation of output_Rasters')\n sys.exit(0)", "def find_location_gs(source_name, source_alt_az,\n minute, hour, day, month, year,\n plot_grids=True):\n\n alt, az = source_alt_az\n source_obj = Vizier.query_object(source_name, catalog='V/50')[0]\n source_ra_dec = (source_obj['RAJ2000'][0], source_obj['DEJ2000'][0])\n\n source_ra_hms = tuple(map(float, source_ra_dec[0].split()))\n source_dec_dms = tuple(map(float, source_ra_dec[1].split()))\n\n source_ra = Angle(source_ra_hms, unit='hourangle').degree\n source_dec = Angle(source_dec_dms, unit=u.deg).degree\n\n lats = np.arange(-90., 90, res)\n longs = np.arange(-180, 180, res)\n\n ra_grid = np.zeros((len(lats), len(longs)))\n dec_grid = np.zeros((len(lats), len(longs)))\n score_grid = np.zeros((len(lats), len(longs)))\n\n # Run the grid\n lat_counter, long_counter = 0, 0\n for i in range(len(lats)):\n for j in range(len(longs)):\n # Need to sort out angular units\n lat, long = lats[i], longs[j]\n\n ra, dec = altaz_to_radec((alt, az), pos=(lat, long),\n minute=minute, hour=hour, day=day,\n month=month, year=year, tz_offset=5)\n\n # pos_grid[i, j] = {'RA': ra, 'DEC': dec}\n ra_grid[i, j] = ra\n dec_grid[i, j] = dec\n\n # Bad - planar:\n score = np.sqrt((ra - source_ra)**2 + (dec - source_dec)**2)\n\n # Good - spherical:\n # score = np.arccos(np.sin(dec) * np.sin(source_dec) + np.cos(dec) * np.cos(source_dec) * np.cos(abs(ra - source_ra)))\n\n score_grid[i, j] = score\n\n verbose = False\n if verbose is True:\n print('RA, Source RA:', ra, source_ra)\n print('DEC, Source DEC:', dec, source_dec)\n print('Score:', score)\n print('\\n')\n else:\n step = long_counter + lat_counter * len(lats)\n print (str(step) + '/' + str(len(lats) * len(longs)))\n long_counter += 1\n\n outname = 'latlong-gridsearch-results_' + str(res)\n score_df = pd.DataFrame(score_grid)\n score_df.to_csv(outname + '.csv')\n\n if plot_grids is True:\n lat_coord = (90 + local_latlong[0]) * res\n long_coord = (180 + local_latlong[1]) * res\n\n plt.contour(score_grid)\n plt.plot([lat_coord], [long_coord], 'or')\n plt.matshow(score_grid, cmap='magma')\n\n xtick_locs = np.arange(0, len(longs), len(longs)/6)\n xtick_labs = [int(longs[i]) for i in xtick_locs]\n plt.xticks(xtick_locs, xtick_labs)\n\n # plt.ylim(max(lats), min(lats))\n ytick_locs = np.arange(0, len(lats), len(lats)/10)\n ytick_labs = [int(lats[i]) for i in ytick_locs]\n plt.yticks(ytick_locs, ytick_labs)\n\n plt.savefig(outname + '.png', dpi=200)\n plt.show(block=False)\n\n\n return {'RA': ra_grid, 'DEC': dec_grid, 'SCORE': score_grid}", "def 
main(raster_file):\n with rasterio.open(raster_file) as src:\n data = src.read() # gets ALL the data\n single_band = data[0] # gets the first band OR src.read(1)\n\n print (f'Normally expect this shape from rasterio: {data.shape}')\n # https://rasterio.readthedocs.io/en/latest/topics/image_processing.html\n\n image = reshape_as_image(data)\n\n print(f'After reshaping as image: {image.shape}')\n\n reshaped_to_raster = reshape_as_raster(image)\n\n print(f'After reshaping as raster: {reshaped_to_raster.shape}')\n\n print('---------------')\n\n print(f'first band, or a single band image: {single_band.shape}')\n\n added_dimension = np.expand_dims(single_band, axis=2)\n\n print(f'After adding a dimension: {added_dimension.shape}')\n print('---------------')\n print(added_dimension[:,:,0])", "def radiance_map(file, config, vmax=4200, levels=20, typ=''):\n \n # Select data from configuration \n azimuths = config['skymap'][:, 0] # +180 # azimuths\n zeniths = config['skymap'][:, 1] # zeniths\n\n if typ == 'sim':\n # look for wavelength index in array\n waves_sim = dataset.attrs['simulated_Columns'].split('nm')[0].split('[')[1].split(\n ']')[0].split(',')\n waves = np.asarray(list(map(int, waves_sim)))\n wave_indx = np.where(waves == wave)\n try:\n wave_indx = np.int(wave_indx[0][0])\n except:\n print(\"Wavelength is not in dataset\")\n z = dataset.simulated[:, wave_indx, time_indx]\n\n elif typ == 'meas':\n wave_indx = int((config['wavelength'] - 250 - config['wave_correction']) / 0.446)\n with h5py.File(file, 'r') as data:\n z = data['data'][:, wave_indx]\n else:\n print('Select a input data type(sim or meas)')\n\n # Add values in the origin to close the surface interpolation\n azimuths = np.append(azimuths, [270, 0, 0, 0, 0, 0, 0, 0])\n zeniths = np.append(zeniths, [0, 12, 24, 36, 48, 60, 72, 84])\n z = np.append(z, [z[0], z[3], z[9], z[19], z[33], z[51], z[73], z[99]])\n\n # Convert x to radians\n azimuths = np.radians(azimuths)\n zeniths = np.radians(zeniths)\n\n # Remove dead channels of the dataset\n azimuths = np.delete(azimuths, config['dead_fibre'])\n zeniths = np.delete(zeniths, config['dead_fibre'])\n z = np.delete(z, config['dead_fibre'])\n\n # Set up a regular grid of interpolation point\n thetai, ri = np.linspace(azimuths.min(), azimuths.max(),\n num=len(azimuths)), \\\n np.linspace(zeniths.min(), zeniths.max(), num=len(zeniths))\n\n ri, thetai = np.meshgrid(ri, thetai, indexing='ij')\n\n # zi = scipy.interpolate.griddata((azimuths, zeniths), z, (thetai, ri),\n # method='linear')\n\n rbf = scipy.interpolate.Rbf(azimuths, zeniths, z, fucntion='gaussian',\n epsilon=0.05)\n\n ZI = rbf(thetai, ri)\n\n if typ == 'sim':\n name = str(dataset.time[time_indx].values) # ''\n else:\n name = 'testing' #str(dataset.time[time_indx].values)\n\n # Create the directory to save the results\n # os.makedirs(os.path.dirname(config['path_note'] + '/figures/'),\n # exist_ok=True)\n if vmax == 'default':\n vmax = 4200\n else:\n vmax = vmax\n\n # Plot the dataset\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n cmap = 'Spectral_r' # 'rainbow'\n a = plt.contourf(thetai, ri, ZI, levels, cmap=cmap, vmin=0,\n vmax=vmax) # , vmax=4932)\n plt.title('{} UTC {}nm'.format(name, config['wavelength']))\n plt.axis([0, 2*np.pi, 0, 1.48])\n\n plt.scatter(azimuths, zeniths, cmap=cmap, s=1)\n ax.grid(False)\n ax.set_theta_zero_location(\"N\") # Set the direction of polar plot\n ax.set_theta_direction(1) # Set the increase direction on azimuth angles\n # (-1 to clockwise, 1 counterclockwise)\n cbar = 
plt.colorbar(a)\n cbar.set_label(\"counts\", rotation=90)\n\n # if typ == 'sim':\n # plt.savefig(\n # 'figures/skymap/simulated/skymap{}nm_{}UTC_sim.jpeg'.format(wave,\n # name),\n # dpi=300)\n # plt.show();\n # else:\n # plt.savefig(\n # 'figures/skymap/measured/skymap{}nm_{}UTC_meas.jpeg'.format(wave,\n # name),\n # dpi=300)", "def map(z):\n pass", "def level1_hitmaps(filename,\n image_directory,\n band_average=True,\n feed_average=False,\n feeds=[1],\n make_hits=True,\n make_sky=True,\n field_width=None,\n cdelt=[1./60.,1./60.],\n ctype=['RA---TAN','DEC--TAN'],\n crval=None,\n source='None',\n plot_circle=False,\n plot_circle_radius=1,\n AzElMode=False,\n SunMode=False):\n\n\n try:\n fd = h5py.File(filename,'r')\n except OSError:\n print('Unable to open file {}'.format(filename))\n return\n\n # cdelt given in arcmin\n if not isinstance(field_width, type(None)):\n xpixelWidth = int(field_width[0]/cdelt[0]*60)\n ypixelWidth = int(field_width[1]/cdelt[1]*60)\n image_width = [xpixelWidth, ypixelWidth]\n else:\n image_width = None\n\n if isinstance(image_directory, type(None)):\n image_directory = filename.split('/')[-1].split('.')[0]\n if not os.path.exists(image_directory):\n os.makedirs(image_directory)\n\n\n if AzElMode:\n mapper = MapperAzEl(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n image_directory=image_directory,\n ctype=ctype)\n elif SunMode:\n mapper = MapperSun(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n image_directory=image_directory,\n ctype=ctype)\n \n else:\n mapper = Mapper(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n image_directory=image_directory,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n ctype=ctype)\n \n \n mapper.setLevel1(fd, source)\n if 'all' in feeds:\n feeds = [feed for feed in fd['spectrometer/feeds'][:] if feed != 20]\n if feed_average:\n \n maps = mapper(feeds, usetqdm=True)\n fstr = '-'.join(['{:02d}'.format(feed) for feed in feeds if feed in mapper.feed_ids])\n outdir = '{}'.format(image_directory)\n\n mapper.plotImages(feeds,\n '{}/Hitmap_FeedAvg.png'.format(outdir),\n '{}/BandAverage_FeedAvg.png'.format(outdir),\n plot_circle,\n plot_circle_radius)\n # mapper.SaveMaps('{}/BandAverage_FeedAvg.fits'.format(image_directory))\n \n \n for feed in tqdm(feeds):\n if not isinstance(mapper.map_bavg,type(None)):\n mapper.map_bavg *= 0.\n mapper.hits = None\n\n maps = mapper(feed)\n\n fstr = '-'.join(['{:02d}'.format(feed)])\n outdir = '{}'.format(image_directory)\n\n mapper.plotImages([feed],\n '{}/Hitmap_Feed{:02d}.png'.format(outdir,feed),\n '{}/BandAverage_Feed{:02d}.png'.format(outdir,feed),\n plot_circle,\n plot_circle_radius)\n #mapper.SaveMaps('{}/BandAverage_Feed{:02d}.fits'.format(image_directory,feed))", "def test_xyz_to_smiles(self):\n xyz1 = \"\"\"S -0.06618943 -0.12360663 -0.07631983\nO -0.79539707 0.86755487 1.02675668\nO -0.68919931 0.25421823 -1.34830853\nN 0.01546439 -1.54297548 0.44580391\nC 1.59721519 0.47861334 0.00711000\nH 1.94428095 0.40772394 1.03719428\nH 2.20318015 -0.14715186 -0.64755729\nH 1.59252246 1.51178950 -0.33908352\nH -0.87856890 -2.02453514 0.38494433\nH -1.34135876 1.49608206 0.53295071\"\"\"\n\n xyz2 = \"\"\"O 2.64631000 -0.59546000 0.29327900\nO 2.64275300 2.05718500 -0.72942300\nC 1.71639100 1.97990400 0.33793200\nC -3.48200000 1.50082200 0.03091100\nC -3.85550400 -1.05695100 -0.03598300\nC 3.23017500 -1.88003900 0.34527100\nC -2.91846400 0.11144600 0.02829400\nC 0.76935400 0.80820200 0.23396500\nC -1.51123800 
-0.09830700 0.09199100\nC 1.28495500 -0.50051800 0.22531700\nC -0.59550400 0.98573400 0.16444900\nC -0.94480400 -1.39242500 0.08331900\nC 0.42608700 -1.59172200 0.14650400\nH 2.24536500 1.93452800 1.29979800\nH 1.14735500 2.91082400 0.31665700\nH -3.24115200 2.03800800 0.95768700\nH -3.08546100 2.10616100 -0.79369800\nH -4.56858900 1.48636200 -0.06630800\nH -4.89652000 -0.73067200 -0.04282300\nH -3.69325500 -1.65970000 -0.93924100\nH -3.72742500 -1.73294900 0.81894100\nH 3.02442400 -2.44854700 -0.56812500\nH 4.30341500 -1.72127600 0.43646000\nH 2.87318600 -2.44236600 1.21464900\nH -0.97434200 2.00182800 0.16800300\nH -1.58581300 -2.26344700 0.02264400\nH 0.81122400 -2.60336100 0.13267800\nH 3.16280800 1.25020800 -0.70346900\"\"\"\n\n xyz3 = \"\"\"N 2.24690600 -0.00006500 0.11597700\nC -1.05654800 1.29155000 -0.02642500\nC -1.05661400 -1.29150400 -0.02650600\nC -0.30514100 0.00000200 0.00533200\nC 1.08358900 -0.00003400 0.06558000\nH -0.39168300 2.15448600 -0.00132500\nH -1.67242600 1.35091400 -0.93175000\nH -1.74185400 1.35367700 0.82742800\nH -0.39187100 -2.15447800 0.00045500\nH -1.74341400 -1.35278100 0.82619100\nH -1.67091600 -1.35164600 -0.93286400\"\"\"\n\n xyz4 = \"\"\"C -0.86594600 0.19886100 2.37159000\nC 0.48486900 -0.16232000 1.75422500\nC 1.58322700 0.83707500 2.14923200\nC 0.88213600 -1.51753600 2.17861400\nN 1.17852900 -2.57013900 2.53313600\nN 0.51051200 -0.21074800 0.26080100\nN -0.51042000 0.21074000 -0.26079600\nC -0.48479200 0.16232300 -1.75422300\nC 0.86590400 -0.19926100 -2.37161200\nC -1.58344900 -0.83674100 -2.14921800\nC -0.88166600 1.51765700 -2.17859800\nN -1.17777100 2.57034900 -2.53309500\nH -1.16019200 1.20098300 2.05838400\nH -1.64220300 -0.50052400 2.05954500\nH -0.78054100 0.17214100 3.45935000\nH 1.70120000 0.85267300 3.23368300\nH 2.53492600 0.56708700 1.69019900\nH 1.29214500 1.83331400 1.80886700\nH 1.15987300 -1.20145600 -2.05838100\nH 0.78046800 -0.17257000 -3.45937100\nH 1.64236100 0.49992400 -2.05962300\nH -2.53504500 -0.56650600 -1.69011500\nH -1.70149200 -0.85224500 -3.23366300\nH -1.29263300 -1.83308300 -1.80892900\"\"\"\n\n xyz5 = \"\"\"O 0.90973400 -0.03064000 -0.09605500\nO 0.31656600 -0.00477100 -1.21127600\nO 2.17315400 -0.03069900 -0.09349100\"\"\"\n\n xyz6 = \"\"\"S 0.38431300 0.05370100 0.00000000\nN -1.13260000 0.07859900 0.00000000\nH 0.85151800 -1.28998600 0.00000000\"\"\"\n\n xyz7 = \"\"\"N 0.00000000 0.00000000 0.44654700\nN 0.00000000 0.00000000 -0.77510900\nH 0.86709400 0.00000000 1.02859700\nH -0.86709400 0.00000000 1.02859700\"\"\"\n\n xyz8 = \"\"\"N 0.00000000 0.00000000 0.65631400\nC 0.00000000 0.00000000 -0.50136500\nH 0.00000000 0.00000000 -1.57173600\"\"\"\n\n# xyz9 = \"\"\"S -0.00866000 -0.60254900 0.00000000\n# N -0.96878800 0.63275900 0.00000000\n# N 1.01229100 0.58298500 0.00000000\"\"\"\n#\n# xyz10 = \"\"\"O -0.79494500 -0.93969200 0.00000000\n# O -0.32753500 1.24003800 0.00000000\n# O 1.28811400 -0.24729000 0.00000000\n# N 0.14143500 0.11571500 0.00000000\n# H -1.65602000 -0.48026800 0.00000000\"\"\"\n#\n# xyz11 = \"\"\"O 1.64973000 -0.57433600 0.02610800\n# O 0.49836300 1.28744800 -0.18806200\n# N -0.57621600 -0.65116600 0.24595200\n# N -1.78357200 -0.10211200 -0.14953800\n# N 0.61460400 0.08152700 -0.00952700\n# H -0.42001200 -1.61494900 -0.03311600\n# H -1.72480300 0.33507600 -1.06884500\n# H -2.07362100 0.59363400 0.53038600\"\"\"\n\n xyz12 = \"\"\"O 1.10621000 0.00000000 -0.13455300\nO -1.10621000 0.00000000 -0.13455300\nN 0.00000000 0.00000000 0.33490500\"\"\"\n\n# xyz13 = \"\"\"O -0.37723000 -1.27051900 
0.00000000\n# N -0.12115000 -0.04252600 0.00000000\n# N -0.95339100 0.91468300 0.00000000\n# C 1.31648000 0.33217600 0.00000000\n# H 1.76422500 -0.11051900 -0.89038300\n# H 1.76422500 -0.11051900 0.89038300\n# H 1.40045900 1.41618100 0.00000000\n# H -1.88127600 0.47189500 0.00000000\"\"\"\n\n xyz14 = \"\"\"S -0.12942800 0.11104800 0.22427200\nO 0.98591500 -1.00752300 -0.31179100\nO -1.43956200 -0.44459900 -0.15048900\nO 0.32982400 1.44755400 -0.21682700\nH 1.85512700 -0.56879900 -0.36563700\"\"\"\n\n xyz15 = \"\"\"N 1.11543700 0.11100500 0.00000000\nN -0.11982300 -0.03150800 0.00000000\nN -1.25716400 0.01530300 0.00000000\nH 1.57747800 -0.80026300 0.00000000\"\"\"\n\n xyz16 = \"\"\"O 1.21678000 -0.01490600 0.00000000\nN 0.04560300 0.35628400 0.00000000\nC -1.08941100 -0.23907800 0.00000000\nH -1.97763400 0.37807800 0.00000000\nH -1.14592100 -1.32640500 0.00000000\"\"\"\n\n xyz17 = \"\"\"S 0.00000000 0.00000000 0.18275300\nO -0.94981300 -0.83167500 -0.84628900\nO 0.94981300 0.83167500 -0.84628900\nO 0.80426500 -0.99804200 0.85548500\nO -0.80426500 0.99804200 0.85548500\nH -1.67833300 -0.25442300 -1.13658700\nH 1.67833300 0.25442300 -1.13658700\"\"\"\n\n xyz18 = \"\"\"S 0.00000000 0.00000000 0.12264300\nO 1.45413200 0.00000000 0.12264300\nO -0.72706600 1.25931500 0.12264300\nO -0.72706600 -1.25931500 0.12264300\"\"\"\n\n xyz19 = \"\"\"N 1.16672400 0.35870400 -0.00000400\nN -1.16670800 0.35879500 -0.00000400\nC -0.73775600 -0.89086600 -0.00000100\nC 0.73767000 -0.89093000 -0.00000100\nC 0.00005200 1.08477000 -0.00000500\nH -1.40657400 -1.74401100 0.00000000\nH 1.40645000 -1.74411900 0.00000000\nH 0.00009400 2.16788100 -0.00000700\"\"\"\n\n xyz20 = \"\"\"C 3.09980400 -0.16068000 0.00000600\nC 1.73521600 0.45534600 -0.00002200\nC 0.55924400 -0.24765400 -0.00000300\nC -0.73300200 0.32890400 -0.00001600\nC -1.93406200 -0.42115800 0.00001300\nC -3.19432700 0.11090700 0.00000900\nH 3.67991400 0.15199400 -0.87914100\nH 3.67984100 0.15191400 0.87923000\nH 3.04908000 -1.25419800 -0.00004300\nH 1.68713300 1.54476700 -0.00005100\nH -0.81003200 1.41627100 -0.00004600\nH -1.83479400 -1.50747300 0.00004100\nH 0.61489300 -1.33808300 0.00002500\nH -3.35410300 1.18597200 -0.00001700\nH -4.07566100 -0.52115800 0.00003300\"\"\"\n\n mol1 = converter.molecules_from_xyz(converter.str_to_xyz(xyz1))[1]\n mol2 = converter.molecules_from_xyz(converter.str_to_xyz(xyz2))[1]\n mol3 = converter.molecules_from_xyz(converter.str_to_xyz(xyz3))[1]\n mol4 = converter.molecules_from_xyz(converter.str_to_xyz(xyz4))[1]\n mol5 = converter.molecules_from_xyz(converter.str_to_xyz(xyz5))[1]\n mol6 = converter.molecules_from_xyz(converter.str_to_xyz(xyz6), multiplicity=1)[1]\n mol7 = converter.molecules_from_xyz(converter.str_to_xyz(xyz7), multiplicity=1)[1]\n mol8 = converter.molecules_from_xyz(converter.str_to_xyz(xyz8))[1]\n # mol9 = converter.molecules_from_xyz(converter.str_to_xyz(xyz9), multiplicity=1)[1]\n # mol10 = converter.molecules_from_xyz(converter.str_to_xyz(xyz10))[1]\n # mol11 = converter.molecules_from_xyz(converter.str_to_xyz(xyz11))[1]\n mol12 = converter.molecules_from_xyz(converter.str_to_xyz(xyz12))[1]\n # mol13 = converter.molecules_from_xyz(converter.str_to_xyz(xyz13))[1]\n mol14 = converter.molecules_from_xyz(converter.str_to_xyz(xyz14))[1]\n mol15 = converter.molecules_from_xyz(converter.str_to_xyz(xyz15))[1]\n mol16 = converter.molecules_from_xyz(converter.str_to_xyz(xyz16))[1]\n mol17 = converter.molecules_from_xyz(converter.str_to_xyz(xyz17))[1]\n mol18 = 
converter.molecules_from_xyz(converter.str_to_xyz(xyz18))[1]\n mol19 = converter.molecules_from_xyz(converter.str_to_xyz(xyz19))[1]\n mol20 = converter.molecules_from_xyz(converter.str_to_xyz(xyz20))[1]\n\n self.assertEqual(mol1.to_smiles(), '[NH-][S+](=O)(O)C')\n self.assertIn(mol2.to_smiles(), ['COC1=C(CO)C=C([C](C)C)C=C1', 'COC1C=CC(=CC=1CO)[C](C)C'])\n self.assertEqual(mol3.to_smiles(), '[N]=C=C(C)C')\n self.assertEqual(mol4.to_smiles(), 'N#CC(N=NC(C#N)(C)C)(C)C')\n self.assertEqual(mol5.to_smiles(), '[O-][O+]=O')\n self.assertEqual(mol6.to_smiles(), 'N#S')\n self.assertEqual(mol7.to_smiles(), '[N-]=[NH2+]')\n self.assertEqual(mol8.to_smiles(), 'C#N')\n # self.assertEqual(mol9.to_smiles(), '[N-]=[S+]#N') # gives [N]S#N, multiplicity 3\n # self.assertEqual(mol10.to_smiles(), '[N+](=O)(O)[O-]') # gives None\n # self.assertEqual(mol11.to_smiles(), 'N(N)[N+](=O)[O-]') # gives None\n self.assertEqual(mol12.to_smiles(), '[O]N=O')\n # self.assertEqual(mol13.to_smiles(), 'C[N+]([NH-])=O') # gives None\n self.assertEqual(mol14.to_smiles(), '[O]S(=O)O')\n self.assertEqual(mol15.to_smiles(), '[N-]=[N+]=N')\n self.assertEqual(mol16.to_smiles(), '[O]N=C')\n self.assertEqual(mol17.to_smiles(), '[O-][S+](=O)(O)O')\n self.assertEqual(mol18.to_smiles(), 'O=S(=O)=O')\n self.assertEqual(mol19.to_adjacency_list(), \"\"\"multiplicity 2\n1 N u1 p1 c0 {4,S} {5,S}\n2 N u0 p1 c0 {3,S} {5,D}\n3 C u0 p0 c0 {2,S} {4,D} {6,S}\n4 C u0 p0 c0 {1,S} {3,D} {7,S}\n5 C u0 p0 c0 {1,S} {2,D} {8,S}\n6 H u0 p0 c0 {3,S}\n7 H u0 p0 c0 {4,S}\n8 H u0 p0 c0 {5,S}\n\"\"\") # cannot read SMILES 'c1ncc[n]1' (but can generate them)\n self.assertEqual(mol20.to_smiles(), 'C=C[CH]C=CC')", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n 
new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def CalcNesz(numRxScanAngles, altitude, chirpBandwidth, ias, srs, txPat, rxPat, wl, txPeakPow, pl, prf):\n neszPeaks = {}\n NESZ_Arr = []\n for i in range(numRxScanAngles):\n nesz_arr = NESZ_BRET(R = srs[i],\n V = SEM.PlatformVelocity(altitude),\n Ia = ias[i],\n Lsa = 0,\n T = Constants.STANDARD_TEMPERATURE,\n Brg = chirpBandwidth,\n F = 4.3,\n L = 4.1,\n Ptx = txPeakPow,\n Gtx = txPat[i],\n Grx = rxPat[i],\n wavelength = wl,\n Pl = pl,\n Prf = prf)\n NESZ_Arr.append(nesz_arr)\n\n # keep track of the NESZ peak values\n for idx, ia in enumerate(ias[i]):\n incidenceAngle = int(ia*1000)\n if incidenceAngle in neszPeaks:\n if neszPeaks[incidenceAngle] > nesz_arr[idx]:\n neszPeaks[incidenceAngle] = nesz_arr[idx]\n else:\n neszPeaks[incidenceAngle] = nesz_arr[idx]\n\n # convert to numpy array\n NESZ_Arr = np.asarray(NESZ_Arr)\n # get the power values\n Nesz_dB_Arr = 10*np.log10(NESZ_Arr)\n\n # Get the maximum NESZ values and the angles where they occur\n Nesz_max_incidence_angles = np.fromiter(neszPeaks.keys(), dtype=float)\n Nesz_max_values = np.fromiter(neszPeaks.values(), dtype=float)\n \n return NESZ_Arr, Nesz_max_incidence_angles, Nesz_max_values", "def GetZScrInfoForAllScaffolds(PosScfBC_d):\n total_pos_and_nIns_l = []\n\n # We initially run through the scaffolds to get SD, mean\n for scf in PosScfBC_d[\"scaffolds\"].keys():\n scf_info = PosScfBC_d[\"scaffolds\"][scf]\n pos_and_nIns_l = [[int(x), scf_info[\"positions\"][x][\"nIns\"]] for x in \\\n scf_info[\"positions\"].keys()]\n total_pos_and_nIns_l += pos_and_nIns_l\n\n\n just_insertions_l = [x[1] for x in total_pos_and_nIns_l]\n mean = float(sum(just_insertions_l))/float(len(just_insertions_l))\n\n SD = GetStandardDeviation(just_insertions_l, mean)\n\n\n Scf_Pos_ZScr_vals = {\"scaffolds\": {}}\n total_max_z = 0\n # Now we run through the scaffolds again to get relation of values to total\n # SD and mean and store them in output dict\n for scf in PosScfBC_d[\"scaffolds\"].keys():\n scf_info = PosScfBC_d[\"scaffolds\"][scf]\n pos_and_nIns_l = [[int(x), scf_info[\"positions\"][x][\"nIns\"]] for x in \\\n scf_info[\"positions\"].keys()]\n scf_max_z, pos_to_Zscr_l = GetZScrValuesForPoints(pos_and_nIns_l, mean, SD)\n if scf_max_z > total_max_z:\n total_max_z = scf_max_z\n\n Scf_Pos_ZScr_vals[\"scaffolds\"][scf] = {\n \"scaffold_length\": scf_info[\"scaffold_length\"],\n \"max_z\": scf_max_z,\n 
\"pos_to_Zscr_l\": pos_to_Zscr_l\n }\n\n\n\n Scf_Pos_ZScr_vals[\"mean\"] = mean\n Scf_Pos_ZScr_vals[\"SD\"] = SD\n Scf_Pos_ZScr_vals[\"max_z\"] = total_max_z\n Scf_Pos_ZScr_vals[\"analysis_type\"] = \"AllGenomeStats\"\n\n return Scf_Pos_ZScr_vals", "def z_score_transformation(data, numeric_list):\n\n transformed_data = data[numeric_list].apply(stats.zscore())\n\n return transformed_data", "def L_radec():\n\treturn sdssimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, img_width=img_width, img_height=img_height)", "def main(parameters):\n metadata = get_metadata(parameters)\n # pprint(metadata)\n image_api = NswSatelliteImages(parameters, metadata)\n print('Zoom level:', image_api.zoom_level,\n 'Resolution:', image_api.resolution,\n 'Scale:', image_api.scale)\n image_api.download_tile(xtile=39000, ytile=60000)", "def score_scene(sr, hr, clearhr, norm, num_crop=6):\n zSR = []\n max_x, max_y = np.array(hr.shape) - num_crop\n sr_ = sr[num_crop//2:-num_crop//2, num_crop//2:-num_crop//2]\n \n np.place(clearhr, clearhr==0, np.nan)\n \n zSR = np.zeros((num_crop + 1, num_crop + 1), np.float64)\n for x_off in prange(0, num_crop+1):\n for y_off in prange(0, num_crop+1):\n \n clearHR_ = clearhr[x_off : x_off + max_x, y_off : y_off + max_y]\n\n hr_ = hr[x_off:x_off + max_x, y_off:y_off + max_y]\n\n diff = (hr_- sr_)* clearHR_\n\n b = np.nanmean(diff)\n\n\n ## compute cMSE\n cMSE = np.nanmean( (diff-b)**2) \n\n cPSNR = -10.0*np.log10(cMSE)\n \n zSR[x_off, y_off] = norm/cPSNR\n\n return zSR.min()", "def compress_color_data(self):\n avg_rgb_vals_dict = {} # dictionary of average color coordinates\n for label in self.labels_list:\n try:\n avg_rgb = np.mean(\n np.mean(np.mean(self.jzazbz_dict[label], axis=0), axis=0), axis=0\n )\n avg_rgb_vals_dict[label] = avg_rgb\n except Exception as exc:\n self.log.error(exc)\n self.log.error(label + \" failed\")\n self.avg_rgb_vals_dict = avg_rgb_vals_dict\n\n jzazbz_dict_simp = {}\n for label in self.labels_list:\n avg_jzazbz = np.mean(self.jzazbz_dist_dict[label], axis=0)\n jzazbz_dict_simp[label] = avg_jzazbz\n self.jzazbz_dict_simp = jzazbz_dict_simp", "def compile_global_stats(results_dir='./../data/*/*cr_sizes*hdf5'):\n\n flist = glob.glob(results_dir)\n output = defaultdict(list)\n flist = [f for f in flist if 'nicmos' not in f]\n print(flist)\n flist.append('./../data/STIS/stis_cr_sizes.hdf5')\n results = [dask.delayed(tally_stats)(f) for f in flist]\n results = list(dask.compute(*results, scheduler='processes'))\n\n for instr, data in results:\n output[instr].append(data)\n\n for key in output.keys():\n cr_count = 0\n img_count = 0\n total_exptime = 0\n for val in output[key]:\n cr_count += val.cr_count\n img_count += val.img_count\n total_exptime += val.total_exptime\n output[key] = [cr_count, img_count, total_exptime]\n\n df = pd.DataFrame(output, index=['cr_count', 'img_count', 'total_exptime'])\n print(df)\n print('Total CR count: {}'.format(df.loc['cr_count', :].sum()))\n print('Total number of images analyzed: {}'.format(df.loc['img_count', :].sum()))\n print('Cumulative exposure time: {}'.format(df.loc['total_exptime', :].sum()))", "def zscore(vals):", "def minmax():\n minmaxlist = []\n timelist = []\n #create a list of the filenames of all sentinel-images\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n print(\"STEP 1/2\")\n print(\"EXPORTING MIN AND MAX VALUES PER BAND\")\n for i in s2files:\n start = time.time()\n nlfile = nlpath + \"/\" + i\n s2file = s2path+\"/\"+i\n #open the file\n s2raster = gdal.Open(s2file) \n 
#iterate over the bands of each image\n for n in range(s2raster.RasterCount):\n f = n + 1\n s2band = s2raster.GetRasterBand(f)\n #read the pixels of the band as an numpy-array\n s2band = s2band.ReadAsArray()\n #resize the bands to have all images in the same size\n s2band = np.resize(s2band,(1050,1050))\n #get the min and max values of each band to be able to 0-1 normalize after\n min = s2band.min()\n max = s2band.max()\n #check if there are already values for the band\n if len(minmaxlist) < s2raster.RasterCount + 1:\n s2minmax = [min,max]\n minmaxlist.append(s2minmax)\n # if the min value of the open band is smaller than the saved minimal value, overwrite it\n if min < minmaxlist[n][0]:\n minmaxlist[n][0] = min\n #if the max value of the open band is higher than the saves maximum value, overwrite it\n if max > minmaxlist[n][1]:\n minmaxlist[n][1] = max\n #open the nightlight img\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n #read the only band of the image as a numpy-array\n nlband = nlband.ReadAsArray()\n #resize it the same way as the sentinel images\n nlband = np.resize(nlband,(1050,1050))\n #get the min and max values of the band\n nlmin = nlband.min()\n nlmax = nlband.max()\n #check if there are already information about min and max values for the nightlight images\n if len(minmaxlist) < s2raster.RasterCount + 1:\n nlminmax = [nlmin,nlmax]\n minmaxlist.append(nlminmax)\n #if the min value of the open nightlight image is smaller than the saved minimal value, overwrite it\n if nlmin < minmaxlist[16][0]:\n minmaxlist[16][0] = nlmin\n #if the max value of the open nightlight image is higher than the saves maximum value, overwrite it\n if nlmax > minmaxlist[16][1]:\n minmaxlist[16][1] = nlmax\n end = time.time()\n timelist.append(end-start)\n print(\"Step 1/2\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. 
time left:\",time.strftime('%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))\n #throw out the Quality Bands (QA10,QA20,QA60)\n minmaxlist = [i for j,i in enumerate(minmaxlist) if j not in [13,14,15]]\n return minmaxlist", "def main(S, N):\n\n z_binary, z_density = point_count(N, S)\n\n extent = [-2, 2, -2, 2]\n plt.imshow(z_binary, extent=extent, cmap='Greys')\n plt.colorbar()\n plt.show()\n plt.close('all')\n\n log_zd = np.log10(z_density)\n plt.imshow(z_density, extent=extent, cmap='jet')\n plt.colorbar()\n plt.show()\n plt.close('all')\n\n log_zd = np.log10(z_density)\n plt.imshow(log_zd, extent=extent, cmap='jet')\n plt.colorbar()\n plt.show()\n plt.close('all')", "def main(ancillary_ws, zero_elev_nodata_flag=False, overwrite_flag=False):\n logging.info('\\nProcess DAYMET ancillary rasters')\n\n # Site URL\n # ancillary_url = 'http://daymet.ornl.gov/files/ancillary_files.tgz'\n\n # Build output workspace if it doesn't exist\n if not os.path.isdir(ancillary_ws):\n os.makedirs(ancillary_ws)\n\n # Input paths\n # ancillary_targz = os.path.join(\n # ancillary_ws, os.path.basename(ancillary_url))\n # dem_nc = os.path.join(ancillary_ws, 'dem_data.nc')\n # mask_nc = os.path.join(ancillary_ws, 'mask_data.nc')\n\n # Output paths\n dem_raster = os.path.join(ancillary_ws, 'daymet_elev.img')\n lat_raster = os.path.join(ancillary_ws, 'daymet_lat.img')\n lon_raster = os.path.join(ancillary_ws, 'daymet_lon.img')\n # mask_raster = os.path.join(ancillary_ws, 'daymet_mask.img')\n\n # Spatial reference parameters\n daymet_proj4 = (\n \"+proj=lcc +datum=WGS84 +lat_1=25 n \"\n \"+lat_2=60n +lat_0=42.5n +lon_0=100w\")\n daymet_osr = drigo.proj4_osr(daymet_proj4)\n daymet_osr.MorphToESRI()\n daymet_proj = daymet_osr.ExportToWkt()\n daymet_cs = 1000\n # daymet_nodata = -9999\n\n # For now, hardcode the DAYMET extent/geo\n snap_xmin, snap_ymin = -4560750, -3090500\n daymet_rows, daymet_cols = 8075, 7814\n # snap_xmin, snap_ymin = -4659000, -3135000\n # daymet_rows, daymet_cols = 8220, 8011\n # daymet_geo = (\n # snap_xmin, daymet_cs, 0.,\n # snap_ymin + daymet_cs * daymet_rows, 0., -daymet_cs)\n daymet_extent = drigo.Extent([\n snap_xmin, snap_ymin,\n snap_xmin + daymet_cs * daymet_cols,\n snap_ymin + daymet_cs * daymet_rows])\n daymet_geo = daymet_extent.geo(daymet_cs)\n logging.debug(\" Extent: {}\".format(daymet_extent))\n logging.debug(\" Geo: {}\".format(daymet_geo))\n # logging.debug(\" Cellsize: {}\".format(daymet_cs))\n # logging.debug(\" Shape: {}\".format(daymet_extent.shape(daymet_cs)))\n\n # # Download the ancillary raster tar.gz\n # if overwrite_flag or not os.path.isfile(ancillary_targz):\n # logging.info('\\nDownloading ancillary tarball files')\n # logging.info(\" {}\".format(os.path.basename(ancillary_url)))\n # logging.debug(\" {}\".format(ancillary_url))\n # logging.debug(\" {}\".format(ancillary_targz))\n # url_download(ancillary_url, ancillary_targz)\n # try:\n # urllib.urlretrieve(ancillary_url, ancillary_targz)\n # except:\n # logging.error(\" ERROR: {}\\n FILE: {}\".format(\n # sys.exc_info()[0], ancillary_targz))\n # os.remove(ancillary_targz)\n\n # # Extract the ancillary rasters\n # ancillary_list = [dem_nc]\n # # ancillary_list = [dem_nc, mask_nc]\n # if (os.path.isfile(ancillary_targz) and\n # (overwrite_flag or\n # not all([os.path.isfile(os.path.join(ancillary_ws, x))\n # for x in ancillary_list]))):\n # logging.info('\\nExtracting ancillary rasters')\n # logging.debug(\" {}\".format(ancillary_targz))\n # tar = 
tarfile.open(ancillary_targz)\n # for member in tar.getmembers():\n # print member.name\n # member.name = os.path.basename(member.name)\n # # Strip off leading numbers from ancillary raster name\n # member.name = member.name.split('_', 1)[1]\n # member_path = os.path.join(ancillary_ws, member.name)\n # if not member.name.endswith('.nc'):\n # continue\n # elif member_path not in ancillary_list:\n # continue\n # elif os.path.isfile(member_path):\n # continue\n # logging.debug(\" {}\".format(member.name))\n # tar.extract(member, ancillary_ws)\n # tar.close()\n\n # # Mask\n # if ((overwrite_flag or\n # not os.path.isfile(mask_raster)) and\n # os.path.isfile(mask_nc)):\n # logging.info('\\nExtracting mask raster')\n # mask_nc_f = netCDF4.Dataset(mask_nc, 'r')\n # logging.debug(mask_nc_f)\n # # logging.debug(mask_nc_f.variables['image'])\n # mask_array = mask_nc_f.variables['image'][:]\n # mask_array[mask_array == daymet_nodata] = 255\n # drigo.array_to_raster(\n # mask_array, mask_raster,\n # output_geo=daymet_geo, output_proj=daymet_proj,\n # output_nodata=255)\n # mask_nc_f.close()\n\n # # DEM\n # if ((overwrite_flag or not os.path.isfile(dem_raster)) and\n # os.path.isfile(dem_nc)):\n # logging.info('\\nExtracting DEM raster')\n # dem_nc_f = netCDF4.Dataset(dem_nc, 'r')\n # logging.debug(dem_nc_f)\n # # logging.debug(dem_nc_f.variables['image'])\n # dem_array = dem_nc_f.variables['image'][:]\n # # Rounding issues of the nodata value when converting to float32\n # dem_array[dem_array == daymet_nodata] -= 1\n # dem_array = dem_array.astype(np.float32)\n # if zero_elev_nodata_flag:\n # dem_array[dem_array <= daymet_nodata] = 0\n # else:\n # dem_array[dem_array <= daymet_nodata] = np.nan\n # drigo.array_to_raster(\n # dem_array, dem_raster,\n # output_geo=daymet_geo, output_proj=daymet_proj)\n # dem_nc_f.close()\n\n # Latitude/Longitude\n if (os.path.isfile(dem_raster) and\n (overwrite_flag or\n not os.path.isfile(lat_raster) or\n not os.path.isfile(lon_raster))):\n logging.info('\\nDAYMET Latitude/Longitude')\n logging.debug(' {}'.format(lat_raster))\n lat_array, lon_array = drigo.raster_lat_lon_func(\n dem_raster, gcs_cs=0.05)\n drigo.array_to_raster(\n lat_array.astype(np.float32), lat_raster,\n output_geo=daymet_geo, output_proj=daymet_proj)\n logging.debug(' {}'.format(lon_raster))\n drigo.array_to_raster(\n lon_array.astype(np.float32), lon_raster,\n output_geo=daymet_geo, output_proj=daymet_proj)\n del lat_array, lon_array\n\n logging.debug('\\nScript Complete')", "def plot_zcalib(args):\n\n start_date = args.start_date\n end_date = args.end_date\n\n start_date_dt = dp.parse(start_date) \n end_date_dt = dp.parse(end_date) \n \n min_date = dp.parse(SETTINGS.MIN_START_DATE)\n max_date = dp.parse(SETTINGS.MAX_END_DATE)\n \n if start_date_dt < min_date or end_date_dt > max_date:\n raise ValueError(f'Date must be in range {SETTINGS.MIN_START_DATE} - {SETTINGS.MAX_END_DATE}')\n\n phi_dir = os.path.join(SETTINGS.PHI_DIR)\n img_dir = os.path.join(SETTINGS.Z_CALIB_DIR,'images/')\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n filelist1 = glob.glob(phi_dir+\"phiest*\")\n filelist2 = glob.glob(phi_dir+\"phiobs*\")\n filelist1.sort()\n filelist2.sort()\n dates=[]\n ind=[]\n\n if len(filelist1) != len(filelist2):\n raise ValueError(\"Number of phiest and phiobs files does not match!\")\n\n #Loop through files to find the indices of those between the inputted start and end dates\n for f in range(0,len(filelist1)):\n match = re.search(r'\\d{8}',filelist1[f])\n file=match.group()\n 
file_dt=dp.parse(file)\n if file_dt >= start_date_dt and file_dt <= end_date_dt:\n ind.append(f)\n dates.append(file)\n\n ndays=len(ind)\n print(ndays)\n\n #If the number of elevation angles in the volumes changes over time, then the total number of rays also varies\n #This loop finds the maximum number of rays\n for f in range(0,ndays):\n file=np.load(filelist1[ind[f]])\n if f==0:\n [_,a]=file.shape\n nrays=a\n else:\n [_,a2]=file.shape\n if a2>a:\n nrays=a2\n\n #Number of volumes can vary each day \n nvols=250\n phiest=np.zeros((ndays,nvols,nrays))*np.nan\n phiobs=np.zeros((ndays,nvols,nrays))*np.nan\n good_rays=np.zeros((ndays,nvols))*np.nan\n x=np.zeros((nvols,nrays))*np.nan\n \n #Load each phiest and phiobs data for each day and store into 3D array. \n #Calculate number of good rays for each day/volume\n d=0\n for f in range(0,ndays):\n phiest1 = np.load(filelist1[ind[f]])\n [a,b] = phiest1.shape\n phiest[d,0:a,0:b] = phiest1 \n phiobs1 = np.load(filelist2[ind[f]])\n [a,b] = phiobs1.shape\n phiobs[d,0:a,0:b] = phiobs1\n d=d+1\n \n #Calculate number of good rays in each volume. good_rays(ndays,nvols)\n for j in range(ndays): \n for i in range(nvols):\n good_rays[j,i] = np.nansum(np.isfinite(phiest[j,i,:]))\n\n #bias_each_ray (ndays,nvols,nrays)\n #Calculate a bias/offset for each individual ray\n bias_each_ray = (phiest - phiobs) / phiobs\n \n #Only use volumes with more than 10 good rays for calculation of overall bias.\n ind = good_rays>10\n \n #SINGLE VALUES FOR WHOLE TIME PERIOD\n mean_bias = np.nanmean(bias_each_ray[ind,:])\n mean_bias_db = 10.0*np.log10(1000+mean_bias*1000)-30\n \n median_bias = np.nanmedian(bias_each_ray[ind,:])\n median_bias_db = 10.0*np.log10(1000.0+median_bias*1000.0)-30.0\n \n std = np.nanstd(bias_each_ray[ind,:])\n std_db = 10.0*np.log10(1000.0+std*1000.0)-30.0\n \n #print 'Mean bias = ', mean_bias_db, 'Median bias = ', median_bias_db, 'Standard Deviation = ', std_db\n \n #DAILY VALUES OF BIAS\n mean_bias_each_day=np.zeros(ndays)*np.nan\n median_bias_each_day=np.zeros(ndays)*np.nan\n std_each_day=np.zeros(ndays)*np.nan\n std_error_each_day = np.zeros(ndays)*np.nan\n num_rays_day=np.zeros(ndays)\n \n for day in range(ndays):\n #good_rays has shape (days,vols)\n #find index for volumes with more than 10 good rays\n ind = good_rays[day,:]>10\n #find all rays on each day within these volumes\n bias_one_day = bias_each_ray[day,ind,:].flatten()\n ind2 = np.isfinite(bias_one_day) \n if np.sum(ind2)>0:\n std_error_each_day[day] = scipy.stats.sem(bias_one_day[ind2])\n mean_bias_each_day[day] = np.nanmean(bias_one_day)\n median_bias_each_day[day] = np.nanmedian(bias_one_day)\n std_each_day[day] = np.nanstd(bias_one_day)\n \n #Number of rays for each day\n num_rays_day[day] = np.sum(np.isfinite(bias_one_day))\n\n #Convert to dB \n mean_bias_each_day_db = 10.0*np.log10(1000.0+mean_bias_each_day*1000.0)-30.0\n median_bias_each_day_db = 10.0*np.log10(1000.0+median_bias_each_day*1000.0)-30.0\n std_each_day_db = 10.0*np.log10(1000.0+std_each_day*1000.0)-30.0\n std_error_each_day_db = 10.0*np.log10(1000.0+std_error_each_day*1000.0)-30.0\n \n #Put data into dataframe\n time = pd.to_datetime(dates, format = '%Y%m%d')\n data = pd.DataFrame({'Mean Bias' : mean_bias_each_day_db, 'Median Bias' : median_bias_each_day_db, \n 'Standard Error' : std_error_each_day_db, 'Standard Deviation' : std_each_day_db}, \n index=time) \n \n #Make plot \n fig, ax1 = plt.subplots(figsize=(15,8)) \n plt.errorbar(data.index, mean_bias_each_day_db, std_error_each_day_db, \n 
color='black',fmt='o',markersize='4', elinewidth=2,capsize=4)\n plt.plot([start_date_dt, end_date_dt],[median_bias_db,median_bias_db],'r-',\n label=\"Median Bias = %s\" % round(median_bias_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db,mean_bias_db],'g', \n label=\"Mean Bias = %s\" % round(mean_bias_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db+std_db*2,mean_bias_db+std_db*2],'g--',\n label=\"Standard Deviation = %s\" % round(std_db,2))\n plt.plot([start_date_dt, end_date_dt],[mean_bias_db-std_db*2,mean_bias_db-std_db*2],'g--')\n \n plt.plot(data.index, median_bias_each_day_db,'rx')\n \n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%y'))\n plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))\n plt.gca().xaxis.set_minor_locator(mdates.WeekdayLocator(interval=1))\n plt.xlim(start_date_dt,end_date_dt)\n\n plt.xticks(rotation=90)\n plt.xlabel('Time',{'fontsize':18})\n plt.ylabel('Z Bias (dBZ)',{'fontsize':18})\n plt.yticks(size=18)\n plt.xticks(size=18)\n plt.grid()\n plt.legend(loc=0,fontsize=18)\n \n #If you want to overlay number of rays for each data point then uncomment these lines.\n #May need some tweaking to get the yaxis scale correct for the data you are plotting. \n# ax2=ax1.twinx()\n# ax2.set_ylim(0,20000)\n# ax2.plot(data.index, num_rays_day,'bx-')\n# ax2.set_yticks([5000, 10000])\n# ax2.set_yticks([1000, 2000, 3000, 4000, 7500],minor=True)\n# plt.ylabel('Total number of Rays',{'fontsize':18})\n# plt.yticks(size=18)\n# plt.xlim(start_date_dt,end_date_dt)\n\n #Save the plot\n imgname = f'{img_dir}/Z_calibration_{start_date}_{end_date}.png'\n plt.tight_layout()\n plt.savefig(imgname,dpi=150)", "def redshift_draws(self, s_grid, num=1000):\n n_obj = len(self)\n z_draws = np.zeros((n_obj, num))\n # i_range = np.random.rand_int(0, n_obj, len(se))\n\n for i in tqdm(range(n_obj)):\n cdf_z = self['cdf_z'][i]\n _, z_draws[i, :] = self.rvs_from_cdf(s_grid, cdf_z, num=num)\n\n return z_draws", "def makemap(d,x,y,ra0=0,dec0=0, cd=1./60., nxpix=600, nypix=600):\n\n xy = np.zeros((x.size,2))\n xy[:,0] = x.flatten()\n xy[:,1] = y.flatten()\n\n from astropy import wcs\n\n w = wcs.WCS(naxis=2)\n w.wcs.crval = [ra0, dec0]\n w.wcs.cdelt = [cd,cd]\n w.wcs.crpix = [nxpix/2., nypix/2.]\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n\n pixels = w.wcs_world2pix(xy,0)\n ygrid, xgrid = np.meshgrid(np.arange(nypix),np.arange(nxpix))\n\n pixCens = w.wcs_pix2world(np.array([xgrid.flatten(), ygrid.flatten()]).T,0)\n pixCens[:,0] += 1./2.*cd\n pixCens[:,1] += 1./2.*cd\n pflat = (pixels[:,1].astype(int) + (nypix)*pixels[:,0].astype(int)).astype(int)\n\n\n pEdges = np.arange(nxpix*nypix+1)\n m = np.histogram(pflat,pEdges, weights=d)[0]\n h = np.histogram(pflat,pEdges)[0]\n m = m/h\n return m,pixCens,w", "def _gdal_preprocessing(self, nodatavalue: float = 1000000.0, z_positive_up: bool = True,\n layer_names: tuple = ('depth', 'vertical_uncertainty')):\n\n if self.is_vr:\n raise NotImplementedError(\"VR surfacing doesn't currently return gridded data arrays yet, have to figure this out\")\n\n layerdata = []\n geo_transform = []\n finalnames = []\n for cnt, layer in enumerate(layer_names):\n nodex, nodey, nodez, valid, newmins, newmaxs = self.return_surf_xyz(layer)\n if cnt == 0:\n cellx = nodex[0] - self.min_grid_size / 2 # origin of the grid is the cell, not the node\n celly = nodey[-1] + self.min_grid_size / 2\n geo_transform = [np.float32(cellx), self.min_grid_size, 0, np.float32(celly), 0, -self.min_grid_size]\n if z_positive_up:\n if layer.lower() == 
'depth':\n nodez = nodez * -1 # geotiff depth should be positive up, make all depths negative\n layer = 'Elevation'\n nodez = nodez[:, ::-1]\n nodez[np.isnan(nodez)] = nodatavalue\n layerdata.append(nodez)\n finalnames.append(layer)\n return layerdata, geo_transform, layer_names", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def comparison():\n path = \"Data/data_fronts/\"\n path1 = \"Results/labelled_images1010/fronts/\"\n\n #computes the areas for the first frame in order to normalize the other areas\n pol0 = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.0.png.txt\",sep =' '))\n #makes an object polygon in order to compute the area\n pol0 = np.array(pol0)\n pol0 = Polygon(pol0)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n #makes an object polygon in order to compute the area\n 
polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n pol = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.\"+str(i)+\".png.txt\",sep =' '))\n pol = np.array(pol)\n pol = Polygon(pol)\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def compute_at_zref(itile, reso_deg, mode, date, block_choice, tile_dict=None):\n if tile_dict is not None:\n tile = tile_dict\n else:\n tile = stats.date_mode_filter(mode, date, itile)\n CT, SA, RI, BVF2 = tile['CT'], tile['SA'], tile['RHO'], tile['BVF2']\n nanidx = np.where(np.isnan(CT) | np.isnan(SA))\n lat, lon = tile['LATITUDE'], tile['LONGITUDE']\n grid_lat, grid_lon = stats.grid_coordinate(itile, reso_deg)\n lon_deg, lat_deg = np.meshgrid(grid_lon, grid_lat)\n\n lon_rad = np.deg2rad(lon_deg)\n lat_rad = np.deg2rad(lat_deg)\n reso_rad = np.deg2rad(reso_deg)\n\n nlat, nlon = np.shape(lon_deg)\n\n # RI is rho in situ\n\n nz = len(zref)\n nbprof = len(CT)\n\n variables = {}\n\n for b in block_choice:\n # gridded arrays of CT, SA et RI means\n for i, v in enumerate(var_choice['zmean']):\n variables[v] = np.zeros((nz, nlat, nlon))\n\n for k in range(nbprof):\n # print('%4i/%i' % (k, nbprof))\n # todo: weigh in time using juld,\n # e.g. 
only winter statistics\n time_weight = 1.\n xlon_rad = np.deg2rad(lon[k])\n xlat_rad = np.deg2rad(lat[k])\n weight = general.compute_weight(lon_rad, lat_rad,\n xlon_rad, xlat_rad,\n reso_rad)\n weight *= time_weight\n for l in range(nz):\n if np.isnan(CT[k, l]) or np.isnan(SA[k, l]):\n pass\n else:\n variables['NBbar'][l, :, :] += weight\n variables['CTbar'][l, :, :] += weight*CT[k, l]\n variables['SAbar'][l, :, :] += weight*SA[k, l]\n variables['Ribar'][l, :, :] += weight*RI[k, l]\n variables['BVF2bar'][l, :, :] += weight*BVF2[k, l]\n\n # normalize with the number of profiles (fractional\n # because NBbar is fractionnal)\n coef = 1./variables['NBbar']\n coef[variables['NBbar'] < 1] = np.NaN\n\n variables['CTbar'] *= coef\n variables['SAbar'] *= coef\n variables['Ribar'] *= coef\n variables['BVF2bar'] *= coef\n\n if b == 'zstd' or b == 'zdz':\n xlon_rad = np.deg2rad(lon)\n xlat_rad = np.deg2rad(lat)\n for i, v in enumerate(var_choice[b]):\n variables[v] = np.zeros((nz, nlat, nlon))\n variables['NBstd'] = variables['NBbar']\n\n if len(lat) == 0:\n pass\n else:\n for j in range(nlat):\n for i in range(nlon):\n if len(lat) < j+1:\n pass\n else:\n time_weight = 1.\n weight = general.compute_weight(lon_rad[j, i],\n lat_rad[j, i],\n xlon_rad, xlat_rad,\n reso_rad)\n weight *= time_weight\n drho = RI - variables['Ribar'][:, j, i]\n dbvf2 = BVF2 - variables['BVF2bar'][:, j, i]\n dCT = CT - variables['CTbar'][:, j, i]\n interpolator = ip.interp1d(\n variables['Ribar'][:, j, i],\n zref, bounds_error=False)\n p = gsw.p_from_z(-zref, lat[j])\n g = gsw.grav(lat[j], p)\n cs = gsw.sound_speed(\n variables['SAbar'][:, j, i],\n variables['CTbar'][:, j, i], p)\n rho0 = variables['Ribar'][:, j, i].copy()\n zrho = interpolator(RI)\n dzstar = zrho-zref\n dz = dzstar/(1.+rho0*g*dzstar/(cs**2*drho))\n dSA = SA - variables['SAbar'][:, j, i]\n\n weight = weight[:, np.newaxis] + \\\n np.zeros_like(zref)\n weight[np.where(np.isnan(dz) | np.isnan(\n drho) | np.isnan(dCT) | np.isnan(dSA))] = 0.\n weight[nanidx] = 0.\n\n def average(field):\n return np.nansum(weight*field, axis=0)\n if b == 'zstd':\n variables['CTstd'][:, j, i] = average(dCT**2)\n variables['SAstd'][:, j, i] = average(dSA**2)\n variables['BVF2std'][:, j,\n i] = average(dbvf2**2)\n variables['Ristd'][:, j, i] = average(drho**2)\n\n if b == 'zdz':\n\n variables['DZmean'][:, j, i] = average(dz)\n variables['DZstd'][:, j, i] = average(dz**2)\n variables['DZskew'][:, j, i] = average(dz**3)\n variables['EAPE'][:, j, i] = average(dz*drho)\n\n if b in ['zstd', 'zdz']:\n coef = 1./(variables['NBstd']-1)\n coef[variables['NBstd'] < 2] = np.nan\n\n if b == 'zstd':\n variables['CTstd'] = np.sqrt(coef*variables['CTstd'])\n variables['SAstd'] = np.sqrt(coef*variables['SAstd'])\n variables['Ristd'] = np.sqrt(coef*variables['Ristd'])\n variables['BVF2std'] = np.sqrt(coef*variables['BVF2std'])\n\n elif b == 'zdz':\n variables['DZmean'] *= coef\n variables['DZstd'] = np.sqrt(coef*variables['DZstd'])\n variables['DZskew'] *= coef/variables['DZstd']**3\n variables['EAPE'] *= 0.5*coef\n\n variables['lat'] = lat_deg\n variables['lon'] = lon_deg\n print(variables['CTstd'].min())\n print(variables['CTstd'].max())\n print(variables['SAstd'].min())\n print(variables['SAstd'].max())\n\n return variables", "def test_compare_zmats(self):\n z_1 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2|3_0|1', 'A_2|3_0|1_1|0', None),\n ('R_2|3_0|1', 'A_2|3_0|1_1|0', 'D_3_1_0_2')),\n 'vars': {'R_1_0': 1.2451214479859707, 'D_3_1_0_2': 
180.00000435340846, 'R_2|3_0|1': 1.0308198031527174,\n 'A_2|3_0|1_1|0': 112.42663889936155}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n z_2 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2|3_0|1', 'A_2|3_0|1_1|0', None),\n ('R_2|3_0|1', 'A_2|3_0|1_1|0', 'D_3_1_0_2')),\n 'vars': {'R_1_0': 1.2458481980184417, 'D_3_1_0_2': 359.99999758516344, 'R_2|3_0|1': 1.0292894916884854,\n 'A_2|3_0|1_1|0': 115.61126115172507}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n z_3 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2|3_0|1', 'A_2|3_0|1_1|0', None),\n ('R_2|3_0|1', 'A_2|3_0|1_1|0', 'D_3_1_0_2')),\n 'vars': {'R_1_0': 1.24584819, 'D_3_1_0_2': 360, 'R_2|3_0|1': 1.0292894916884854,\n 'A_2|3_0|1_1|0': 115.61126115172507}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n self.assertFalse(converter.compare_zmats(z_1, z_2))\n self.assertTrue(converter.compare_zmats(z_2, z_2))\n self.assertTrue(converter.compare_zmats(z_2, z_3))", "def process_image(overviews, db_graph, input_filename, color, out_raster_srs):\n if verbose > 0:\n print(\"~~~process_image\")\n input_image = gdal.Open(input_filename)\n stem = Path(input_filename).stem\n if not(\"dataSet\" in overviews):\n overviews['dataSet'] = {}\n overviews['dataSet']['boundingBox'] = {}\n overviews['dataSet']['limits'] = {}\n\n tile_limits = get_tile_limits(input_filename)\n\n if not(\"LowerCorner\" in overviews['dataSet']['boundingBox']):\n overviews['dataSet']['boundingBox'] = tile_limits\n else:\n if tile_limits['LowerCorner'][0] < overviews['dataSet']['boundingBox']['LowerCorner'][0]:\n overviews['dataSet']['boundingBox']['LowerCorner'][0] = tile_limits['LowerCorner'][0]\n if tile_limits['LowerCorner'][1] < overviews['dataSet']['boundingBox']['LowerCorner'][1]:\n overviews['dataSet']['boundingBox']['LowerCorner'][1] = tile_limits['LowerCorner'][1]\n if tile_limits['UpperCorner'][0] > overviews['dataSet']['boundingBox']['UpperCorner'][0]:\n overviews['dataSet']['boundingBox']['UpperCorner'][0] = tile_limits['UpperCorner'][0]\n if tile_limits['UpperCorner'][1] > overviews['dataSet']['boundingBox']['UpperCorner'][1]:\n overviews['dataSet']['boundingBox']['UpperCorner'][1] = tile_limits['UpperCorner'][1]\n\n # for z in tiles:\n for tile_z in range(overviews['level']['min'], overviews['level']['max'] + 1):\n print('Niveau de zoom : ', tile_z)\n\n resolution = overviews['resolution'] * 2 ** (overviews['level']['max'] - tile_z)\n\n MinTileCol = \\\n math.floor(round((tile_limits['LowerCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8))\n MinTileRow = \\\n math.floor(round((overviews['crs']['boundingBox']['ymax']-tile_limits['UpperCorner'][1])/(resolution*overviews['tileSize']['height']),8))\n MaxTileCol = \\\n math.ceil(round((tile_limits['UpperCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8)) - 1\n MaxTileRow = \\\n math.ceil(round((overviews['crs']['boundingBox']['ymax']-tile_limits['LowerCorner'][1])/(resolution*overviews['tileSize']['height']),8)) - 1\n\n if not( str(tile_z) in overviews['dataSet']['limits'] ):\n overviews['dataSet']['limits'][str(tile_z)] = {\n 'MinTileCol': MinTileCol,\n 'MinTileRow': MinTileRow,\n 'MaxTileCol': MaxTileCol,\n 'MaxTileRow': MaxTileRow,\n }\n\n else:\n if MinTileCol < overviews['dataSet']['limits'][str(tile_z)]['MinTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileCol'] = MinTileCol\n if MinTileRow < 
overviews['dataSet']['limits'][str(tile_z)]['MinTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileRow'] = MinTileRow\n if MaxTileCol > overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol'] = MaxTileCol\n if MaxTileRow > overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow'] = MaxTileRow\n\n for tile_x in range(MinTileCol, MaxTileCol + 1): \n for tile_y in range(MinTileRow, MaxTileRow + 1):\n # on cree une image 3 canaux pour la tuile\n opi = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on reech l'OPI dans cette image\n gdal.Warp(opi, input_image)\n # si necessaire on cree le dossier de la tuile\n tile_dir = args.cache+'/'+str(tile_z)+'/'+str(tile_y)+'/'+str(tile_x)\n Path(tile_dir).mkdir(parents=True, exist_ok=True)\n # on export en jpeg (todo: gerer le niveau de Q)\n PNG_DRIVER.CreateCopy(tile_dir+\"/\"+stem+\".png\", opi)\n # on cree une image mono canal pour la tuile\n mask = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on rasterise la partie du graphe qui concerne ce cliche\n gdal.Rasterize(mask, db_graph,\n SQLStatement='select geom from ' + args.table + ' where cliche = \\''+stem+'\\' ')\n img_mask = mask.GetRasterBand(1).ReadAsArray()\n # si le mask est vide, on a termine\n val_max = np.amax(img_mask)\n if val_max > 0:\n # on cree le graphe et l'ortho\n ortho = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n graph = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n if Path(tile_dir+\"/ortho.png\").is_file():\n existing_ortho = gdal.Open(tile_dir+\"/ortho.png\")\n existing_graph = gdal.Open(tile_dir+\"/graph.png\")\n else:\n existing_ortho = False\n existing_graph = False\n for i in range(3):\n opi_i = opi.GetRasterBand(i+1).ReadAsArray()\n if existing_ortho:\n ortho_i = existing_ortho.GetRasterBand(i+1).ReadAsArray()\n else:\n ortho_i = ortho.GetRasterBand(i+1).ReadAsArray()\n opi_i[(img_mask == 0)] = 0\n ortho_i[(img_mask != 0)] = 0\n ortho.GetRasterBand(i+1).WriteArray(np.add(opi_i, ortho_i))\n if existing_graph:\n graph_i = existing_graph.GetRasterBand(i+1).ReadAsArray()\n else:\n graph_i = graph.GetRasterBand(i+1).ReadAsArray()\n graph_i[(img_mask != 0)] = color[i]\n graph.GetRasterBand(i+1).WriteArray(graph_i)\n existing_ortho = None\n existing_graph = None\n PNG_DRIVER.CreateCopy(tile_dir+\"/ortho.png\", ortho)\n PNG_DRIVER.CreateCopy(tile_dir+\"/graph.png\", graph)" ]
[ "0.71548915", "0.68493986", "0.6522732", "0.62442964", "0.62096614", "0.60611725", "0.60029554", "0.5962458", "0.5880883", "0.58487236", "0.5846595", "0.5792119", "0.57755446", "0.57681596", "0.5670428", "0.55730975", "0.5560811", "0.5551571", "0.55226356", "0.55010265", "0.5425854", "0.54105395", "0.54085886", "0.5401833", "0.53954977", "0.5377328", "0.5376893", "0.53562975", "0.5347076", "0.53305864", "0.5329128", "0.53253865", "0.53216124", "0.53145325", "0.5313748", "0.5302233", "0.5300853", "0.52817315", "0.5275051", "0.5263463", "0.5262771", "0.5240082", "0.52333885", "0.52305156", "0.52226126", "0.52173924", "0.5209621", "0.5197813", "0.5189624", "0.51815164", "0.51698697", "0.51679426", "0.5153631", "0.51469797", "0.51451844", "0.5121058", "0.5114411", "0.5110415", "0.5106509", "0.51032144", "0.5101796", "0.5097314", "0.5089722", "0.50891954", "0.50810915", "0.5078652", "0.50745267", "0.5072531", "0.50644255", "0.5057849", "0.5049986", "0.5045194", "0.50414", "0.5030309", "0.5024453", "0.50212455", "0.5016051", "0.5010401", "0.50103086", "0.50056165", "0.49969473", "0.49948537", "0.4993398", "0.49922097", "0.49906644", "0.49906033", "0.49905255", "0.49863213", "0.4982017", "0.4975571", "0.49700728", "0.49637908", "0.495984", "0.49509072", "0.49466792", "0.49298885", "0.49277285", "0.49250275", "0.4920888", "0.49170423" ]
0.6187095
5
Export a Google Earth Engine featureCollection to an asset folder
def export_table_to_cloudstorage(fc,description,fileNamePrefix): task = ee.batch.Export.table.toCloudStorage( collection = ee.FeatureCollection(fc), description = description, bucket = GCS_BUCKET, fileNamePrefix = GCS_OUTPUT_PATH + fileNamePrefix, fileFormat = "CSV" ) task.start()
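The stored document above answers the query with a Cloud Storage export rather than the asset-folder export the query describes. For reference, a minimal sketch of the asset variant is shown below; it assumes an already authenticated and initialized Earth Engine session, and the function name and asset path are illustrative placeholders, not part of the corpus.

import ee

ee.Initialize()  # assumes ee.Authenticate() has already been run for this account

def export_table_to_asset(fc, description, asset_id):
    # Start an export of a FeatureCollection to an Earth Engine asset
    # (the asset-folder counterpart of Export.table.toCloudStorage).
    task = ee.batch.Export.table.toAsset(
        collection=ee.FeatureCollection(fc),
        description=description,
        assetId=asset_id,  # e.g. "users/<your_user>/exports/" + description (hypothetical path)
    )
    task.start()
    return task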
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_geojson(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".geojson\",\n filetypes=((\"geo json\", \"*.geojson\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n self.tabs.window.aistracker.create_geojson_map(outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)", "def ee_export_vector_silent(ee_object, filename, selectors=None):\n import requests\n import zipfile\n #ee_initialize()\n\n if not isinstance(ee_object, ee.FeatureCollection):\n print('The ee_object must be an ee.FeatureCollection.')\n return\n\n allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']\n filename = os.path.abspath(filename)\n basename = os.path.basename(filename)\n name = os.path.splitext(basename)[0]\n filetype = os.path.splitext(basename)[1][1:].lower()\n filename_shp = filename\n\n if filetype == 'shp':\n filename = filename.replace('.shp', '.zip')\n\n if not (filetype.lower() in allowed_formats):\n print('The file type must be one of the following: {}'.format(\n ', '.join(allowed_formats)))\n return\n\n if selectors is None:\n selectors = ee_object.first().propertyNames().getInfo()\n elif not isinstance(selectors, list):\n print(\"selectors must be a list, such as ['attribute1', 'attribute2']\")\n return\n else:\n allowed_attributes = ee_object.first().propertyNames().getInfo()\n for attribute in selectors:\n if not (attribute in allowed_attributes):\n print('Attributes must be one chosen from: {} '.format(\n ', '.join(allowed_attributes)))\n return\n\n try:\n #print('Generating URL ...')\n url = ee_object.getDownloadURL(\n filetype=filetype, selectors=selectors, filename=name)\n #print('Downloading data from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n\n if r.status_code != 200:\n print('An error occurred while downloading. 
\\n Retrying ...')\n try:\n new_ee_object = ee_object#.map(filter_polygons)\n print('Generating URL ...')\n url = new_ee_object.getDownloadURL(\n filetype=filetype, selectors=selectors, filename=name)\n print('Downloading data from {}\\nPlease wait ...'.format(url))\n r = requests.get(url, stream=True)\n except Exception as e:\n print(e)\n\n with open(filename, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n except Exception as e:\n print('An error occurred while downloading.')\n print(e)\n return", "def _on_collections_export(self, evt=None):\n \n # remove old exports\n for name in os.listdir(self._library.library_path):\n if EXPORT_PATTERN.match(name):\n os.remove(os.path.join(self._library.library_path, name))\n \n # get collections\n collections = self._library.search(core.Query(\"\", core.Collection.NAME))\n collections = [c for c in collections if c.export]\n \n # export collections\n for collection in collections:\n \n # get query\n if collection.query:\n query = core.Query(collection.query, core.Article.NAME)\n else:\n query = core.Query(\"%s[COLLECTIONID]\" % collection.dbid, core.Article.NAME)\n \n # get articles\n articles = self._library.search(query)\n \n # make export\n text = \"\"\n for article in articles:\n text += article.format(\"PDF: [PDF]\\n[TI]\\n[AU]\\n[CI]\\n\\n\")\n \n # init filename and path\n filename = \"_export_\"\n filename += collection.title.replace(\" \", \"_\")\n filename += \".txt\"\n path = os.path.join(self._library.library_path, filename)\n \n # save to file\n with open(path, 'w', encoding=\"utf-8\") as export:\n export.write(text)", "def save_to_geojson(self, topology_map, filename):", "def export_coll_to_drive(collection,\n folder=None,\n scale=None,\n region=None,\n crs=None,\n verbose=False,\n save_metadata=True,\n metadata_folder='.'):\n\n collection = collection.filterBounds(region)\n\n # get size info\n coll_size = collection.size().getInfo()\n sys.stdout.write(\"Exporting {} images from this collection.\\n\".format(coll_size))\n\n # convert collection to list\n coll_list = collection.toList(coll_size)\n\n # loop over all collection images and export\n for img_indx in range(coll_size):\n img = ee.Image(coll_list.get(img_indx))\n\n EEHelper.export_image_to_drive(img,\n folder=folder,\n scale=scale,\n crs=crs,\n region=region,\n verbose=verbose,\n save_metadata=save_metadata,\n metadata_folder=metadata_folder)", "def _export_vector(self, vector_name,\n format=\"GML\",\n additional_options=[]):\n # Export the layer\n prefix = \"\"\n if format == \"GML\":\n prefix = \".gml\"\n if format == \"GeoJSON\":\n prefix = \".json\"\n if format == \"ESRI_Shapefile\":\n prefix = \"\"\n if format == \"SQLite\":\n prefix = \".sqlite\"\n if format == \"GPKG\":\n prefix = \".gpkg\"\n if format == \"CSV\":\n prefix = \".csv\"\n\n # Remove a potential mapset\n file_name = vector_name.split(\"@\")[0] + prefix\n archive_name = file_name + \".zip\"\n # switch into the temporary working directory to use relative path for zip\n os.chdir(self.temp_file_path)\n\n module_name = \"v.out.ogr\"\n args = [\"-e\", \"input=%s\"%vector_name, \"format=%s\"%format,\n \"output=%s\"%file_name]\n\n if additional_options:\n args.extend(additional_options)\n\n # Export\n p = Process(exec_type=\"grass\",\n executable=module_name,\n executable_params=args,\n stdin_source=None)\n\n self._update_num_of_steps(1)\n self._run_module(p)\n\n # Compression\n compressed_output_path = os.path.join(self.temp_file_path, archive_name)\n\n executable = 
\"/usr/bin/zip\"\n args = [\"-r\", archive_name, file_name]\n\n p = Process(exec_type=\"exec\",\n executable=executable,\n executable_params=args,\n stdin_source=None)\n\n self._update_num_of_steps(1)\n self._run_process(p)\n\n return archive_name, compressed_output_path", "def write_geojson(vec:gpd.GeoDataFrame, dest):\n\t\tdest = str(dest)\n\n\t\t# WGS 84\n\t\t#vec = vec.to_crs({'init': 'epsg:4326'})\n\n\t\tif os.path.isfile(dest):\n\t\t\tos.remove(dest)\n\t\t\t\n\t\tvec.to_file(dest, driver='GeoJSON', encoding='utf-8')", "def to_feature_collection(self,\r\n name=None,\r\n drawing_info=None,\r\n extent=None,\r\n global_id_field=None):\r\n from arcgis.features import FeatureCollection\r\n import uuid\r\n import string\r\n import random\r\n\r\n if name is None:\r\n name = random.choice(string.ascii_letters) + uuid.uuid4().hex[:5]\r\n template = {\r\n 'showLegend' : True,\r\n 'layers' : []\r\n }\r\n if extent is None:\r\n ext = self.geoextent\r\n extent = {\r\n \"xmin\" : ext[0],\r\n \"ymin\" : ext[1],\r\n \"xmax\" : ext[2],\r\n \"ymax\" : ext[3],\r\n \"spatialReference\" : self.sr\r\n }\r\n fs = self.__feature_set__\r\n fields = []\r\n for fld in fs['fields']:\r\n if fld['name'].lower() == fs['objectIdFieldName'].lower():\r\n fld['editable'] = False\r\n fld['sqlType'] = \"sqlTypeOther\"\r\n fld['domain'] = None\r\n fld['defaultValue'] = None\r\n fld['nullable'] = False\r\n else:\r\n fld['editable'] = True\r\n fld['sqlType'] = \"sqlTypeOther\"\r\n fld['domain'] = None\r\n fld['defaultValue'] = None\r\n fld['nullable'] = True\r\n if drawing_info is None:\r\n di = {\r\n 'renderer' : {\r\n 'labelingInfo' : None,\r\n 'label' : \"\",\r\n 'description' : \"\",\r\n 'type' : 'simple',\r\n 'symbol' : None\r\n\r\n }\r\n }\r\n symbol = None\r\n if symbol is None:\r\n if fs['geometryType'] in [\"esriGeometryPoint\", \"esriGeometryMultipoint\"]:\r\n di['renderer']['symbol'] = {\"color\":[0,128,0,128],\"size\":18,\"angle\":0,\r\n \"xoffset\":0,\"yoffset\":0,\r\n \"type\":\"esriSMS\",\r\n \"style\":\"esriSMSCircle\",\r\n \"outline\":{\"color\":[0,128,0,255],\"width\":1,\r\n \"type\":\"esriSLS\",\"style\":\"esriSLSSolid\"}}\r\n elif fs['geometryType'] == 'esriGeometryPolyline':\r\n di['renderer']['symbol'] = {\r\n \"type\": \"esriSLS\",\r\n \"style\": \"esriSLSDot\",\r\n \"color\": [0,128,0,128],\r\n \"width\": 1\r\n }\r\n elif fs['geometryType'] == 'esriGeometryPolygon':\r\n di['renderer']['symbol'] = {\r\n \"type\": \"esriSFS\",\r\n \"style\": \"esriSFSSolid\",\r\n \"color\": [0,128,0,128],\r\n \"outline\": {\r\n \"type\": \"esriSLS\",\r\n \"style\": \"esriSLSSolid\",\r\n \"color\": [110,110,110,255],\r\n \"width\": 1\r\n }\r\n }\r\n else:\r\n di['renderer']['symbol'] = symbol\r\n else:\r\n di = drawing_info\r\n layer = {\r\n 'featureSet' : {'features' : fs['features'],\r\n 'geometryType' : fs['geometryType']\r\n },\r\n 'layerDefinition' : {\r\n 'htmlPopupType' : 'esriServerHTMLPopupTypeNone',\r\n 'objectIdField' : fs['objectIdFieldName'] or \"OBJECTID\",\r\n #'types' : [],\r\n 'defaultVisibility' : True,\r\n 'supportsValidateSql' : True,\r\n 'supportsAttachmentsByUploadId' : True,\r\n 'useStandardizedQueries' : False,\r\n 'supportsApplyEditsWithGlobalIds' : True,\r\n 'standardMaxRecordCount' : 32000,\r\n 'supportsTruncate' : False,\r\n 'extent' : extent,\r\n 'maxScale' : 0,\r\n 'supportsAppend' : True,\r\n 'supportsCalculate' : True,\r\n 'copyrightText' : \"\",\r\n #'templates' : [],\r\n 'description' : \"\",\r\n #'relationships' : [],\r\n 'supportsRollbackOnFailureParameter' : True,\r\n 'hasM' : 
False,\r\n 'displayField' : \"\",\r\n 'drawingInfo' : di,\r\n 'type' : 'Feature Layer',\r\n 'supportedQueryFormats' : 'JSON, geoJSON',\r\n 'isDataVersioned' : False,\r\n 'maxRecordCount' : 2000,\r\n 'minScale' : 0,\r\n 'supportsStatistics' : True,\r\n 'hasAttachments' : False,\r\n #'indexes' : [],\r\n 'tileMaxRecordCount' : 8000,\r\n 'supportsAdvancedQueries' : True,\r\n #'globalIdField' : \"\",\r\n 'hasZ' : False,\r\n 'name' : name,\r\n 'id' : 0,\r\n 'allowGeometryUpdates' : True,\r\n #'typeIdField' : \"\",\r\n 'geometryType' : fs['geometryType'],\r\n 'currentVersion' : 10.51,\r\n #'maxRecordCountFactor' : 1,\r\n 'supportsCoordinatesQuantization' : True,\r\n 'fields' : fs['fields'],\r\n 'hasStaticData' : True,# False\r\n 'capabilities' : 'Create,Delete,Query,Update,Editing,Extract,Sync',\r\n 'advancedQueryCapabilities' : {'supportsReturningGeometryCentroid': False,\r\n 'supportsQueryRelatedPagination': True,\r\n 'supportsHavingClause': True,\r\n 'supportsOrderBy': True,\r\n 'supportsPaginationOnAggregatedQueries': True,\r\n 'supportsQueryWithDatumTransformation': True,\r\n 'supportsAdvancedQueryRelated': True,\r\n 'supportsOutFieldSQLExpression': True,\r\n 'supportsPagination': True,\r\n 'supportsStatistics': True,\r\n 'supportsSqlExpression': True,\r\n 'supportsQueryWithDistance': True,\r\n 'supportsReturningQueryExtent': True,\r\n 'supportsDistinct': True,\r\n 'supportsQueryWithResultType': True},\r\n\r\n }\r\n }\r\n if global_id_field is not None:\r\n layer['layerDefinition']['globalIdField'] = global_id_field\r\n return FeatureCollection(layer)", "def export_classification(out_name, table, asset_root, region, years, export='asset'):\n fc = ee.FeatureCollection(table)\n roi = ee.FeatureCollection(region)\n mask = roi.geometry().bounds().getInfo()['coordinates']\n\n classifier = ee.Classifier.randomForest(\n numberOfTrees=100,\n variablesPerSplit=0,\n minLeafPopulation=1,\n outOfBagMode=False).setOutputMode('CLASSIFICATION')\n\n input_props = fc.first().propertyNames().remove('YEAR').remove('POINT_TYPE').remove('system:index')\n\n trained_model = classifier.train(fc, 'POINT_TYPE', input_props)\n\n for yr in years:\n input_bands = stack_bands(yr, roi)\n annual_stack = input_bands.select(input_props)\n classified_img = annual_stack.classify(trained_model).int().set({\n 'system:index': ee.Date('{}-01-01'.format(yr)).format('YYYYMMdd'),\n 'system:time_start': ee.Date('{}-01-01'.format(yr)).millis(),\n 'system:time_end': ee.Date('{}-12-31'.format(yr)).millis(),\n 'image_name': out_name,\n 'class_key': '0: irrigated, 1: rainfed, 2: uncultivated, 3: wetland'})\n\n if export == 'asset':\n task = ee.batch.Export.image.toAsset(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n assetId=os.path.join(asset_root, '{}_{}'.format(out_name, yr)),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n\n elif export == 'cloud':\n task = ee.batch.Export.image.toCloudStorage(\n image=classified_img,\n description='{}_{}'.format(out_name, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(yr, out_name),\n region=mask,\n scale=30,\n pyramidingPolicy={'.default': 'mode'},\n maxPixels=1e13)\n else:\n raise NotImplementedError('choose asset or cloud for export')\n\n task.start()\n print(os.path.join(asset_root, '{}_{}'.format(out_name, yr)))", "def ee_export_image_collection(ee_image_collection: ee.ImageCollection,\n out_dir: str,\n scale: float = None,\n crs: str = None,\n region=None,\n file_per_band: bool = False) -> None:\n\n if not 
isinstance(ee_image_collection, ee.ImageCollection):\n raise TypeError(\n 'The ee_image_collection must be an ee.ImageCollection.')\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n try:\n count = int(ee_image_collection.size().getInfo())\n LOG.info(\"Total number of images: {}\\n\".format(count))\n\n images = ee_image_collection.toList(count)\n for i in range(count):\n image = ee.Image(images.get(i))\n name = image.get('system:index').getInfo() + '.tif'\n filename = os.path.join(os.path.abspath(out_dir), name)\n LOG.info('Exporting {}/{}: {}'.format(i + 1, count, name))\n ee_export_image(image,\n filename=filename,\n scale=scale,\n crs=crs,\n region=region,\n file_per_band=file_per_band)\n\n except Exception as e:\n LOG.error(e)", "def test_to_geojson(self):\n fc = self.read_feature()\n dest_filename = str(self.datadir.join('test.geojson'))\n fc.to_geojson(dest_filename)\n fc_check = read_feature_collection(dest_filename)\n self.check_feature(fc_check.features[0])", "def exportGIS(self, tabindex, curcycle):\n ubdata.exportGISShapeFile(self, tabindex, curcycle)\n ubdata.writeGeoJSONTempFiles(self, tabindex, curcycle)\n return True", "def export_dataset(self):\n raise NotImplementedError", "def export(**kwargs):\n\n import os\n\n interface = None # Holds the actual FileInterface for the specified data format\n vertex_index_to_file_key_map = None\n element_index_to_file_key_map = None\n\n if 'file_name' in kwargs:\n fname = kwargs['file_name']\n else:\n raise ValueError(\"file_name must be specified.\")\n \n extension = os.path.splitext(fname)[1].lower()\n\n if extension=='.msh':\n from bempp.api.file_interfaces import gmsh\n interface = gmsh.GmshInterface()\n \n if int('grid' in kwargs) + int('grid_function' in kwargs) != 1:\n raise ValueError(\"Exactly one of 'grid' or 'grid_function' must be specified\")\n\n if 'grid' in kwargs:\n grid = kwargs['grid']\n elif 'grid_function' in kwargs:\n grid = kwargs['grid_function'].grid\n\n number_of_vertices = grid.leaf_view.entity_count(2)\n number_of_elements = grid.leaf_view.entity_count(0)\n\n offset = interface.index_offset\n\n if 'vertex_index_to_file_key_map' in kwargs:\n vertex_index_to_file_key_map = kwargs['vertex_index_to_file_key_map']\n else:\n vertex_index_to_file_key_map = range(offset,number_of_vertices+offset)\n if 'element_index_to_file_key_map' in kwargs:\n element_index_to_file_key_map = kwargs['element_index_to_file_key_map']\n else:\n element_index_to_file_key_map = range(offset,number_of_elements+offset)\n\n # Create the vertex and element structure\n\n from collections import OrderedDict\n\n vertex_iterator = grid.leaf_view.entity_iterator(2)\n element_iterator = grid.leaf_view.entity_iterator(0)\n index_set = grid.leaf_view.index_set()\n\n vertices = OrderedDict([(vertex_index_to_file_key_map[index_set.entity_index(vertex)],vertex.geometry.corners[:,0])\n for vertex in vertex_iterator])\n elements = OrderedDict([(element_index_to_file_key_map[index_set.entity_index(element)],\n {'data':[vertex_index_to_file_key_map[index_set.sub_entity_index(element,n,2)] for n in range(3)],\n 'domain_index':element.domain}) for element in element_iterator])\n\n interface.add_grid_data(vertices,elements)\n\n # Evaluate data\n\n if 'grid_function' in kwargs:\n fun = kwargs['grid_function']\n data_type = kwargs.get('data_type',interface.default_data_type)\n\n if 'transformation' in kwargs:\n transformation = kwargs['transformation']\n else:\n transformation = lambda x: x\n\n index_set = grid.leaf_view.index_set()\n\n if data_type == 
'element_node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates))\n interface.add_element_node_data(data,kwargs.get('label','element_node_data'))\n elif data_type == 'node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(vertex_index_to_file_key_map)\n for element in grid.leaf_view.entity_iterator(0):\n local_data = transformation(fun.evaluate(element,local_coordinates))\n for i in range(3):\n data[vertex_index_to_file_key_map[index_set.sub_entity_index(element,i,2)]] = local_data[:,i]\n interface.add_node_data(data,kwargs.get('label','node_data'))\n elif data_type == 'element':\n local_coordinates = _np.array([[1./3],[1./3]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates).ravel())\n interface.add_element_data(data,kwargs.get('label','element_data'))\n else:\n raise ValueError(\"data_type must be one of 'node', 'element', or 'element_node'\")\n\n interface.write(kwargs['file_name'])", "def _to_arcpy_featureset(self):\r\n if HAS_ARCPY:\r\n import uuid, string, random\r\n l = []\r\n for i in range(3):\r\n l.append(random.choice(string.ascii_letters))\r\n l = \"\".join(l)\r\n out_name = l\r\n res = self.to_featureclass(out_location='in_memory',\r\n out_name=out_name)\r\n\r\n feature_set = arcpy.FeatureSet()\r\n feature_set.load(res)\r\n return feature_set\r\n else:\r\n raise Exception(\"ArcPy must be present to convert to arcpy.FeatureSet object\")", "def featurefile(request):\n featurename = request.param[0]\n return os.path.join(__FEATURE_FILES_DIR__, featurename + \".feature\")", "def save_features_to_file_by_set(self, _set):\n with open(self.features_save_path + \"features_\" + _set, 'w') as f:\n for document in self.sets[_set]:\n f.write(self.get_feature_string_by_document(_set, document))", "def create_feature_set(es_host: str, model_name: str) -> None:\n features_path = PATH / f'{model_name}' / 'features'\n feature_set = {\n 'featureset': {\n 'name': model_name,\n 'features': [process_feature_file(str(filename)) for filename in\n features_path.glob('*')]\n }\n }\n post_feature_set(feature_set, model_name, es_host)", "def download_features():\r\n\twith open('places.json') as f:\r\n\t\tplaces = json.load(f)\r\n\r\n\tfor place in places.values():\r\n\t\tr = get_url('http://www.caiusjcr.org.uk/roomCaius/index.php?location={}'.format(place['name'].replace(' ', '%20')), allow_redirects=False)\r\n\r\n\t\twith open('dump/features/place-{}.html'.format(place['name']), 'w') as f:\r\n\t\t\tf.write(r.encode('utf8'))\r\n\r\n\t\tprint place['name']", "def save(self, export_path: str):", "def export_alembic(self, path, geo_nodes, use_local_space=False):\n if os.path.exists(path):\n raise RuntimeError('Given path aleady exist: {}'.format(path))\n\n export_space = '' if use_local_space else '-worldSpace'\n args = [\n '-uv',\n export_space,\n '-frameRange', str(self._model.frame_in - 1),\n str(self._model.frame_out + 1),\n '-frameRelativeSample', str(self._model.motion_blur_in),\n '-frameRelativeSample', '0',\n '-frameRelativeSample', str(self._model.motion_blur_out),\n '-file', path,\n ]\n for node in geo_nodes:\n if 
mc.nodeType(node) != 'transform':\n node = mc.listRelatives(node, parent=True, fullPath=True)[0]\n args.extend([\n '-root', node\n ])\n\n mc.AbcExport(jobArg=[' '.join(args)])", "def export_to_gsas():\n # Get workflow\n work_flow = my_data.get()\n\n output_file_name = '/tmp/acceptance_test.gda'\n\n # Clear the file if it exists.\n if os.path.exists(output_file_name):\n os.remove(output_file_name)\n\n status = work_flow.export_gsas_file(run_number=80231)\n assert status\n assert os.path.exists(output_file_name)", "def export_to_gral(\n inventory: Inventory, grid: GralGrid, path: os.PathLike, polygon_raster_size\n) -> None:\n\n writer = EmissionWriter(Path(path), inventory, grid, polygon_raster_size)\n\n writer.write_gdfs()", "def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')", "def result_writer(result_poly):\n val = {}\n val[\"type\"] = \"FeatureCollection\"\n val[\"features\"] = result_poly\n with open(output_file_path, 'w') as outfile:\n json.dump(val, outfile, indent=3)\n outfile.close()", "def to_geojson(model, contrib_id):\n feature_collection = []\n for record in model.objects.filter(contributer_id=contrib_id):\n try:\n properies = {\n \"name\": record.name,\n \"address\": record.address,\n \"email\": record.email,\n \"website\": record.website,\n \"phone_number\": record.phone_number,\n }\n my_point = Point((record.longitude, record.latitude))\n my_feature = Feature(geometry=my_point, properties=properies)\n feature_collection.append(my_feature)\n except ValueError:\n pass\n return FeatureCollection(feature_collection)", "def export_experiment(session, saver, last_step, global_step, output_dir,\n eval_set, features, labels, images, route):\n output_filename = 'output_%s_%s_%d.h5' % (\n FLAGS.dataset, eval_set, global_step)\n output_directory = os.path.join(output_dir, 'classify', 'output')\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n with h5py.File(os.path.join(output_directory, output_filename), 'w') as hf:\n hf.create_dataset('features', data=features, compression='lzf')\n hf.create_dataset('labels', data=labels, compression='lzf')\n hf.create_dataset('images', data=images, compression='lzf')\n hf.create_dataset('route', data=route, compression='lzf')\n\n session_directory = os.path.join(\n output_dir, 'eval', FLAGS.dataset, eval_set)\n saver.save(session, os.path.join(session_directory, 'model.ckpt'),\n global_step=last_step)", "def k2g(\n kml_path_or_buffer,\n output_dir,\n feature_collection_name,\n style_type,\n style_filename,\n separate_folders,\n):\n style, *layers = m.convert(\n kml_path_or_buffer,\n style_type=style_type,\n separate_folders=separate_folders,\n feature_collection_name=feature_collection_name,\n )\n\n # Create output directory if it doesn't exist\n output_dir = pl.Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir(parents=True)\n output_dir = output_dir.resolve()\n\n # Write style file\n path = output_dir / style_filename\n with path.open(\"w\") as tgt:\n json.dump(style, tgt)\n\n # Create filenames for layers\n stems = 
m.disambiguate(m.to_filename(layer[\"name\"]) for layer in layers)\n filenames = [f\"{stem}.geojson\" for stem in stems]\n\n # Write layer files\n for i in range(len(layers)):\n path = output_dir / filenames[i]\n with path.open(\"w\") as tgt:\n json.dump(layers[i], tgt)", "def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)", "def generate_geojson_export(\n export_type,\n username,\n id_string,\n metadata=None,\n export_id=None,\n options=None,\n xform=None,\n):\n\n extension = options.get(\"extension\", export_type)\n if xform is None:\n xform = XForm.objects.get(user__username=username, id_string=id_string)\n request = HttpRequest()\n extra_data = metadata.extra_data\n # build out query params to be used in GeoJsonSerializer\n request.query_params = {\n \"geo_field\": extra_data.get(\"data_geo_field\"),\n \"simple_style\": extra_data.get(\"data_simple_style\"),\n \"title\": extra_data.get(\"data_title\"),\n \"fields\": extra_data.get(\"data_fields\"),\n }\n _context = {}\n _context[\"request\"] = request\n # filter out deleted submissions\n content = GeoJsonSerializer(\n xform.instances.filter(deleted_at__isnull=True), many=True, context=_context\n )\n data_to_write = json.dumps(content.data).encode(\"utf-8\")\n timestamp = datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n basename = f\"{id_string}_{timestamp}\"\n filename = basename + \".\" + extension\n file_path = os.path.join(username, \"exports\", id_string, export_type, filename)\n\n export_filename = write_temp_file_to_path(extension, data_to_write, file_path)\n\n export = get_or_create_export_object(export_id, options, xform, export_type)\n\n dir_name, basename = os.path.split(export_filename)\n export.filedir = dir_name\n export.filename = basename\n export.internal_status = Export.SUCCESSFUL\n export.save()\n\n return export", "def generate_feature(dataset_path, output_path, num_threads):\n settings = BeatSettings()\n\n if output_path is not None:\n settings.dataset.feature_save_path = output_path\n\n beat.app.generate_feature(dataset_path, beat_settings=settings, num_threads=num_threads)", "def save_to_json(self):\r\n file = col.defaultdict(list)\r\n data_sources = [\"http://www.gcmap.com/\",\r\n \"http://www.theodora.com/country_digraphs.html\",\r\n \"http://www.citypopulation.de/world/Agglomerations.html\",\r\n \"http://www.mongabay.com/cities_urban_01.htm\",\r\n \"http://en.wikipedia.org/wiki/Urban_agglomeration\",\r\n \"http://www.worldtimezone.com/standard.html\"]\r\n file[\"data_sources\"] = data_sources\r\n for code, city in self.vertices.items():\r\n metros = {}\r\n for key, val in vars(city).items():\r\n metros[key] = val\r\n file[\"metros\"].append(metros)\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n routes = {\"ports\": [edge.start, edge.destination], \"distance\": edge.distance}\r\n second_route = {\"ports\": [edge.destination, edge.start], \"distance\": edge.distance}\r\n if second_route not in file[\"routes\"]:\r\n file[\"routes\"].append(routes)\r\n with open('../Data/save.json', 'w') as outfile:\r\n json.dump(file, outfile, indent=4)", "def addExportLayerToCoreml(builder):\n outputNames = 
[output.name for output in builder.spec.description.output]\n\n for i, outputName in enumerate(outputNames):\n # formulas: https://github.com/ultralytics/yolov5/issues/471\n builder.add_activation(\n name=f\"sigmoid_{outputName}\",\n non_linearity=\"SIGMOID\",\n input_name=outputName,\n output_name=f\"{outputName}_sigmoid\",\n )\n\n ### Coordinates calculation ###\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2) -> nC = 640 / strides[i]\n builder.add_slice(\n name=f\"slice_coordinates_xy_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_xy\",\n axis=\"width\",\n start_index=0,\n end_index=2,\n )\n # x,y * 2\n builder.add_elementwise(\n name=f\"multiply_xy_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_xy\"],\n output_name=f\"{outputName}_multiplied_xy_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # x,y * 2 - 0.5\n builder.add_elementwise(\n name=f\"subtract_0_5_from_xy_{outputName}\",\n input_names=[f\"{outputName}_multiplied_xy_by_two\"],\n output_name=f\"{outputName}_subtracted_0_5_from_xy\",\n mode=\"ADD\",\n alpha=-0.5,\n )\n grid = make_grid(featureMapDimensions[i], featureMapDimensions[i]).numpy()\n # x,y * 2 - 0.5 + grid[i]\n builder.add_bias(\n name=f\"add_grid_from_xy_{outputName}\",\n input_name=f\"{outputName}_subtracted_0_5_from_xy\",\n output_name=f\"{outputName}_added_grid_xy\",\n b=grid,\n shape_bias=grid.shape,\n )\n # (x,y * 2 - 0.5 + grid[i]) * stride[i]\n builder.add_elementwise(\n name=f\"multiply_xy_by_stride_{outputName}\",\n input_names=[f\"{outputName}_added_grid_xy\"],\n output_name=f\"{outputName}_calculated_xy\",\n mode=\"MULTIPLY\",\n alpha=strides[i],\n )\n\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2)\n builder.add_slice(\n name=f\"slice_coordinates_wh_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_wh\",\n axis=\"width\",\n start_index=2,\n end_index=4,\n )\n # w,h * 2\n builder.add_elementwise(\n name=f\"multiply_wh_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_wh\"],\n output_name=f\"{outputName}_multiplied_wh_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # (w,h * 2) ** 2\n builder.add_unary(\n name=f\"power_wh_{outputName}\",\n input_name=f\"{outputName}_multiplied_wh_by_two\",\n output_name=f\"{outputName}_power_wh\",\n mode=\"power\",\n alpha=2,\n )\n # (w,h * 2) ** 2 * anchor_grid[i]\n anchor = (\n anchorGrid[i]\n .expand(-1, featureMapDimensions[i], featureMapDimensions[i], -1)\n .numpy()\n )\n builder.add_load_constant_nd(\n name=f\"anchors_{outputName}\",\n output_name=f\"{outputName}_anchors\",\n constant_value=anchor,\n shape=anchor.shape,\n )\n builder.add_elementwise(\n name=f\"multiply_wh_with_achors_{outputName}\",\n input_names=[f\"{outputName}_power_wh\", f\"{outputName}_anchors\"],\n output_name=f\"{outputName}_calculated_wh\",\n mode=\"MULTIPLY\",\n )\n\n builder.add_concat_nd(\n name=f\"concat_coordinates_{outputName}\",\n input_names=[f\"{outputName}_calculated_xy\", f\"{outputName}_calculated_wh\"],\n output_name=f\"{outputName}_raw_coordinates\",\n axis=-1,\n )\n builder.add_scale(\n name=f\"normalize_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_coordinates\",\n output_name=f\"{outputName}_raw_normalized_coordinates\",\n W=torch.tensor([1 / 640]).numpy(),\n b=0,\n has_bias=False,\n )\n\n ### Confidence calculation ###\n builder.add_slice(\n name=f\"slice_object_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n 
output_name=f\"{outputName}_object_confidence\",\n axis=\"width\",\n start_index=4,\n end_index=5,\n )\n builder.add_slice(\n name=f\"slice_label_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_label_confidence\",\n axis=\"width\",\n start_index=5,\n end_index=0,\n )\n # confidence = object_confidence * label_confidence\n builder.add_multiply_broadcastable(\n name=f\"multiply_object_label_confidence_{outputName}\",\n input_names=[\n f\"{outputName}_label_confidence\",\n f\"{outputName}_object_confidence\",\n ],\n output_name=f\"{outputName}_raw_confidence\",\n )\n\n # input: (1, 3, nC, nC, 85), output: (3 * nc^2, 85)\n builder.add_flatten_to_2d(\n name=f\"flatten_confidence_{outputName}\",\n input_name=f\"{outputName}_raw_confidence\",\n output_name=f\"{outputName}_flatten_raw_confidence\",\n axis=-1,\n )\n builder.add_flatten_to_2d(\n name=f\"flatten_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_normalized_coordinates\",\n output_name=f\"{outputName}_flatten_raw_coordinates\",\n axis=-1,\n )\n\n builder.add_concat_nd(\n name=\"concat_confidence\",\n input_names=[\n f\"{outputName}_flatten_raw_confidence\" for outputName in outputNames\n ],\n output_name=\"raw_confidence\",\n axis=-2,\n )\n builder.add_concat_nd(\n name=\"concat_coordinates\",\n input_names=[\n f\"{outputName}_flatten_raw_coordinates\" for outputName in outputNames\n ],\n output_name=\"raw_coordinates\",\n axis=-2,\n )\n\n builder.set_output(\n output_names=[\"raw_confidence\", \"raw_coordinates\"],\n output_dims=[(25200, numberOfClassLabels), (25200, 4)],\n )", "def cli(ctx, dataset_collection_id, file_path):\n return ctx.gi.dataset_collections.download_dataset_collection(dataset_collection_id, file_path)", "def create_collection_feature_space(collection_path):\n sentences = load_collection_sentences(collection_path, __fape_files_to_load)\n return create_feature_space(reduce(lambda x,y: x[0]+y[0], sentences))", "def save(self):\r\n\r\n for video_name, video_data in self.data.items():\r\n save_path = os.path.join(\r\n self.features_dir, video_name + \".\" + self.file_type\r\n )\r\n write_df(\r\n df=video_data.fillna(0), file_type=self.file_type, save_path=save_path\r\n )\r\n print(\"Created additional ROI features for {}...\".format(video_name))\r\n self.timer.stop_timer()\r\n stdout_success(\r\n msg=\"Created additional ROI features for files within the project_folder/csv/features_extracted directory\",\r\n elapsed_time=self.timer.elapsed_time_str,\r\n )", "def export_simple(self):\n raise Exception(\"this ImageCollection cannot be exported in the simple format\")", "def export(\n self,\n folder_name,\n data_type,\n coordinate_system=\"EPSG:4326\",\n scale=500,\n export_limit=None,\n min_img_val=None,\n max_img_val=None,\n major_states_only=True,\n check_if_done=False,\n download_folder=None,\n ):\n if check_if_done:\n if download_folder is None:\n download_folder = Path(\"data\") / folder_name\n already_downloaded = get_tif_files(download_folder)\n\n imgcoll = (\n ee.ImageCollection(self.collection_id)\n .filterBounds(ee.Geometry.Rectangle(-106.5, 50, -64, 23))\n .filterDate(\"2002-12-31\", \"2016-8-4\")\n )\n\n datatype_to_func = {\n \"image\": _append_im_band,\n \"mask\": _append_mask_band,\n \"temperature\": _append_temp_band,\n }\n\n img = imgcoll.iterate(datatype_to_func[data_type])\n img = ee.Image(img)\n\n # \"clip\" the values of the bands\n if min_img_val is not None:\n # passing en ee.Number creates a constant image\n img_min = 
ee.Image(ee.Number(min_img_val))\n img = img.min(img_min)\n if max_img_val is not None:\n img_max = ee.Image(ee.Number(max_img_val))\n img = img.max(img_max)\n\n # note that the county regions are pulled from Google's Fusion tables. This calls a merge\n # of county geometry and census data:\n # https://fusiontables.google.com/data?docid=1S4EB6319wWW2sWQDPhDvmSBIVrD3iEmCLYB7nMM#rows:id=1\n\n region = ee.FeatureCollection(\"TIGER/2018/Counties\")\n\n # turn the strings into numbers, see\n # https://developers.google.com/earth-engine/datasets/catalog/TIGER_2018_Counties\n def county_to_int(feature):\n return feature.set(\"COUNTYFP\", ee.Number.parse(feature.get(\"COUNTYFP\")))\n\n def state_to_int(feature):\n return feature.set(\"STATEFP\", ee.Number.parse(feature.get(\"STATEFP\")))\n\n region = region.map(county_to_int)\n region = region.map(state_to_int)\n\n count = 0\n\n for state_id, county_id in np.unique(\n self.locations[[\"State ANSI\", \"County ANSI\"]].values, axis=0\n ):\n if major_states_only:\n if int(state_id) not in MAJOR_STATES:\n print(f\"Skipping state id {int(state_id)}\")\n continue\n\n fname = \"{}_{}\".format(int(state_id), int(county_id))\n\n if check_if_done:\n if f\"{fname}.tif\" in already_downloaded:\n print(f\"{fname}.tif already downloaded! Skipping\")\n continue\n\n file_region = region.filterMetadata(\n \"COUNTYFP\", \"equals\", int(county_id)\n ).filterMetadata(\"STATEFP\", \"equals\", int(state_id))\n file_region = ee.Feature(file_region.first())\n processed_img = img.clip(file_region)\n file_region = None\n while True:\n try:\n self._export_one_image(\n processed_img,\n folder_name,\n fname,\n file_region,\n scale,\n coordinate_system,\n )\n except (ee.ee_exception.EEException, ssl.SSLEOFError):\n print(f\"Retrying State {int(state_id)}, County {int(county_id)}\")\n time.sleep(10)\n continue\n break\n\n count += 1\n if export_limit:\n if count >= export_limit:\n print(\"Reached export limit! 
Stopping\")\n break\n print(f\"Finished Exporting {count} files!\")", "def _export_postgis(self, vector_name, dbstring,\n output_layer=None,\n additional_options=[]):\n\n module_name = \"v.out.postgis\"\n args = [\"-l\", \"input=%s\"%vector_name, \"output=%s\"%dbstring]\n\n if output_layer:\n args.append(\"output_layer=%s\"%output_layer)\n\n if additional_options:\n args.extend(additional_options)\n\n # Export\n p = Process(exec_type=\"grass\",\n executable=module_name,\n executable_params=args,\n stdin_source=None)\n\n self._update_num_of_steps(1)\n self._run_module(p)", "def export_poly(self, filename):\n mun = Geometry.merge_adjacent_features([f for f in self.getFeatures()])\n mun = Geometry.get_multipolygon(mun)\n with open(filename, \"w\") as fo:\n fo.write(\"admin_boundary\\n\")\n i = 0\n for part in mun:\n for j, ring in enumerate(part):\n i += 1\n prefix = \"!\" if j > 0 else \"\"\n fo.write(prefix + str(i) + \"\\n\")\n for p in ring:\n fo.write(\"%f %f\\n\" % (p.x(), p.y()))\n fo.write(\"END\\n\")\n fo.write(\"END\\n\")\n return", "def export_assets(self, asset_dir):\n return self.examples_inputter.export_assets(asset_dir)", "def generate_data_collection(self):\n self.data_1 = Exporter(name=\"name_1\",\n url=constants.XSL_URL,\n enable_by_default=False,\n templates=[]).save()\n self.data_collection = [self.data_1]", "def write_preds_as_geojson(preds, outfp):\n\n preds_collection = geojson.FeatureCollection(preds)\n with open(outfp, \"w\") as outfile:\n geojson.dump(preds_collection, outfile)\n\n print(\"wrote {} assembled predictions to file {}\".format(\n len(preds), outfp))", "def GEEtopoPts(ptsFile,metric,buf,poly,username,folderOut, scalePix = 30):\n \n # load required libraries\n import ee\n import math\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define topo images\n srtm = ee.Image('USGS/SRTMGL1_003')\n slopeI = ee.Terrain.slope(srtm).multiply(math.pi/180)\n aspectI = ee.Terrain.aspect(srtm).multiply(math.pi/180)\n\n aspectS = aspectI.sin();\n aspectC = aspectI.cos();\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n \n #reduce regions, filter out null values, remove geometry and export table\n\n if 'elev' in metric:\n table_tc_pts = srtm.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_elev_topo_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n if 'slope' in metric:\n table_tc_pts = slopeI.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_slope_topo_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n if 'aspect' in metric:\n table_AS_pts = aspectS.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.sum(),\n scale = scalePix)\n table_AC_pts = aspectC.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.sum(),\n scale = scalePix)\n task_AS = ee.batch.Export.table.toDrive(collection = table_AS_pts\n 
.filter(ee.Filter.neq('sum', None))\n .select(['.*'],None,False),\n description = 's_aspect_sin_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_AC = ee.batch.Export.table.toDrive(collection = table_AC_pts\n .filter(ee.Filter.neq('sum', None))\n .select(['.*'],None,False),\n description = 's_aspect_cos_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_AS.start()\n task_AC.start()\n\n #print (\"buffered pts by:\" + str(buf))\n\n elif poly > 0:\n \n #reduce regions, filter out null values, remove geometry and export table\n\n if 'elev' in metric:\n table_tc_pts = srtm.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_elev_topo_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n if 'slope' in metric:\n table_tc_pts = slopeI.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_slope_topo_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n if 'aspect' in metric:\n table_AS_pts = aspectS.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.sum(),\n scale = scalePix)\n table_AC_pts = aspectC.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.sum(),\n scale = scalePix)\n task_AS = ee.batch.Export.table.toDrive(collection = table_AS_pts\n .filter(ee.Filter.neq('sum', None))\n .select(['.*'],None,False),\n description = 's_aspect_sin_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_AC = ee.batch.Export.table.toDrive(collection = table_AC_pts\n .filter(ee.Filter.neq('sum', None))\n .select(['.*'],None,False),\n description = 's_aspect_cos_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_AS.start()\n task_AC.start()\n\n #print (\"spatial mean in poly: no buffer\")\n\n else:\n\n if 'elev' in metric:\n table_tc_pts = srtm.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_elev_topo_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n if 'slope' in metric:\n table_tc_pts = slopeI.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_slope_topo_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n if 'aspect' in metric:\n table_A_pts = aspectI.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_A = ee.batch.Export.table.toDrive(collection = table_A_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_aspect_topo_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_A.start()\n \n #print(\"value at point: no buffer\")", "def to_csv(self, path):\n if os.path.isdir(path):\n shutil.rmtree(os.path.join(path))\n os.makedirs(path)\n\n for name, df in self.input_data.items():\n name += \".csv\"\n filename = os.path.join(path, name)\n 
df.to_csv(filename)\n logging.info(\"Scenario saved as csv-collection to %s\", path)", "def exportData(self):\n\t\tlays = rlayer.renderlayers()\n\t\tdata = {}\n\t\tfor l in lays:\n\t\t\tif l.name == 'defaultRenderLayer':\n\t\t\t\tcontinue\n\t\t\tdata[l.name] = {'objects':l.objects, # OBJECTS IN LAYER\n\t\t\t\t\t\t\t'values' :l.overridesWithValues, # OVERRIDED ATTRIBUTES ONLY CHANGED VALUES\n\t\t\t\t\t\t\t'conns' :l.overridesWithConnections[0], # OVERRIDED ATTRIBUTES CHANGED CONNECTIONS\n\t\t\t\t\t\t\t'shader' :l.overridedShader # OVERRIDE RENDERLAYER SHADER\n\t\t\t\t\t\t\t}\n\t\tpickle.dump( data, open( self.dataPath.path, \"wb\" ) )", "def reproject_vector( path, epsg_from=None, epsg_to=None):\n\n if not epsg_to: raise Exception(\"please, specify the output EPSG codes\")\n\n inDataSet = None\n outDataSet = None\n inFeature = None\n outFeature = None\n outLayer = None\n\n try:\n\n driver = ogr.GetDriverByName('ESRI Shapefile')\n inDataSet = driver.Open(path, 0) # 0 means read-only\n\n # define input SpatialReference\n if not epsg_from:\n layer = inDataSet.GetLayer()\n inSpatialRef = layer.GetSpatialRef()\n else:\n inSpatialRef = osr.SpatialReference()\n inSpatialRef.ImportFromEPSG(epsg_from)\n\n # define output SpatialReference\n outSpatialRef = osr.SpatialReference()\n outSpatialRef.ImportFromEPSG(epsg_to)\n\n # create the CoordinateTransformation\n coordTrans = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)\n\n # get the first input layer and the geometry type\n inLayer = inDataSet.GetLayer()\n geotype = inLayer.GetGeomType()\n lname = inLayer.GetName()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n outDataSet = drv.CreateDataSource(\"/vsimem/memory.shp\")\n\n outLayer = outDataSet.CreateLayer(lname, srs=outSpatialRef, geom_type=geotype)\n\n # add fields\n inLayerDefn = inLayer.GetLayerDefn()\n\n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n outLayer.CreateField(fieldDefn)\n\n # get the output layer\"s feature definition\n outLayerDefn = outLayer.GetLayerDefn()\n\n counter = 1\n\n # loop through the input features\n inFeature = inLayer.GetNextFeature()\n while inFeature:\n # get the input geometry\n geom = inFeature.GetGeometryRef()\n # reproject the geometry\n geom.Transform(coordTrans)\n # create a new feature\n outFeature = ogr.Feature(outLayerDefn)\n # set the geometry and attribute\n outFeature.SetGeometry(geom)\n for i in range(0, outLayerDefn.GetFieldCount()):\n outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))\n # add the feature to the shapefile\n outLayer.CreateFeature(outFeature)\n\n # destroy the features and get the next input feature\n if outFeature: outFeature = None\n inFeature = inLayer.GetNextFeature()\n\n counter += 1\n #print(counter)\n\n return outDataSet\n\n except RuntimeError as err:\n raise err\n except Exception as e:\n raise e\n\n finally:\n if inDataSet: outDataSet == None # give back control to C++\n if outDataSet: outDataSet == None\n if outLayer: outLayer == None\n if inFeature: inFeature == None\n if outFeature: outFeature = None", "def GEEicPts(ptsFile,yr,buf,poly,username,folderOut, scalePix = 30):\n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define landcover images\n tc = ee.Image(\"USGS/NLCD/NLCD\" + str(yr)).select('impervious')\n\n if 
buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_ic_'+str(yr)+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"buffered pts by:\" + str(buf))\n\n elif poly > 0:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_ic_'+str(yr)+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"spatial mean in poly: no buffer\")\n\n else:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_ic_'+str(yr)+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print(\"value at point: no buffer\")", "def dump_gazettes_as_csv(self):\n # TODO: dump_gazettes_as_csv\n pass", "def make_source(self):\n sources = []\n for feature in self.regions_json['features']:\n sources.append(dict(type= 'FeatureCollection', features = [feature]))\n return sources", "def get_feature_layers_collection(project_name):\r\n feature_layers_collection = get_portal_item(\r\n portal_connection=PORTAL_CONNECTION,\r\n item_name=\"{}_Map\".format(project_name),\r\n item_type=\"Feature Layer\"\r\n )\r\n\r\n return feature_layers_collection", "def export_embeddings(self):\n save_path = self.config.path_embeddings / self.model.model_name\n save_path.mkdir(parents=True, exist_ok=True)\n \n idx2ent = self.model.config.knowledge_graph.read_cache_data('idx2entity')\n idx2rel = self.model.config.knowledge_graph.read_cache_data('idx2relation')\n\n\n series_ent = pd.Series(idx2ent)\n series_rel = pd.Series(idx2rel)\n series_ent.to_pickle(save_path / \"ent_labels.pickle\")\n series_rel.to_pickle(save_path / \"rel_labels.pickle\")\n\n with open(str(save_path / \"ent_labels.tsv\"), 'w') as l_export_file:\n for label in idx2ent.values():\n l_export_file.write(label + \"\\n\")\n\n with open(str(save_path / \"rel_labels.tsv\"), 'w') as l_export_file:\n for label in idx2rel.values():\n l_export_file.write(label + \"\\n\")\n\n for parameter in self.model.parameter_list:\n all_ids = list(range(0, int(parameter.shape[0])))\n stored_name = parameter.name.split(':')[0]\n # import pdb; pdb.set_trace()\n\n if len(parameter.shape) == 2:\n all_embs = parameter.numpy()\n with open(str(save_path / (\"%s.tsv\" % stored_name)), 'w') as v_export_file:\n for idx in all_ids:\n v_export_file.write(\"\\t\".join([str(x) for x in all_embs[idx]]) + \"\\n\")\n\n df = pd.DataFrame(all_embs)\n df.to_pickle(save_path / (\"%s.pickle\" % stored_name))", "def export_gltf(self, path):\n with open(path, 'w') as gltf_f:\n json.dump(self.to_dict(), gltf_f)", "def 
persistent_image_features(images, toStoreFile):\n image_features = extract_features(images)\n\n np.save(toStoreFile, image_features)", "def export(self, output_path: str, export_format: str = 'csv', z_positive_up: bool = True, **kwargs):\n strt = perf_counter()\n print('****Exporting surface data to {}****'.format(export_format))\n fmt = export_format.lower()\n if os.path.exists(output_path):\n tstmp = datetime.now().strftime('%Y%m%d_%H%M%S')\n foldername, filname = os.path.split(output_path)\n filnm, filext = os.path.splitext(filname)\n output_path = os.path.join(foldername, '{}_{}{}'.format(filnm, tstmp, filext))\n\n if fmt == 'csv':\n self._export_csv(output_path, z_positive_up=z_positive_up)\n elif fmt == 'geotiff':\n self._export_geotiff(output_path, z_positive_up=z_positive_up)\n elif fmt == 'bag':\n self._export_bag(output_path, z_positive_up=z_positive_up, **kwargs)\n else:\n raise ValueError('fqpr_surface_v3: Unrecognized format {}'.format(fmt))\n end = perf_counter()\n print('****Export complete: {}s****'.format(round(end - strt, 3)))", "def export(self, fname):\n\n # discard any data with null feature values\n self.discard()\n\n # set target as last column\n self.target = self.getFeatureData('Weather Type')\n\n # remove non-exportable features\n for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:\n if self._isFIdx(n):\n self.delete(n)\n\n # convert all data to float\n self.data = self.data.astype(float)\n\n # export to file\n pickle.dump(self, open(fname, 'wb'))\n\n return 0", "def save_features(self, output_filename):\n # TODO: See if this function should save the features in memory\n if isinstance(self.chip_producer, Chipper):\n raise NotImplementedError(\"Only ChipDatasets are supported at this time\")\n chip_keys, chips, features = self.return_features()\n FeatureDataset.save(output_filename, chip_keys, chips, features)", "def GEEmonthTRMM(ptsFile,startYear,endYear,buf,poly,username,folderOut, scalePix = 25000):\n \n # load required libraries\n import ee\n\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n years = list(range(startYear, endYear + 1))\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n ID_field = \"geeID\"\n \n TRMM = ee.ImageCollection('TRMM/3B43V7').select('precipitation')\n \n img_col = TRMM.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select('precipitation')\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'rm_TRMM_pr_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print ('buffered pts by:' + str(buf) + ' for TRMM')\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select('precipitation')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', 
ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'rm_TRMM_pr_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for TRMM')\n\n else:\n def table_m(image):\n table = (image\n .select('precipitation')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'rm_TRMM_pr_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for TRMM')", "def dump_distributions(self):\n file_path = self.get_local_path(self.filename_distributions)\n\n with open(file_path, \"w\") as f:\n json_obj = {\n \"feature_uniques\": self.feature_uniques,\n \"feature_summaries\": self.feature_summaries,\n }\n json.dump(json_obj, f)\n return file_path", "def download(cls):\n cls._check_folder()\n os.chdir(cls.VIEWS_PATH)\n # iterate documents\n for doc in cls._documents:\n design_doc = doc().view()\n if design_doc is None:\n continue\n bucket_name = design_doc.bucket.name\n # iterate viewtypes (i.e. spatial and views)\n for view_type, views in design_doc.ddoc.iteritems():\n save_dir = '%s/%s/%s' % (bucket_name, design_doc.name, view_type)\n try:\n # remove and recreate the dir\n shutil.rmtree(save_dir, ignore_errors=True)\n os.makedirs(save_dir)\n except OSError:\n pass\n for name, view in views.iteritems():\n if isinstance(view, unicode) and view_type=='spatial':\n spatial_file = '%s/%s.spatial.js' % (save_dir, name)\n with open(spatial_file, 'w') as f:\n f.write(view)\n print 'Downloaded: %s' % spatial_file\n if isinstance(view, dict) and 'map' in view:\n map_file = '%s/%s.map.js' % (save_dir, name)\n with open(map_file, 'w') as f:\n f.write(view['map'])\n print 'Downloaded: %s' % map_file\n if isinstance(view, dict) and 'reduce' in view:\n reduce_file = '%s/%s.reduce.js' % (save_dir, name)\n with open(reduce_file, 'w') as f:\n f.write(view['reduce'])\n print 'Downloaded: %s' % reduce_file\n pass", "def save_map(self, path: str):\n self.folium_map.save(path)", "def get_shapefile(self, shpname: str):\r\n self.get_geojson()\r\n self.geojson.to_shp(shpname)", "def dump_geodatabase_to_folder(path, folder='Worldmap Files'):\r\n # make sure that path exists and is a geodatabase\r\n ensure_valid_gdb(path):\r\n\r\n # set workspace and output folder\r\n env.workspace = path\r\n if os.path.isdir(folder):\r\n os.removedirs(folder)\r\n os.mkdir(folder)\r\n\r\n # get complete list of FCs to project\r\n feature_classes = find_all_feature_classes(env.workspace)\r\n print 'Recovered {0} feature classes to project'.format(len(feature_classes))\r\n\r\n # project feature classes - skipping any with unknown references\r\n for infc in feature_classes:\r\n project_feature_class(infc, folder)", "def _export_reference_representations(self):\n\n self.logger.msg1(\"Saving reference representations\")\n general_refset, _ = 
get_refsets(self.dbpath)\n general_refset.save(self.rootpath+\"-references\", \"phenotype\")", "def write_edge_features(edge_features, edge_file):\n dgl.data.utils.save_tensors(edge_file, edge_features)", "def create_collection_report(output_dir, output_file_name, k8s_cli, namespaces, start_time, mode):\n\n with open(os.path.join(output_dir, 'collection_report.json'), \"w\") as output_fh:\n json.dump({\n \"output_file_name\": output_file_name,\n \"k8s_cli\": k8s_cli,\n \"namespaces\": namespaces,\n \"start_time\": start_time,\n \"mode\": mode,\n \"log_collector_version\": VERSION_LOG_COLLECTOR\n }, output_fh)", "def GEEmacaGCMs(ptsFile,metric,timeStep,startYear,endYear,scenarios,buf,poly,models,\n username,folderOut, scalePix = 4000):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['month'] = 'projm'\n time_d['year'] = 'projy'\n \n for met in metric:\n\n for scenario in scenarios:\n\n for model in models:\n\n MACA = (ee.ImageCollection('IDAHO_EPSCOR/MACAv2_METDATA_MONTHLY')\n .select(met)\n .filterMetadata('model', 'equals', model)\n .filterMetadata('scenario', 'equals', scenario))\n\n metL = [met]\n \n years = list(range(startYear, endYear + 1))\n yearsEE = ee.List(years)\n \n if all([(timeStep == 'year'),any([(met == 'tasmin'),(met == 'tasmax'),\n (met == 'huss'),(met == 'rsds'),\n (met == 'was')])]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif (timeStep == 'month'):\n \n img_col = MACA.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n elif all([(timeStep == 'year'),(met == 'pr')]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale 
= scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)", "def load():\n\n # To run this command type: 'python manage.py shell'\n # 'from map.views import load; load()'\n\n mapping = {\"productivi\": \"productivi\", \"mpoly\": \"MULTIPOLYGON\"}\n map_path = os.path.abspath('gis_django/fields_test/test_fields.shp')\n lm = LayerMapping(Map, map_path, mapping, transform=False, encoding=\"iso-8859-1\")\n lm.save(verbose=True)", "def find_all_feature_classes(current_workspace):\r\n paths_to_export = arcpy.ListFeatureClasses()\r\n \r\n # search for additional feature classes in feature datasets\r\n for fds in arcpy.ListDatasets():\r\n env.workspace = os.path.join(env.workspace, fds)\r\n for fc in arcpy.ListFeatureClasses():\r\n paths_to_export.append(os.path.join(fds, fc))\r\n env.workspace = current_workspace\r\n \r\n if len(paths_to_export) == 0:\r\n raise EmptyGeodatabaseError\r\n else:\r\n return paths_to_export", "def write_dgl_objects(\n graph_obj,\n node_features,\n edge_features,\n output_dir,\n part_id,\n orig_nids,\n orig_eids,\n formats,\n sort_etypes,\n):\n part_dir = output_dir + \"/part\" + str(part_id)\n os.makedirs(part_dir, exist_ok=True)\n write_graph_dgl(\n os.path.join(part_dir, \"graph.dgl\"), graph_obj, formats, sort_etypes\n )\n\n if node_features != None:\n write_node_features(\n node_features, os.path.join(part_dir, \"node_feat.dgl\")\n )\n\n if edge_features != None:\n write_edge_features(\n edge_features, os.path.join(part_dir, \"edge_feat.dgl\")\n )\n\n if orig_nids is not None:\n orig_nids_file = os.path.join(part_dir, \"orig_nids.dgl\")\n dgl.data.utils.save_tensors(orig_nids_file, orig_nids)\n if orig_eids is not None:\n orig_eids_file = os.path.join(part_dir, \"orig_eids.dgl\")\n dgl.data.utils.save_tensors(orig_eids_file, orig_eids)", "def extractFeaturesToFile(inputVideo=\"videos/House.Of.Cards.S01E01.720p.BluRay.x265.mp4\", filename=\"ep1.png\"):\n features = extractFeatures(inputVideo)\n cv2.imwrite(filename, features)", "def GEElcPts(ptsFile,yr,buf,poly,username,folderOut, scalePix = 30):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field 
= \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define landcover images\n tc = ee.Image(\"USGS/NLCD/NLCD\" + str(yr)).select('landcover')\n\n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.frequencyHistogram(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('histogram', None))\n .select(['.*'],None,False),\n description = 'f_lc_'+str(yr)+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"buffered pts by:\" + str(buf))\n\n elif poly > 0:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.frequencyHistogram(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('histogram', None))\n .select(['.*'],None,False),\n description = 'f_lc_'+str(yr)+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"spatial mean in poly: no buffer\")\n\n else:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_lc_'+str(yr)+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print(\"value at point: no buffer\")", "def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)", "def export(self, savepath):\n logger.debug(f\"Exporting scene to {savepath}\")\n _backend = self.backend\n\n if not self.is_rendered:\n self.render(interactive=False)\n\n path = Path(savepath)\n if path.suffix != \".html\":\n raise ValueError(\"Savepath should point to a .html file\")\n\n # prepare settings\n vsettings.notebookBackend = \"k3d\"\n\n # Create new plotter and save to file\n plt = Plotter()\n plt.add(self.clean_renderables, render=False)\n plt = plt.show(interactive=False)\n plt.camera[-2] = -1\n\n with open(path, \"w\") as fp:\n fp.write(plt.get_snapshot())\n\n print(\n f\"The brainrender scene has been exported for web. 
The results are saved at {path}\"\n )\n\n # Reset settings\n vsettings.notebookBackend = None\n self.backend = _backend\n\n return str(path)", "def export_coreml(self, filename):\n import coremltools\n # First define three internal helper functions\n\n\n # Internal helper function\n def _create_vision_feature_print_screen():\n prob_name = self.target + 'Probability'\n\n #\n # Setup the top level (pipeline classifier) spec\n #\n top_spec = coremltools.proto.Model_pb2.Model()\n top_spec.specificationVersion = 3\n\n desc = top_spec.description\n desc.output.add().name = prob_name\n desc.output.add().name = self.target\n\n desc.predictedFeatureName = self.target\n desc.predictedProbabilitiesName = prob_name\n\n input = desc.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')\n input.type.imageType.colorSpace = BGR_VALUE\n\n #\n # VisionFeaturePrint extractor\n #\n pipelineClassifier = top_spec.pipelineClassifier\n scene_print = pipelineClassifier.pipeline.models.add()\n scene_print.specificationVersion = 3\n scene_print.visionFeaturePrint.scene.version = 1\n\n input = scene_print.description.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n input.type.imageType.colorSpace = BGR_VALUE\n\n output = scene_print.description.output.add()\n output.name = \"output_name\"\n DOUBLE_ARRAY_VALUE = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')\n output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n output.type.multiArrayType.shape.append(2048)\n\n #\n # Neural Network Classifier, which is just logistic regression, in order to use GPUs\n #\n temp = top_spec.pipelineClassifier.pipeline.models.add()\n temp.specificationVersion = 3\n\n # Empty inner product layer\n nn_spec = temp.neuralNetworkClassifier\n feature_layer = nn_spec.layers.add()\n feature_layer.name = \"feature_layer\"\n feature_layer.input.append(\"output_name\")\n feature_layer.output.append(\"softmax_input\")\n fc_layer_params = feature_layer.innerProduct\n fc_layer_params.inputChannels = 2048\n\n # Softmax layer\n softmax = nn_spec.layers.add()\n softmax.name = \"softmax\"\n softmax.softmax.MergeFromString(b'')\n softmax.input.append(\"softmax_input\")\n softmax.output.append(prob_name)\n\n input = temp.description.input.add()\n input.name = \"output_name\"\n input.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n input.type.multiArrayType.shape.append(2048)\n\n # Set outputs\n desc = temp.description\n prob_output = desc.output.add()\n prob_output.name = prob_name\n label_output = desc.output.add()\n label_output.name = self.target\n\n if type(self.classifier.classes[0]) == int:\n prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')\n label_output.type.int64Type.MergeFromString(b'')\n else:\n prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')\n label_output.type.stringType.MergeFromString(b'')\n\n temp.description.predictedFeatureName = self.target\n temp.description.predictedProbabilitiesName = prob_name\n\n return top_spec\n\n\n # Internal helper function\n def _update_last_two_layers(nn_spec):\n # Replace the softmax layer with new coeffients\n num_classes = self.num_classes\n fc_layer = nn_spec.layers[-2]\n fc_layer_params = fc_layer.innerProduct\n fc_layer_params.outputChannels = self.classifier.num_classes\n inputChannels = fc_layer_params.inputChannels\n 
fc_layer_params.hasBias = True\n\n coefs = self.classifier.coefficients\n weights = fc_layer_params.weights\n bias = fc_layer_params.bias\n del weights.floatValue[:]\n del bias.floatValue[:]\n\n import numpy as np\n W = np.array(coefs[coefs['index'] != None]['value'], ndmin = 2).reshape(\n inputChannels, num_classes - 1, order = 'F')\n b = coefs[coefs['index'] == None]['value']\n Wa = np.hstack((np.zeros((inputChannels, 1)), W))\n weights.floatValue.extend(Wa.flatten(order = 'F'))\n bias.floatValue.extend([0.0] + list(b))\n\n # Internal helper function\n def _set_inputs_outputs_and_metadata(spec, nn_spec):\n # Replace the classifier with the new classes\n class_labels = self.classifier.classes\n\n probOutput = spec.description.output[0]\n classLabel = spec.description.output[1]\n probOutput.type.dictionaryType.MergeFromString(b'')\n if type(class_labels[0]) == int:\n nn_spec.ClearField('int64ClassLabels')\n probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')\n classLabel.type.int64Type.MergeFromString(b'')\n del nn_spec.int64ClassLabels.vector[:]\n for c in class_labels:\n nn_spec.int64ClassLabels.vector.append(c)\n else:\n nn_spec.ClearField('stringClassLabels')\n probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')\n classLabel.type.stringType.MergeFromString(b'')\n del nn_spec.stringClassLabels.vector[:]\n for c in class_labels:\n nn_spec.stringClassLabels.vector.append(c)\n\n prob_name = self.target + 'Probability'\n label_name = self.target\n old_output_name = nn_spec.layers[-1].name\n coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)\n coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)\n if nn_spec.layers[-1].name == old_output_name:\n nn_spec.layers[-1].name = prob_name\n if nn_spec.labelProbabilityLayerName == old_output_name:\n nn_spec.labelProbabilityLayerName = prob_name\n coremltools.models.utils.rename_feature(spec, 'data', self.feature)\n if len(nn_spec.preprocessing) > 0:\n nn_spec.preprocessing[0].featureName = self.feature\n\n mlmodel = coremltools.models.MLModel(spec)\n model_type = 'image classifier (%s)' % self.model\n mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)\n mlmodel.input_description[self.feature] = u'Input image'\n mlmodel.output_description[prob_name] = 'Prediction probabilities'\n mlmodel.output_description[label_name] = 'Class label of top prediction'\n _coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {\n 'model': self.model,\n 'target': self.target,\n 'features': self.feature,\n 'max_iterations': str(self.max_iterations),\n }, version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION)\n\n return mlmodel\n\n\n # main part of the export_coreml function\n if self.model in _pre_trained_models.MODELS:\n ptModel = _pre_trained_models.MODELS[self.model]()\n feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel)\n\n coreml_model = feature_extractor.get_coreml_model()\n spec = coreml_model.get_spec()\n nn_spec = spec.neuralNetworkClassifier\n else: # model == VisionFeaturePrint_Screen\n spec = _create_vision_feature_print_screen()\n nn_spec = spec.pipelineClassifier.pipeline.models[1].neuralNetworkClassifier\n\n _update_last_two_layers(nn_spec)\n mlmodel = _set_inputs_outputs_and_metadata(spec, nn_spec)\n mlmodel.save(filename)", "def __make_geo(self):\n # gmsh freecad_part.iges -o out_iges.geo -0\n fname_list = self.__fname.split('.')\n geo_file = fname_list[0]+'.geo'\n runstr = \"%s %s -o %s -0\" % (environment.GMSH, 
self.__fname, geo_file)\n print(runstr)\n subprocess.call(runstr, shell=True)\n print('Wrote file: %s' % geo_file)", "def export(output, model_path, run_id, mlflow_home):\n mlflow.azureml.export(output=output, model_path=model_path, run_id=run_id,\n mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None)", "def exports(self):\n return ExportsCollection(client=self)", "def create_collection(project: str, category: str) -> Collection:\n # Check the project and the category exist\n try:\n project_metadata = COLLECTIONS_METADATA[project]\n except KeyError:\n print(f\"Project doesn't exist: {project}\")\n try:\n category_metadata = project_metadata.get(category)\n except KeyError:\n print(f\"Category doesn't exist: {project}/{category}\")\n\n temporal_extent = [\n str_to_datetime(dt) if dt is not None else None\n for dt in category_metadata[\"time_extent\"]\n ]\n\n extent = Extent(\n SpatialExtent([WORLDPOP_EXTENT]),\n TemporalExtent(temporal_extent),\n )\n\n collection = Collection(\n id=category_metadata[\"id\"],\n description=category_metadata[\"description\"],\n title=category_metadata[\"title\"],\n license=LICENSE,\n keywords=KEYWORDS,\n providers=PROVIDERS,\n catalog_type=CatalogType.RELATIVE_PUBLISHED,\n extent=extent,\n summaries=Summaries({\n \"gsd\": [category_metadata[\"gsd\"]],\n }),\n )\n\n # Include projection information\n proj_ext = SummariesProjectionExtension(collection)\n proj_ext.epsg = [WORLDPOP_EPSG]\n\n # Include scientific information\n scientific = ScientificExtension.ext(collection, add_if_missing=True)\n scientific.doi = category_metadata[\"doi\"]\n scientific.citation = category_metadata[\"citation\"]\n\n # Include Item Asset information\n item_asset_ext = ItemAssetsExtension.ext(collection, add_if_missing=True)\n item_asset_ext.item_assets = {\n \"metadata\":\n AssetDefinition(\n dict(\n types=[MediaType.JSON],\n roles=[\"metadata\"],\n title=\"WorldPop Metadata\",\n )),\n \"thumbnail\":\n AssetDefinition(\n dict(\n types=[MediaType.PNG],\n roles=[\"thumbnail\"],\n title=\"WorldPop Thumbnail\",\n )),\n \"worldpop\":\n AssetDefinition({\n \"types\": [MediaType.GEOTIFF, \"application/zip\"],\n \"roles\": [\"data\"],\n \"title\": \"WorldPop Data\",\n \"proj:epsg\": WORLDPOP_EPSG\n })\n }\n\n return collection", "def test_collection_export_import(self) -> None:\n self.save_new_valid_exploration(\n '0', '[email protected]', end_state_name='End')\n collection = collection_domain.Collection.create_default_collection(\n '0', title='title', category='category', objective='objective')\n collection_dict = collection.to_dict()\n collection_from_dict = collection_domain.Collection.from_dict(\n collection_dict)\n self.assertEqual(collection_from_dict.to_dict(), collection_dict)", "def export_representations(self):\n\n dbpath, config = self._start()\n self.logger.msg1(\"Loading ontology\")\n obo_path = check_file(config.obo, dbpath, \"obo\")\n self.obo = MinimalObo(obo_path, True)\n self._export_reference_representations()\n self._export_model_representations(config)\n self._end()", "def ExportAssets(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def GEEtcPts(ptsFile,yr,buf,poly,username,folderOut, scalePix = 30):\n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = 
ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define landcover images\n tc = ee.Image(\"USGS/NLCD/NLCD\" + str(yr)).select('percent_tree_cover')\n\n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_tc_'+str(yr)+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"buffered pts by:\" + str(buf))\n\n elif poly > 0:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_tc_'+str(yr)+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print (\"spatial mean in poly: no buffer\")\n\n else:\n \n #reduce regions, filter out null values, remove geometry and export table\n table_tc_pts = tc.reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix)\n task_tc = ee.batch.Export.table.toDrive(collection = table_tc_pts\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 's_tc_'+str(yr)+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n\n #print(\"value at point: no buffer\")", "def main(ini_path=None, overwrite_flag=False,\n tile_cols='', tile_rows='', delay=0):\n logging.info('\\nExport annual ET/ETrF/ETr/count image tiles')\n\n # Read config file\n ini = inputs.read(ini_path)\n inputs.parse_section(ini, section='INPUTS')\n inputs.parse_section(ini, section='INTERPOLATE')\n inputs.parse_section(ini, section='EXPORT')\n inputs.parse_section(ini, section=ini['INPUTS']['et_model'])\n\n if os.name == 'posix':\n shell_flag = False\n else:\n shell_flag = True\n\n # Limit tile ranges from command line\n # Eventually move to config file?\n try:\n tile_cols_list = list(utils.parse_int_set(tile_cols))\n except:\n tile_cols_list = []\n try:\n tile_rows_list = list(utils.parse_int_set(tile_rows))\n except:\n tile_rows_list = []\n\n logging.debug('\\nInitializing Earth Engine')\n ee.Initialize()\n\n # Get current running tasks\n tasks = utils.get_ee_tasks()\n\n # Get list of existing images/files\n if ini['EXPORT']['export_dest'] == 'ASSET':\n logging.debug('\\nGetting GEE asset list')\n asset_list = utils.get_ee_assets(\n ini['EXPORT']['output_ws'], shell_flag=shell_flag)\n logging.debug(asset_list)\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # logging.debug('\\nGetting cloud storage file list')\n # cloud_list = utils.get_bucket_files(\n # ini['EXPORT']['project_name'], ini['EXPORT']['output_ws'],\n # shell_flag=shell_flag)\n # # It may be necessary to remove image tile notation\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # logging.debug('\\nGetting Google drive file list')\n # gdrive_list = [\n # os.path.join(ini['EXPORT']['output_ws'], x)\n # for x in os.listdir(ini['EXPORT']['output_ws'])]\n # # It may be necessary to remove image tile notation\n # # Very large tiles may get split up automatically by 
EE\n # # Strip the EE tile notation data from the image list\n # # gdrive_list = list(set([\n # # re.sub('-\\d{10}-\\d{10}.tif', '.tif', x)\n # # for x in os.listdir(ini['EXPORT']['output_ws'])]))\n # # logging.debug(gdrive_list)\n\n # Get list of tiles that intersect the study area\n logging.debug('\\nBuilding export list')\n export_list = list(ard_tile_export_generator(\n ini['INPUTS']['study_area_path'],\n wrs2_coll=ini['INPUTS']['wrs2_coll'],\n cell_size=ini['EXPORT']['cell_size'],\n wrs2_tile_list=ini['INPUTS']['wrs2_tiles'],\n wrs2_tile_field=ini['INPUTS']['wrs2_tile_field'],\n wrs2_buffer=ini['INPUTS']['wrs2_buffer']))\n if not export_list:\n logging.error('\\nEmpty export list, exiting')\n return False\n\n # Save export list to json\n with open('export_tiles.json', 'w') as json_f:\n json.dump(export_list, json_f)\n\n\n # Process each tile separately\n logging.info('\\nImage Exports')\n for export_n, export_info in enumerate(export_list):\n tile_col = int(export_info['index'][1:4])\n tile_row = int(export_info['index'][5:8])\n if tile_cols_list and int(tile_col) not in tile_cols_list:\n logging.debug('ARD Tile: {} ({}/{}), skipping'.format(\n export_info['index'], export_n + 1, len(export_list)))\n continue\n elif tile_rows_list and int(tile_row) not in tile_rows_list:\n logging.debug('ARD Tile: {} ({}/{}), skipping'.format(\n export_info['index'], export_n + 1, len(export_list)))\n continue\n else:\n logging.info('ARD Tile: {} ({}/{})'.format(\n export_info['index'], export_n + 1, len(export_list)))\n\n logging.debug(' Shape: {}'.format(export_info['shape']))\n logging.debug(' Transform: {}'.format(export_info['geo']))\n logging.debug(' Extent: {}'.format(export_info['extent']))\n logging.debug(' MaxPixels: {}'.format(export_info['maxpixels']))\n logging.debug(' WRS2 tiles: {}'.format(\n ', '.join(export_info['wrs2_tiles'])))\n\n\n if ini['INPUTS']['et_model'] == 'EEFLUX':\n # Get the Landsat collection\n landsat_coll = landsat.get_landsat_coll(\n wrs2_tile_list=export_info['wrs2_tiles'],\n cloud_cover=ini['INPUTS']['cloud_cover'],\n start_date=ini['INTERPOLATE']['start_date'],\n end_date=ini['INTERPOLATE']['end_date'],\n landsat5_flag=ini['INPUTS']['landsat5_flag'],\n landsat7_flag=ini['INPUTS']['landsat7_flag'],\n landsat8_flag=ini['INPUTS']['landsat8_flag'],\n landsat_type='RAD')\n\n # Compute ETf for each Landsat scene\n # The 'BQA' band is also being returned by the etrf method\n def apply_et_fraction(image):\n etrf_obj = eeflux.EEFlux(ee.Image(image)).etrf\n etrf_img = ee.Image(etrf_obj.select(['etrf'], ['etf'])) \\\n .clamp(-1, 2)\n cloud_mask = landsat.landsat_bqa_cloud_mask_func(\n ee.Image(etrf_obj. 
select(['BQA'])))\n return etrf_img.updateMask(cloud_mask) \\\n .copyProperties(image, ['system:time_start'])\n scene_et_fraction_coll = ee.ImageCollection(\n landsat_coll.map(apply_et_fraction))\n\n else:\n logging.error('\\nInvalid/unsupported ET Model: {}'.format(\n ini['INPUTS']['et_model']))\n return False\n\n\n # Daily reference ET collection\n # Is the \"refet_source\" a function of the model, interpolation, or other?\n # The \"refet_type\" parameter is currently being ignored\n if ini[ini['INPUTS']['et_model']]['refet_source'] == 'GRIDMET':\n daily_et_reference_coll = ee.ImageCollection('IDAHO_EPSCOR/GRIDMET') \\\n .filterDate(ini['INPUTS']['start_date'], ini['INPUTS']['end_date']) \\\n .select(['etr'], ['et_reference'])\n elif ini[ini['INPUTS']['et_model']]['refet_source'] == 'CIMIS':\n daily_et_reference_coll = ee.ImageCollection('projects/climate-engine/cimis/daily') \\\n .filterDate(ini['INPUTS']['start_date'],\n ini['INPUTS']['end_date']) \\\n .select(['etr_asce'], ['et_reference'])\n\n # Compute composite/mosaic images for each image date\n daily_et_fraction_coll = ee.ImageCollection(interpolate.aggregate_daily(\n image_coll=scene_et_fraction_coll,\n start_date=ini['INTERPOLATE']['start_date'],\n end_date=ini['INTERPOLATE']['end_date']))\n\n # Interpolate daily ETf, multiply by daily ETr, and sum to ET\n daily_et_actual_coll = ee.ImageCollection(interpolate.interp_et_coll(\n et_reference_coll=daily_et_reference_coll,\n et_fraction_coll=daily_et_fraction_coll,\n interp_days=ini['INTERPOLATE']['interp_days'],\n interp_type=ini['INTERPOLATE']['interp_type']))\n\n # Export products\n # for product in ini['EXPORT']['products']:\n\n # logging.debug('\\n Product: {}'.format(product))\n export_id = ini['EXPORT']['export_id_fmt'].format(\n model=ini['INPUTS']['et_model'].lower(),\n # product=product.lower(),\n study_area=ini['INPUTS']['study_area_name'],\n index=export_info['index'],\n start=ini['INPUTS']['start_date'],\n end=ini['INPUTS']['end_date'],\n export=ini['EXPORT']['export_dest'].lower())\n export_id = export_id.replace('-', '')\n logging.debug(' Export ID: {}'.format(export_id))\n\n # if product == 'scene_id':\n # # Export the scene list CSV to Google Drive\n # if ini['EXPORT']['export_dest'] == 'GDRIVE':\n # export_path = os.path.join(\n # ini['EXPORT']['output_ws'], export_id + '.csv')\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # export_path = '{}/{}/{}'.format(\n # ini['EXPORT']['output_ws'], product, export_id + '.csv')\n # if ini['EXPORT']['export_dest'] == 'CLOUD':\n # # Write each product to a separate folder\n # export_path = '{}/{}/{}'.format(\n # ini['EXPORT']['output_ws'], product, export_id + '.tif')\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # export_path = os.path.join(\n # ini['EXPORT']['output_ws'], export_id + '.tif')\n if ini['EXPORT']['export_dest'] == 'ASSET':\n # Write each product to a separate folder\n export_path = '{}/{}'.format(\n ini['EXPORT']['output_ws'], export_id)\n else:\n logging.warning(' Unsupported product type, skipping')\n continue\n logging.debug(' Export folder: {}'.format(\n os.path.dirname(export_path)))\n logging.debug(' Export file: {}'.format(\n os.path.basename(export_path)))\n\n if overwrite_flag:\n if export_id in tasks.keys():\n logging.debug(' Task already submitted, cancelling')\n ee.data.cancelTask(tasks[export_id])\n\n # This is intentionally not an \"elif\" so that a task can be\n # cancelled and an existing image/file/asset can be removed\n if (ini['EXPORT']['export_dest'] == 'ASSET' and\n export_path 
in asset_list):\n logging.debug(' Asset already exists')\n subprocess.check_output(\n ['earthengine', 'rm', export_path],\n shell=shell_flag)\n # Files in cloud storage are easily overwritten\n # so it is unneccesary to manually remove them\n # # This would remove an existing file\n # subprocess.call(['gsutil', 'rm', export_path])\n # if (ini['EXPORT']['export_dest'] == 'CLOUD' and\n # export_path in cloud_list):\n # logging.debug(' Export image already exists')\n # # Files in cloud storage are easily overwritten\n # # so it is unneccesary to manually remove them\n # # # This would remove an existing file\n # # subprocess.check_output(['gsutil', 'rm', export_path])\n # elif (ini['EXPORT']['export_dest'] == 'GDRIVE' and\n # export_path in gdrive_list):\n # logging.debug(' Export image already exists, removing')\n # os.remove(export_path)\n # # Remove automatically generated image tiles\n # # for f in glob.glob(export_path.replace('.tif', '*.tif')):\n # # os.remove(f)\n else:\n if export_id in tasks.keys():\n logging.debug(' Task already submitted, skipping')\n continue\n if (ini['EXPORT']['export_dest'] == 'ASSET' and\n export_path in asset_list):\n logging.debug(' Asset already exists, skipping')\n continue\n # elif (ini['EXPORT']['export_dest'] == 'CLOUD' and\n # export_path in cloud_list):\n # logging.debug(' Export file already exists, skipping')\n # continue\n # elif (ini['EXPORT']['export_dest'] == 'GDRIVE' and\n # os.path.isfile(export_path)):\n # logging.debug(' Export file already exists, skipping')\n # continue\n\n # Compute target product\n # if product == 'scene_id':\n # def scene_id_extract(image):\n # return ee.Feature(None).setMulti({\n # 'SCENE_ID': ee.String(image.get('SCENE_ID'))})\n # scene_id_coll = ee.FeatureCollection(\n # scene_et_fraction_coll.map(scene_id_extract)).sort('SCENE_ID')\n\n output_images = []\n for product_i, product in enumerate(ini['EXPORT']['products']):\n logging.debug(' Product: {}'.format(product))\n if product == 'et_actual':\n # Sum daily ET to total ET\n output_images.append(\n ee.Image(daily_et_actual_coll.sum()).toFloat())\n elif product == 'et_reference':\n # Sum daily reference ET to total reference ET\n output_images.append(\n ee.Image(daily_et_reference_coll.sum()).toFloat())\n elif product == 'et_fraction':\n # Compute mean ETf (ET / ETr)\n output_images.append(\n ee.Image(daily_et_actual_coll.sum()) \\\n .divide(ee.Image(daily_et_reference_coll.sum())).toFloat())\n elif product == 'count':\n # Filter count date range to same period as reference ET\n output_images.append(ee.Image(\n daily_et_fraction_coll.filterDate(\n ini['INPUTS']['start_dt'],\n ini['INPUTS']['end_dt'] + datetime.timedelta(days=1)).count())\\\n .toUint8())\n\n # DEADEEF - Consider saving other input parameters\n # CLOUD_COVER_LAND, number of interpolation days, ?\n output_image = ee.Image(ee.Image(output_images) \\\n .rename(ini['EXPORT']['products']) \\\n .setMulti({\n 'system:time_start': ini['INPUTS']['start_date'],\n 'index': export_info['index']}))\n # print(output_image.get('system:time_start').getInfo())\n # input('ENTER')\n\n # Build export tasks\n # if product == 'scene_id':\n # if ini['EXPORT']['export_dest'] == 'CLOUD':\n # task = ee.batch.Export.table.toCloudStorage(\n # scene_id_coll,\n # description=export_id,\n # bucket=ini['EXPORT']['bucket_name'],\n # fileNamePrefix='{}/{}/{}'.format(\n # ini['EXPORT']['bucket_folder'], product, export_id),\n # fileFormat='CSV')\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # # Export the scene list CSV to Google 
Drive\n # task = ee.batch.Export.table.toDrive(\n # scene_id_coll,\n # description=export_id,\n # folder=os.path.basename(ini['EXPORT']['output_ws']),\n # fileNamePrefix=export_id,\n # fileFormat='CSV')\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # # Export the image to cloud storage\n # task = ee.batch.Export.image.toCloudStorage(\n # output_image,\n # description=export_id,\n # bucket=ini['EXPORT']['bucket_name'],\n # fileNamePrefix='{}/{}/{}'.format(\n # ini['EXPORT']['bucket_folder'], product, export_id),\n # dimensions=export_info['shape'],\n # crs=export_info['crs'],\n # crsTransform=export_info['geo'],\n # # shardSize=,\n # # fileDimensions=,\n # maxPixels=export_info['maxpixels'])\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # # Export the images to your Google Drive\n # task = ee.batch.Export.image.toDrive(\n # output_image,\n # description=export_id,\n # folder=os.path.basename(ini['EXPORT']['output_ws']),\n # fileNamePrefix=export_id,\n # dimensions=export_info['shape'],\n # crs=export_info['crs'],\n # crsTransform=export_info['geo'],\n # maxPixels=export_info['maxpixels'])\n if ini['EXPORT']['export_dest'] == 'ASSET':\n # Export the image to cloud storage\n task = ee.batch.Export.image.toAsset(\n output_image,\n description=export_id,\n assetId='{}/{}'.format(ini['EXPORT']['output_ws'], export_id),\n # pyramidingPolicy='mean',\n dimensions=export_info['shape'],\n crs=export_info['crs'],\n crsTransform=export_info['geo'],\n maxPixels=export_info['maxpixels'])\n else:\n logging.debug(' Export task not built, skipping')\n # continue\n\n # Try to start the export task a few times\n logging.debug(' Starting export task')\n for i in range(1, 10):\n try:\n task.start()\n break\n except Exception as e:\n logging.error(\n ' Error: {}\\n Retrying ({}/10)'.format(e, i))\n time.sleep(i ** 2)\n i += 1\n # logging.debug(' Active: {}'.format(task.active()))\n # logging.debug(' Status: {}'.format(task.status()))\n\n if delay and delay > 0:\n time.sleep(delay)\n elif delay and delay == -1:\n input('ENTER')", "def create_feature_collection(self, csv_file):\n all_features = list()\n with open(path.join(self.cur_dir, csv_file), 'rb') as f:\n reader = csv.reader(f)\n for row in reader:\n postcode = str(row[0]).replace(' ', '')\n easting = int(row[2])\n northing = int(row[3])\n point = Point(coordinates=(easting, northing))\n feature = Feature(geometry=point, properties={'postcode': postcode})\n all_features.append(feature)\n # Set CRS to BNG\n coord_ref = crs.Named(properties={'name': 'urn:ogc:def:crs:EPSG::27700'})\n feature_collection = FeatureCollection(all_features, crs=coord_ref)\n return feature_collection", "def get_feature_collection(page):\n #print page['words']\n feature_array = []\n for i,word in enumerate(page['words']):\n # should line_num be required here? It's not supported by -bbox output... \n word_properties = {'text':word['text'], 'line_num':word['line_num']}\n # should we instead rely on the the word number for the id? 
\n feature_array.append(get_geojson_feature(i, word['bbox'], word_properties))\n \n featurecollection = geojson.FeatureCollection(feature_array)\n # todo: add page dimensions\n return geojson.dumps(featurecollection)", "def GEEprismPtsAvgMonth(ptsFile,metric,startYear,endYear,buf,poly,username,folderOut, scalePix = 4000):\n \n # load required libraries\n import ee\n\n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n years = list(range(startYear, endYear + 1))\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n ID_field = \"geeID\"\n \n for met in metric:\n metL = [met]\n Gridmet_pr = ee.ImageCollection('OREGONSTATE/PRISM/AN81m').select(met)\n \n img_col = Gridmet_pr.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'pri'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print ('buffered pts by:' + str(buf) + ' for ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'pri'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'pri'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for ' + met)", "def test_load_and_featurize_save_csv():\n f = ImageFeaturizer()\n name, ext = os.path.splitext(CSV_NAME_MULT)\n check_array_path = \"{}_{}\".format(name, 'squeezenet_depth-1_output-512')\n f.featurize(save_csv=True, save_features=True, omit_time=True,\n **LOAD_DATA_ARGS_MULT)\n full_check = \"{}{}{}\".format(check_array_path, '_full', ext)\n feature_check = \"{}{}{}\".format(check_array_path, '_features_only', ext)\n f.save_csv(save_features=True, 
omit_time=True)\n try:\n assert os.path.isfile(full_check)\n assert os.path.isfile(feature_check)\n finally:\n remove_generated_paths(assert_not=False)\n if os.path.isfile(\"{}{}{}\".format(check_array_path, '_features_only', ext)):\n os.remove(\"{}{}{}\".format(check_array_path, '_features_only', ext))\n if os.path.isfile(\"{}{}{}\".format(check_array_path, '_full', ext)):\n os.remove(\"{}{}{}\".format(check_array_path, '_full', ext))", "def export_model(self, save_path: str, save_format: Optional[str] = None) -> None:", "def save_feature(self):\n import scipy.io as sio\n testdp = self.test_data_provider\n num_batches = len(testdp.batch_range)\n print 'There are ' + str(testdp.get_num_batches(self.data_path)) + ' in directory'\n if self.test_data_provider.batch_size > 0:\n num_batches = (num_batches - 1)/ self.test_data_provider.batch_size + 1\n if self.test_one:\n num_batches = min(num_batches, 1)\n print 'There are ' + str( num_batches ) + ' in range'\n iu.ensure_dir(self.save_feature_path)\n feature_name = self.op.get_value('save_feature_name')\n feature_dim = self.model_state['layers'][self.feature_idx]['outputs']\n print 'Feature dim is %d' % feature_dim\n for b in range(num_batches):\n epoch, b_num, data = self.get_next_batch(train=False)\n print ' Start writing batch......\\t' + str(b_num)\n num_data = data[0].shape[-1]\n data += [n.zeros((num_data, feature_dim), dtype=n.single)]\n save_name = 'batch_feature_' + str(b_num) + '_' + feature_name \n save_path = iu.fullfile(self.save_feature_path, save_name)\n self.libmodel.startFeatureWriter(data, self.feature_idx)\n self.finish_batch()\n d = dict()\n d['X'] = data[-1].transpose()\n d['batch_num'] = b_num\n d['Y'] = data[1]\n cur_batch_indexes = self.test_data_provider.data_dic['cur_batch_indexes']\n # d['Y_other'] = data[2:-1] if len(data) > 3 else []\n ####### WARN BEGIN ################\n # for human eva fake experiments\n # d['images_path'] = [self.test_data_provider.images_path[x] for x in cur_batch_indexes]\n # d['Y'] = np.concatenate(map(lambda x:self.test_data_provider.batch_meta['RelativeSkel_Y3d_mono_body_backup'][...,x].reshape((-1,1),order='F'), cur_batch_indexes),axis=1)\n print d['Y'].shape\n d['cur_batch_indexes'] = cur_batch_indexes\n ####### WARN END ################\n print 'The len of data is ' + str(len(data))\n print 'The shape of X is' + str(d['X'].shape)\n print 'The shape of Y is' + str(d['Y'].shape)\n ##sio.savemat(save_path, d)\n pickle(save_path, d)", "def export_regions_to_file(\n label_layer, destination_directory, ignore_empty=True, obj_ext=\".obj\",\n):\n data = label_layer.data\n if ignore_empty:\n if data.sum() == 0:\n return\n\n name = label_layer.name\n\n filename = destination_directory / (name + obj_ext)\n volume_to_vector_array_to_obj_file(\n data, filename,\n )", "def export(self, outpath):\n fout = open(outpath, \"w\")\n\n # Header takes the guesswork out of loading by recording how many lines, vector dims\n fout.write(str(self.n_words) + \" \" + str(self.n_dim) + \"\\n\")\n for token in self.id2word:\n vector_components = [\"%.6f\" % number for number in self[token]]\n vector_as_string = \" \".join(vector_components)\n\n out_line = token + \" \" + vector_as_string + \"\\n\"\n fout.write(out_line)\n\n fout.close()", "def export(\n self,\n dest_file: Optional[IO[str]] = None, *,\n inc_version: bool = True,\n minimal: bool = False,\n disp_multiblend: bool = True,\n ) -> Optional[str]:\n if dest_file is None:\n string_buf = io.StringIO()\n dest_file = string_buf\n else:\n string_buf = None\n\n if 
inc_version:\n # Increment this to indicate the map was modified\n self.map_ver += 1\n\n dest_file.write('versioninfo\\n{\\n')\n dest_file.write(f'\\t\"editorversion\" \"{self.hammer_ver}\"\\n')\n dest_file.write(f'\\t\"editorbuild\" \"{self.hammer_build}\"\\n')\n dest_file.write(f'\\t\"mapversion\" \"{self.map_ver}\"\\n')\n dest_file.write(f'\\t\"formatversion\" \"{self.format_ver}\"\\n')\n dest_file.write('\\t\"prefab\" \"' +\n srctools.bool_as_int(self.is_prefab) + '\"\\n}\\n')\n\n dest_file.write('visgroups\\n{\\n')\n for vis in self.vis_tree:\n vis.export(dest_file, ind='\\t')\n dest_file.write('}\\n')\n\n if not minimal:\n dest_file.write('viewsettings\\n{\\n')\n dest_file.write('\\t\"bSnapToGrid\" \"' +\n srctools.bool_as_int(self.snap_grid) + '\"\\n')\n dest_file.write('\\t\"bShowGrid\" \"' +\n srctools.bool_as_int(self.show_grid) + '\"\\n')\n dest_file.write('\\t\"bShowLogicalGrid\" \"' +\n srctools.bool_as_int(self.show_logic_grid) + '\"\\n')\n dest_file.write(f'\\t\"nGridSpacing\" \"{self.grid_spacing}\"\\n')\n dest_file.write('\\t\"bShow3DGrid\" \"' +\n srctools.bool_as_int(self.show_3d_grid) + '\"\\n}\\n')\n\n # The worldspawn version should always match the global value.\n # Also force the classname, since this will crash if it's different.\n self.spawn['mapversion'] = str(self.map_ver)\n self.spawn['classname'] = 'worldspawn'\n self.spawn.export(dest_file, disp_multiblend=disp_multiblend, _is_worldspawn=True)\n del self.spawn['mapversion']\n\n for ent in self.entities:\n ent.export(dest_file, disp_multiblend=disp_multiblend)\n\n if not minimal:\n dest_file.write('cameras\\n{\\n')\n if len(self.cameras) == 0:\n self.active_cam = -1\n dest_file.write(f'\\t\"activecamera\" \"{self.active_cam}\"\\n')\n for cam in self.cameras:\n cam.export(dest_file, '\\t')\n dest_file.write('}\\n')\n\n dest_file.write('cordons\\n{\\n')\n if len(self.cordons) > 0:\n dest_file.write('\\t\"active\" \"' +\n srctools.bool_as_int(self.cordon_enabled) +\n '\"\\n')\n for cord in self.cordons:\n cord.export(dest_file, '\\t')\n else:\n dest_file.write('\\t\"active\" \"0\"\\n')\n dest_file.write('}\\n')\n\n if self.quickhide_count > 0:\n dest_file.write(\n 'quickhide\\n'\n '{\\n'\n f'\\t\"count\" \"{self.quickhide_count}\"\\n'\n '}\\n'\n )\n\n if string_buf is not None:\n return string_buf.getvalue()\n else:\n return None", "def save_reconstructions(reconstructions, out_dir):\n if (not (os.path.exists(out_dir))):\n os.mkdir(out_dir)\n out_dir.mkdir(exist_ok=True)\n print('Saved directory is',out_dir)\n for fname, recons in reconstructions.items():\n with h5py.File(out_dir / fname, 'w') as f:\n f.create_dataset('reconstruction', data=recons)", "async def export(export_info: ExportDTO, background_tasks: BackgroundTasks):\n areas = get_areas_to_export(export_info)\n cameras = get_cameras_to_export(export_info, areas)\n temp_dir = tempfile.mkdtemp()\n export_filename = f\"export-{date.today()}.zip\"\n zip_path = os.path.join(temp_dir, export_filename)\n with ZipFile(zip_path, 'w', compression=ZIP_DEFLATED) as export_zip:\n for (cam_id, name) in cameras:\n export_camera_data_into_file(export_info, cam_id, name, export_zip)\n for (area_id, name) in areas:\n export_area_data_into_file(export_info, area_id, name, export_zip)\n background_tasks.add_task(clean_up_file, temp_dir)\n return FileResponse(zip_path, filename=export_filename)", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, 
and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. / 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. 
Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. 
Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)", "def create_feature_store(es_host: str) -> None:\n host = f'http://{es_host}'\n feature_store_url = urljoin(host, '_ltr')\n requests.delete(feature_store_url)\n requests.put(feature_store_url)", "def save_as_fits(self, filename):" ]
[ "0.61912423", "0.61882675", "0.59939873", "0.5933762", "0.5790404", "0.5758867", "0.5716076", "0.5666679", "0.56646466", "0.56428987", "0.5607236", "0.5524524", "0.5473312", "0.5465263", "0.54639006", "0.54507273", "0.54334444", "0.53373575", "0.533101", "0.5320749", "0.5317444", "0.5311356", "0.53059036", "0.53026456", "0.5298586", "0.5294648", "0.52402455", "0.5235857", "0.5223603", "0.5216896", "0.52053726", "0.5179646", "0.517246", "0.51672167", "0.51582384", "0.5133424", "0.5122704", "0.5118182", "0.51153606", "0.5111843", "0.51067865", "0.51025057", "0.50991017", "0.50709075", "0.50668603", "0.5059243", "0.50590324", "0.5047022", "0.5045532", "0.503977", "0.50239754", "0.50122553", "0.5009646", "0.5005952", "0.49998748", "0.49996343", "0.49969137", "0.49844214", "0.49800482", "0.49792466", "0.49780673", "0.49664873", "0.49517745", "0.4948447", "0.49475724", "0.49419177", "0.49344066", "0.4928717", "0.49080837", "0.48883915", "0.48880184", "0.48802844", "0.48777154", "0.48743135", "0.4874201", "0.48741192", "0.48655105", "0.48637545", "0.48608208", "0.4859708", "0.48536623", "0.48480955", "0.4844402", "0.48383558", "0.4822797", "0.48169827", "0.48105937", "0.48083368", "0.48074362", "0.48069394", "0.48069364", "0.48014504", "0.48012114", "0.47997335", "0.47987226", "0.47984985", "0.4798014", "0.47949108", "0.4793466", "0.4785339" ]
0.539379
17
Initializing an instance of Square
def __init__(self, size):
    self.__size = size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_init(self):\n s1 = Square(4)\n self.assertEqual(s1.width, 4)\n self.assertEqual(s1.height, 4)\n self.assertEqual(s1.id, 1)\n self.assertEqual(s1.x, 0)\n self.assertEqual(s1.y, 0)\n\n s2 = Square(5, 2, 3, 47)\n self.assertEqual(s2.width, 5)\n self.assertEqual(s2.height, 5)\n self.assertEqual(s2.id, 47)\n self.assertEqual(s2.x, 2)\n self.assertEqual(s2.y, 3)", "def setUp(self):\r\n self.S0 = Square(2)", "def __init__(self):\n self.rows = None\n self.columns = None\n self.squares = None\n # max is useful as a way to track range for iteration, and also as a way\n # to track the maximum number in any spot.\n self.max = 0", "def test_square_class(self):\n s1 = Square(10)\n self.assertEqual(10, s1.size)\n\n s2 = Square(10, 2)\n self.assertEqual(10, s2.size)\n self.assertEqual(2, s2.x)\n\n s3 = Square(3, 5, 2)\n self.assertEqual(3, s3.size)\n self.assertEqual(5, s3.x)\n self.assertEqual(2, s3.y)\n\n s4 = Square(10, 2, 0, 12)\n self.assertEqual(10, s4.size)\n self.assertEqual(12, s4.id)\n self.assertEqual(2, s4.x)\n self.assertEqual(0, s4.y)", "def setUpClass(cls):\n cls.s1 = Square(10)\n cls.s2 = Square(2)\n cls.s3 = Square(10, 5)\n cls.s4 = Square(6, 2, 4)\n cls.s5 = Square(2, 3, 5, 20)", "def __init__(self, rows: int = 1, columns: int = 2):\n super().__init__()\n self.__squares = [[Floor._clean for i in range(columns)] for j in range(rows)]", "def __init__(self, size):\n self.array = [[Square() for x in xrange(size)] for x in xrange(size)]\n self.init_game()", "def test_singlesquarecreationwithallvalues(self):\n Square.reset_objects()\n s1 = Square(10, 10, 10, 10)\n self.assertEqual(s1.width, 10)\n self.assertEqual(s1.height, 10)\n self.assertEqual(s1.x, 10)\n self.assertEqual(s1.y, 10)\n self.assertEqual(s1.id, 10)\n s2 = Square(10, 10, 10, 10)\n self.assertEqual(s2.width, 10)\n self.assertEqual(s2.height, 10)\n self.assertEqual(s2.x, 10)\n self.assertEqual(s2.y, 10)\n self.assertEqual(s2.id, 10)", "def __init__(self, squares=None, ncols=8, nrows=8):\n self.ncols = ncols\n self.nrows = nrows\n\n if not squares:\n self.squares = dict((i, None) for i in xrange(ncols * nrows))\n\n # 0 begins as the top of the board, making it black\n for i in xrange(ncols * 3):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"black\")\n # red would be the bottom 3 rows\n for i in xrange(ncols * (nrows - 3), ncols * nrows):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"red\")", "def test_multiplesquarecreationwithallvalues(self):\n Square.reset_objects()\n s1 = Square(10, 10, 10, 10)\n self.assertEqual(s1.width, 10)\n self.assertEqual(s1.height, 10)\n self.assertEqual(s1.x, 10)\n self.assertEqual(s1.y, 10)\n self.assertEqual(s1.id, 10)", "def setUp(self):\n self.a = Square(2) # id is 1\n self.b = Square(3, 1, 2, 100) # id is 100\n self.c = Square(5, 0, 0, None) # id is 2", "def __init__(self, param):\n \n if type(param) is type(self):\n\n self._possibilities = param._possibilities.copy()\n \n else:\n \n if param < 1:\n \n raise ValueError( \"Cannot create a square with\"\n + \" a maximum possibility of %s\"\n % param)\n \n self._possibilities = set(range(1, param + 1))", "def __init__(self,width=8,height=8):\n\t\tif height > 32 or width < 1 or height < 1:\n\t\t\traise \"Height must be between 1 and 32, width must be greater than 0\"\n\n\t\tself.Width = width\n\t\tself.Height = height\n\t\tself.Grid = [0] * width # we'll use 8 bits of the number in the array", "def __init__(self, width=7, height=6):\n self.width = width\n 
self.height = height\n self.board = self.createBoard()", "def __init__(self):\n self.square_size = 3 # large squares on a side\n self.size = self.square_size**2 # squares on a side\n numbers = self.numbers = tuple(range(1, self.size + 1))\n rows = self.rows = range(self.size)\n cols = self.cols = range(self.size)\n self.values = {(r,c): numbers for r in rows for c in cols}\n self.number_strings = '.' + ''.join(str(x) for x in self.numbers)", "def test_1_square_attributes(self):\r\n self.assertEqual(self.S0.width, 2)\r\n self.assertEqual(self.S0.height, 2)\r\n self.assertEqual(self.S0.x, 0)\r\n self.assertEqual(self.S0.y, 0)", "def __init__(self, f):\n with open(f,'r') as gridfile:\n self.matrix = ([[char for char in row if char != '\\n'] \n for row in gridfile.readlines()])\n # beginning represents the upper most \n self.beginning = Square(0, 0, self)", "def __init__(self, sl):\n if int(math.sqrt(sl))**2 != sl:\n raise Sudoku_Errors.InvalidPuzzleException(sl, \"Invalid Sudoku puzzle side length, must be a square integer\")\n\n self.sl = sl\n self.bs = int(math.sqrt(sl))", "def __init__(self, s = None): # Constructor Initializes New TicTacToe Board\n\n if s is None:\n self.state = np.array([0] * BOARD_SIZE)\n self.reset()\n\n else:\n self.state = s.copy()", "def __init__(self):\n self.x_coord = default_init\n self.y_coord = default_init\n self._init_random_coord() # generating random coordinates\n self.x_speed = default_init\n self.y_speed = default_init\n self.degrees = default_init\n self.radius = ship_def_radius", "def __init__(self, pen, square_side_size, squares=None):\n self.border_color = (128, 101, 23)\n self.square_dark = (188, 100, 75)\n self.square_light = (255, 255, 255)\n self.not_select_color = (0, 0, 0)\n self.select_color = (0, 0, 255)\n self.pen = pen\n self.next_square = square_side_size + 1\n self.board_side = square_side_size * 8 + 7\n self.board_top_y = self.next_square * 4\n self.board_lft_x = self.next_square * -4\n self.square_side_size = square_side_size\n self.border_size = square_side_size * 1.2\n if squares is not None:\n self.squares = squares\n else:\n self.squares = [[None for _ in range(8)] for _ in range(8)]", "def __init__ (self, p, q):\n self.n = p * q\n self.n_sq = self.n * self.n\n self.g = self.n + 1", "def __init__(self):\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n\n self.black_checkers = []\n self.white_checkers = []\n\n logger.info(u'Initialized checkerboard {}'.format(self))", "def __init__(self, shape):\n\n self.shape = shape", "def __init__(self, width, height, x, y):\n self.w = width\n self.h = height\n self.x = x\n self.y = y", "def __init__(self, square_data, k=10): # k defaults to 10\n self.square_data = square_data\n M, rows, columns = self.square_data.shape\n self.k = k\n self.num_examples = M\n self.image_vec = np.reshape(square_data, (M, rows * columns))\n self.mean_face = np.mean(self.image_vec, axis=0)\n self.std_face = np.std(self.image_vec, axis=0)\n self.components, self.singular_values = self.get_components()", "def __init__(self, width=20, height=20):\n self.width = width\n self.height = height\n self.cells = []\n for y in range(self.height):\n for x in range(self.width):\n self.cells.append(Cell(x, y, [N, S, E, W]))", "def __init__(self, x = 0, y = 0):\n self.x = x\n self.y = y", "def __init__(self, width=0, height=0):\n (self.width, self.height) = (width, height)\n self.pixels = [Color() for i in range(self.width * self.height)]", "def __init__(self, u_size, x_size, z_size):\n self.I = 0\n self.size = 
z_size\n\n self.A = np.ones((z_size, x_size))\n self.B = np.ones((z_size, u_size))\n self.b = np.ones(z_size)", "def test_singlesquarecreation(self):\n Square.reset_objects()\n s1 = Square(10)\n self.assertEqual(s1.id, 1)", "def __init__(self, x=0, y=0):\n self._x = x\n self._y = y", "def test_2_no_args_square(self):\r\n with self.assertRaises(TypeError):\r\n S1 = Square()", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def Square(rot):\n\n return Ship(\"Square\",\n rotate_shape([Pos(0,0), Pos(1,0), Pos(0,1), Pos(1,1)], rot))", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x, y, u):\n self.x = x\n self.y = y\n self.u = u", "def __init__(self, x = 200, y = 100, r = 10,vx = 0, vy = 170, colour = (255,255,255)):\n pygame.sprite.Sprite.__init__(self)\n self.x, self.y, self.r, self.vx, self.vy, self.colour = \\\n x, y, r, vx, vy, colour\n square_1_color = (255,255,255)", "def create(cls, **dictionary):\n if cls.__name__ == \"Square\":\n dummy = cls(1)\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 1)\n dummy.update(**dictionary)\n return dummy", "def __init__(self,x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self):\n self.phi = sqrt(5.0)/2.0 - 0.5", "def __init__(self, x, y):", "def __init__(self, initializer, scale=1):\n self.scale = normalize_tuple(scale, 2, \"scale\")\n self.initializer = initializer", "def __init__(self,x=0,y=0):\n self.x = x\n self.y = y\n pass", "def __init__(self, row=4, col=4, initial=2):\n self.grid = Grid(row, col, initial)", "def __init__(s,i,j):\n # Posição do centro\n s.cx, s.cy = convert(i,j)\n # Cor (pode ser passada para o construtor no futuro)\n s.cor = (200,200,200)\n\n # Vértices do hexágono\n s.pontos = (\n (s.cx, s.cy-L),\n (s.cx+l, s.cy-L/2),\n (s.cx+l, s.cy+L/2),\n (s.cx, s.cy+L),\n (s.cx-l, s.cy+L/2),\n (s.cx-l, s.cy-L/2),\n )", "def __init__(self, X, y):\n pass", "def __init__(self, shape: Tuple[int, int] = (3, 3)):\r\n\r\n if shape[0] <= 2 or shape[1] <= 2:\r\n raise exc.MeshException(\r\n \"The rectangular pixelization must be at least dimensions 3x3\"\r\n )\r\n\r\n self.shape = (int(shape[0]), int(shape[1]))\r\n self.pixels = self.shape[0] * self.shape[1]\r\n super().__init__()\r\n\r\n self.run_time_dict = {}", "def __init__(self, posn, w, h):\n self.corner = posn\n self.width = w\n self.height = h", "def __init__(self, posn, w, h):\n self.corner = posn\n self.width = w\n self.height = h", "def initialize(cls):\n return cls( *([0.]*cls._parsize) )", "def __init__(self, grid, x, y, cols):\n self.grid = grid\n self.x = x\n self.y = y\n self.cols = cols", "def __init__(self, x: float, y: float):\n self.x = x\n self.y = y", "def __init__(self, x: float, y: 
float):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n\t\t\n\t\tself.x, self.y = x, y", "def test_multiplesquarecreation(self):\n Square.reset_objects()\n s1 = Square(10)\n s2 = Square(2)\n s3 = Square(3)\n self.assertEqual(s1.id, 1)\n self.assertEqual(s2.id, 2)\n self.assertEqual(s3.id, 3)", "def test_3_square_attributes_with_args(self):\r\n S2 = Square(4, 1, 2, -5)\r\n self.assertEqual(S2.width, 4)\r\n self.assertEqual(S2.height, 4)\r\n self.assertEqual(S2.x, 1)\r\n self.assertEqual(S2.y, 2)\r\n self.assertEqual(S2.id, -5)", "def __init__(self, row, column):\n self._row = row\n self._column = column\n self._selected = False\n\n # Whether the square is an \"edge\" from which a ray can be launched\n self._edge = self.is_edge()\n\n # Whether or not an atom is placed on the square\n self._atom = False\n\n # Whether or not a ray has originated from the square\n # False if no - points towards Ray object if yes\n self._originating_ray = False\n\n # Whether or not a ray has terminated on the square. Defaults to False\n # Contains tuple of origin of the terminating ray if one exists.\n self._terminating_ray = False", "def __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y", "def __init__(self, origin_x=-2.5, origin_y=-2.5, resolution=.1,\n width=50, height=50):\n self.origin_x = origin_x\n self.origin_y = origin_y\n self.resolution = resolution\n self.width = width\n self.height = height\n self.grid = np.zeros((height, width))", "def __init__(self, x: int, y: int, w: int, h: int):\n self.x1 = x\n self.y1 = y\n self.x2 = x + w\n self.y2 = y + h", "def __init__(self, x=None, y=None):\n if y is None:\n if x is None:\n object.__setattr__(self, 'x', 0)\n object.__setattr__(self, 'y', 0)\n else:\n object.__setattr__(self, 'x', x[0])\n object.__setattr__(self, 'y', x[1])\n else:\n object.__setattr__(self, 'x', x)\n object.__setattr__(self, 'y', y)", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY", "def __init__(self, x_0, y_0, initX, initY,h=5):\n self.x_0=x_0\n self.y_0=y_0\n self.x_init=initX\n self.y_init=initY\n self.step=h", "def __init__(self, s, x, t, sigma, rf):\n \n \n super().__init__(s, x, t, sigma, rf)", "def __init__(self, n_rows: int = 2, n_columns: int = 2):\n self.set_uniform(n_rows, n_columns)", "def __init__(self, shape_num):\n\n\t\tself.shape_num = shape_num\n\t\tself.shape = shapes[shape_num].copy()\n\t\tself.width = len(self.shape[0])\n\t\tself.height = len(self.shape)", "def __init__(self):\n self.X = np.zeros((0, 2))", "def __init__(self, *args):\n this = _libsbml.new_StoichiometryMath(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\t\tself.x, self.y, self.w, self.h = 0, 0, 0, 0\n\t\tself.vx, self.vy, self.vw, self.vh = 0, 0, 0, 0", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", 
"def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y" ]
[ "0.731118", "0.7260903", "0.70362157", "0.7032253", "0.7006199", "0.68428224", "0.6766883", "0.67145604", "0.6702565", "0.6646152", "0.65749985", "0.63614845", "0.6321423", "0.63113105", "0.624146", "0.6238611", "0.62014264", "0.60820174", "0.6076275", "0.6062393", "0.6061413", "0.6047881", "0.60329026", "0.6024459", "0.601514", "0.5997086", "0.5964496", "0.5961059", "0.59468806", "0.59466344", "0.5946624", "0.59407", "0.5937165", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5936769", "0.5934066", "0.59157044", "0.59157044", "0.59157044", "0.59157044", "0.59157044", "0.5915637", "0.590486", "0.58939826", "0.58936524", "0.58875245", "0.5882215", "0.5870689", "0.58704495", "0.586436", "0.58624244", "0.5861583", "0.58597755", "0.5853552", "0.5853552", "0.58407307", "0.5839438", "0.5835643", "0.5835643", "0.58323675", "0.5831694", "0.58269024", "0.58203846", "0.58101225", "0.58088726", "0.5794976", "0.57881004", "0.5783109", "0.5783109", "0.5779202", "0.57729", "0.57713395", "0.5768691", "0.57624096", "0.5756504", "0.5751919", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971", "0.5750971" ]
0.0
-1
Instantiate an Invalid Data Error.
def __init__(self, data_file_name: str, message: str) -> None:
    super().__init__(
        "Invalid data error. "
        "The file '{}' contained data of the wrong format: {}".format(
            data_file_name, message
        )
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def from_data(cls, data):\n errors = data.get('errors')\n if not errors or len(errors) == 0:\n return cls('Unknown error!')\n\n # For simplicity, we'll just include the first error.\n err = errors[0]\n return cls(\n message=err.get('message'),\n code=err.get('code'),\n detail=err.get('detail'),\n )", "def test_from_object_fail(self):\n class InvalidClass(object):\n pass\n Invalid_object = InvalidClass()\n with self.assertRaises(TypeError):\n BaseDataClass.from_object(Invalid_object)", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def testBadDataToToken(self):\n key = createKey()\n self.assertRaises(ValueError, dataToToken, key, data=self)", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def test_invalid_data_construction(self):\n with self.assertRaises(Exception):\n LongDecimalEuler(term=-1)\n with self.assertRaises(Exception):\n LongDecimalEuler(term=\"aaa\")\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=-1)\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=\"aaa\")", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def testConstructorValueError(self):\n test_cases = [\n 'these',\n 'are',\n 'bad',\n 'data',\n 'types',\n 'FILE',\n 'STRING',\n 'JSON',\n ]\n for bad_data_type in test_cases:\n with self.assertRaises(ValueError):\n ASCIITransportFormat(bad_data_type, '')", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def test_invalid_input_data(self):\n self.data.diffusion_data = self.data.diffusion_data[0]\n self.assertRaises(ValueError, module_05.run_module,\n self.data)", "def test_constructor_invalid():\n with pytest.raises(TypeError, match='missing 1 required positional argument'):\n PseudoPotentialData() # pylint: disable=no-value-for-parameter", "def test_invalid_data_raises_error(self):\n with self.assertRaises(ValueError):\n PoincareModel([(\"a\", \"b\", \"c\")])\n with self.assertRaises(ValueError):\n PoincareModel([\"a\", \"b\", \"c\"])\n with self.assertRaises(ValueError):\n PoincareModel(\"ab\")", "def from_d(d):\n return SMRTServiceBaseError(\n d['httpCode'], d['errorType'], d['message'])", "def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")", "def testTokenToDataWithBadKey(self):\n key = createKey()\n data = {u'user': u'aliafshar'}\n token = dataToToken(key, data)\n self.assertRaises(ValueError, tokenToData, createKey(), token=token)", "def __init__(self):\n\n # dictionary that link all the invalid equation code to the\n # corresponding error massage\n self.ERROR_MASSAGE_DIC = {\n InvalidEquationCode.VALID: \"Valid\",\n InvalidEquationCode.UNDEFINED_CHARACTERS:\n \"Undefined characters in your equation\",\n InvalidEquationCode.CLOSE_WITHOUT_OPEN_BRACKET:\n \"Close bracket without open one\",\n InvalidEquationCode.EMPTY_EQUATION:\n \"Empty equation\",\n InvalidEquationCode.TOO_MANY_OPEN_BRACKET:\n \"Too many open brackets...(missing close brackets)\",\n 
InvalidEquationCode.OPERATORS_OPERANDS_ERROR:\n \"Missing operators/operands..\",\n InvalidEquationCode.TOO_MANY_DOTS:\n \"Too many dots in one number\",\n InvalidEquationCode.UNNECESSARY_BRACKET:\n \"Unnecessary brackets in your equation\",\n InvalidEquationCode.DIVISION_BY_ZERO:\n \"Division by zero is undefined\",\n InvalidEquationCode.FACTORIAL_ON_NEGATIVE_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.FACTORIAL_ON_DECIMAL_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.NUMBER_TOO_BIG: \"Number is too big\",\n InvalidEquationCode.COMPLEX_NUMBER: \"Complex number\",\n InvalidEquationCode.EMPTY_DECIMAL_POINT:\n \"Empty decimal point....(missing number)\",\n }", "def create_error(test, time, error):\n info = _TestInfo(test, time)\n info._error = error\n return info", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def decode(cls, data: bytes):\n\n error_code, = struct.unpack(Protocol.Formats.ERROR_CODE_FORMAT, data)\n return cls(error_code=error_code)", "def _validate_create_data(self, data):\n return", "def test_150(self):\n self.assertRaises(\n exceptions.DataONEExceptionException, exceptions.deserialize,\n INVALID_ERROR_DOC[0]\n )", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))", "def test_create_data_lookup_error(self):\n with self.assertRaises(LookupError):\n _ = create_data({\"name\": \"fake_data\"})", "def test_deserialize_with_bad_data(self):\n pet = Pet(0)\n self.assertRaises(DataValidationError, pet.deserialize, \"string data\")", "def error(self, key, value, context, errorclass=InvalidDataError, **values):\n msg_template = self.message_for_key(key, context)\n raise errorclass(msg_template % values, value, key=key, context=context)", "def __init__(self, datatype, stage=\"\", context=\"\"):\n filler = \"unspecified\"\n if isinstance(datatype, str):\n typename = datatype\n else:\n try:\n typename = datatype.__name__\n except AttributeError:\n typename = str(datatype)\n explanation = \"Error creating {dt}; stage: {s}; context: {c}\".\\\n format(dt=typename, s=stage or filler, c=context or filler)\n super(ModelConstructionException, self).__init__(explanation)", "def invalid_scalar(data):\n return object.__new__(object)", "def __init__(self, obj, path, notes=()):\n format_dict = {'index': path[-1], 'object_name': obj._name}\n message = (\"Invalid entry found in '{object_name}' at index, '{index}'\"\n .format(**format_dict))\n note = \"It's invalid because it doesn't contain a valid 'type' value.\"\n notes = [note] + list(notes)\n super(PlotlyDataTypeError, self).__init__(\n message=message, path=path, notes=notes\n )", "def __init__(self, *args):\n this = _ida_hexrays.new_vd_failure_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_validate_on_invalid_data_type(self):\n args = (self.bytes_a, 'invalid')\n self.assertRaises(TypeError, objects.OpaqueObject, *args)", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def test_fieldname_exc(self):\n ds = self.f.create_dataset('foo', (100,), 'f')\n self.assertRaises(ValueError, ds.__getitem__, (0, 'a'))", "def test_init_value_error(self):\n data = [[0, 0], [0, 0], [0, 0]]\n with 
self.assertRaises(ValueError):\n Board(data)", "def invalid(self):\n pass", "def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def getValue(self, data):\r\n raise Exception(\"Exception via Error Parameter\")", "def test_construct_with_invalid(self):\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 100, 500, 50) # xmin > xmax\r\n\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 100, 200, 500) # ymin > ymax\r\n\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 100, 300, 500) # ymin == ymax\r\n\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 500, 400, 500) # ymin == ymax\r\n\r\n # unconvertalbe string\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(\"GG\", 500, 400, 500)", "def __init__(self, *args):\n this = _libsbml.new_SBMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def validate(cls, data, errors):", "def test_invalid_instantiation(invalid_instance):\n with pytest.raises(ValueError):\n invalid_instance()", "async def test_create_invalid_field(self):\n data = {'id': 33, 'value': 'foo'}\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.create(data)\n self.assertEqual(\n 'Error: \"id\": 33 is not a string: {\\'id\\': \\'\\'}',\n str(cm.exception))", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def test_parse_obj_invalid(self) -> None:\n with pytest.raises(ValidationError):\n RunwayTestDefinition.parse_obj({\"type\": \"invalid\"})", "def test_validate_on_invalid_value(self):\n args = (0, enums.OpaqueDataType.NONE)\n self.assertRaises(TypeError, objects.OpaqueObject, *args)", "def test_value_init12(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(10, 1, 17, -9)\n msg = \"y must be >= 0\"\n self.assertEqual(str(err.exception), msg)", "def checkError(self, data):\n if data and ('error' in data):\n e = T411Error(data['code'], data['error'])\n T411.log.error(str(e))\n # Error 201 = Token has expired\n # Error 202 = Invalid token\n if e.code in [201, 202]:\n self.headers[T411.authentication_header] = None\n self.token_timestamp = None\n raise e", "def __init__(self, message):\n ModelException.__init__(self, message)", "def __init__(self,\n type_id: int,\n data,\n data_type: DataType = DataType.AUTODETECT,\n length=-1):\n if type_id < 0 or 255 < type_id:\n raise ValueError('The type_id parameter must between 0 and 255 but is {val}'.format(val=type_id))\n self.type_id = type_id\n self.data_type = data_type\n self.data = data\n self.length = length", "def __init__(self, *args):\n this = _ida_hexrays.new_hexrays_failure_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def test_property_invalid(self):\n\n 
self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )", "def test_error(cls, err, data):\n do_error_test(cls, err, data)", "def create_exception(self, msg: str):", "def test_value_init10(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(10, 2, 3, -1)\n msg = \"y must be >= 0\"\n self.assertEqual(str(err.exception), msg)", "def __init__(self, err_code=None):\n self.err_code = err_code\n if self.err_code in [member.value for name, member in NrfjprogdllErr.__members__.items()]:\n err_str = 'An error was reported by NRFJPROG DLL: {} {}.'.format(self.err_code, NrfjprogdllErr(self.err_code).name)\n else:\n err_str = 'An error was reported by NRFJPROG DLL: {}.'.format(self.err_code)\n\n Exception.__init__(self, err_str)", "def __new__(cls, player, data):\n try:\n reason_object = data[LAVALINK_KEY_EXCEPTION_REASON_OBJECT]\n except KeyError:\n reason = data[LAVALINK_KEY_EXCEPTION_REASON_DEPRECATED]\n severity = ''\n else:\n reason = reason_object[LAVALINK_KEY_EXCEPTION_REASON_OBJECT_MESSAGE]\n severity = reason_object[LAVALINK_KEY_EXCEPTION_REASON_OBJECT_SEVERITY]\n \n track = player.get_current()\n \n self = object.__new__(cls)\n self.player = player\n self.track = track\n self.reason = reason\n self.severity = severity\n return self", "def test_002_init(self):\n self.assertRaises(TypeError,rawdata.rawdata,\"id\")", "def __init__(self, *args):\n this = _libsbml.new_XMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')", "def received_error(self, data: Data, source: tuple, destination: tuple):\n pass", "def test_call_incompatible_data(self):\r\n self.cs_overview.DistanceMatrices = [self.single_ele_dm,\r\n self.single_ele_dm]\r\n self.assertRaises(ValueError, self.cs_overview)", "def __init__(self, error: List[Error] = None, info: Dict = None):\n self.error = error if error is not None else []\n self.info = info if info is not None else {}\n self.is_valid = True", "def __init__(self,data_name):\n\t\tif data_name.lower().strip() not in DATASETS.keys():\n\t\t\tprint(f\"{data_name} isn't a valid data name! 
One of \"+\", \".join(DATASETS.keys()))\n\t\t\traise Exception\n\n\t\tself.data_name = data_name.lower().strip()", "def odata_error(self, request, environ, start_response, sub_code,\n message='', code=400):\n response_headers = []\n e = core.Error(None)\n e.add_child(core.Code).set_value(sub_code)\n e.add_child(core.Message).set_value(message)\n response_type = self.content_negotiation(\n request, environ, self.ErrorTypes)\n if response_type is None:\n # this is an error response, default to text/plain anyway\n response_type = params.MediaType.from_str(\n 'text/plain; charset=utf-8')\n elif response_type == \"application/atom+xml\":\n # even if you didn't ask for it, you get application/xml in this\n # case\n response_type = \"application/xml\"\n if response_type == \"application/json\":\n data = str(''.join(e.generate_std_error_json()))\n else:\n data = str(e)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (code, sub_code), response_headers)\n return [data]", "def test_with_invalid_input(self):\n for dataset_type in ['ruler', 'pencil', 'cheese']:\n with self.assertRaises(ValueError) as exc:\n check_dataset_type(dataset_type)\n self.assertEqual(\"Dataset type not 'regular' or 'raw' is %s\" % dataset_type,\n str(exc.exception))", "def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')", "def test_load__fail_malformed_eth_address():\n with pytest.raises(InvalidAddress):\n ContractHandler._load(\"DTFactory\", \"foo address\")", "def cursor_error(cls, val):\n return cls('cursor_error', val)", "def cursor_error(cls, val):\n return cls('cursor_error', val)", "def __init__(self, buff):\n fmt = 'h'\n response = struct_helpers.unpack_from(fmt, buff, 0)\n self.error_code = response[0]", "def __init__(self, buff):\n fmt = 'h'\n response = struct_helpers.unpack_from(fmt, buff, 0)\n self.error_code = response[0]", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Data requested from a class could not be found: {}\".format(message)\n )", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def test_deserialize_missing_data(self):\n data = {\"product_id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def test_value_init11(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(10, 2, {})\n msg = \"x must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def test_validate_on_invalid_name(self):\n args = (self.bytes_a, enums.OpaqueDataType.NONE)\n kwargs = {'name': 0}\n self.assertRaises(TypeError, objects.OpaqueObject, *args, **kwargs)", "def test_001_init(self):\n self.assertRaises(TypeError,rawdata.rawdata)", "def test_create_error(self):\n metadata_dict = {\n '1.SKB8.640193': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status_id': 1,\n 'str_column': 'Value for sample 1'},\n '1.SKD8.640184': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status_id': 1,\n 
'str_column': 'Value for sample 2'},\n '1.SKB7.640196': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status_id': 1,\n 'str_column': 'Value for sample 3'}\n }\n metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')\n with self.assertRaises(QiitaDBColumnError):\n PrepTemplate.create(metadata, self.new_raw_data, self.test_study,\n self.data_type)", "def raise_on_invalid(self) -> None:\n if not self.is_valid:\n raise InvalidDataFrameError(self.report)", "def __init__(self, error, status_code, *args, **kwargs):\n super(BusinessException, self).__init__(*args, **kwargs)\n self.error = error\n self.status_code = status_code", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def test_invalid_dataset():\n train = ((\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5.5))\n with pytest.raises(ValueError):\n TabularDataset(train)", "def make_error(self, key: str, **kwargs) -> ValidationError:\n try:\n msg = self.error_messages[key]\n except KeyError as error:\n class_name = self.__class__.__name__\n message = (\n \"ValidationError raised by `{class_name}`, but error key `{key}` does \"\n \"not exist in the `error_messages` dictionary.\"\n ).format(class_name=class_name, key=key)\n raise AssertionError(message) from error\n if isinstance(msg, (str, bytes)):\n msg = msg.format(**kwargs)\n return ValidationError(msg)", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate(2)", "def bad_data_fail(self, good_data, bad_data, message):\n self.add_fail(bad_data, message)\n self.update_fail(good_data, bad_data, message)", "def error(self, **data):\n template_specification = dict(mainContent=\"../error\", title=\"Error page\", data=data)\n template_specification = self._fill_user_specific_attributes(template_specification)\n return self.fill_default_attributes(template_specification)", "def test_bad_data_fail2(self):\n with self.assertRaises(ValueError):\n mapreader.get_data(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data_corrupt_header.map'))", "def test_create_metric_using_invalid_type(self):\n with self.assertRaises(Exception) as context:\n pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n 7,\n self.counter_metric_data,\n )\n self.assertIn(\"Invalid metric_type\", str(context.exception))", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: 
Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def factory(request):\n error_code = request.code\n error_mapping = {\n '204': DataCiteNoContentError,\n '400': DataCiteBadRequestError,\n '401': DataCiteUnauthorizedError,\n '403': DataCiteForbiddenError,\n '404': DataCiteNotFoundError,\n '410': DataCiteGoneError,\n '412': DataCitePreconditionError,\n }\n Err = error_mapping.get(error_code, DataCiteServerError)\n return Err(request)" ]
[ "0.7352192", "0.6801983", "0.65621036", "0.646361", "0.64199716", "0.6389275", "0.6367765", "0.63524276", "0.63038003", "0.6254848", "0.62355447", "0.6224742", "0.6218904", "0.6123961", "0.61025405", "0.6082676", "0.6062875", "0.6062328", "0.60509044", "0.6037686", "0.60182846", "0.60182846", "0.60182846", "0.60015196", "0.59960586", "0.5994591", "0.598619", "0.59776855", "0.59619975", "0.59614944", "0.5923164", "0.5917688", "0.59155625", "0.5904885", "0.5903577", "0.58825487", "0.58571273", "0.5851031", "0.5827277", "0.581406", "0.5803001", "0.578966", "0.57762766", "0.5748487", "0.57406855", "0.57316184", "0.572872", "0.57174516", "0.571492", "0.57098615", "0.5703827", "0.5702292", "0.5678932", "0.5676878", "0.5676304", "0.5673308", "0.5664906", "0.5662316", "0.5653677", "0.56486696", "0.5647726", "0.5636388", "0.56307065", "0.561446", "0.56132835", "0.56108665", "0.55982447", "0.5595402", "0.5591435", "0.5588055", "0.55879647", "0.55823016", "0.55758804", "0.55733985", "0.5568651", "0.5568171", "0.55609614", "0.55609614", "0.556023", "0.556023", "0.5559174", "0.55587476", "0.5554174", "0.55525434", "0.5549316", "0.55444694", "0.5533583", "0.5531237", "0.55304456", "0.5526552", "0.55240357", "0.5523062", "0.5519276", "0.5518253", "0.55128974", "0.5511179", "0.55073935", "0.55057454", "0.5503789", "0.5503468" ]
0.64582944
4
Instantiate a Missing Data Error.
def __init__(self, message: str) -> None:
    super().__init__(
        "Data requested from a class could not be found: {}".format(message)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def test_create_data_lookup_error(self):\n with self.assertRaises(LookupError):\n _ = create_data({\"name\": \"fake_data\"})", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def missing_information(self, info, field):\n raise NoData", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def from_data(cls, data):\n errors = data.get('errors')\n if not errors or len(errors) == 0:\n return cls('Unknown error!')\n\n # For simplicity, we'll just include the first error.\n err = errors[0]\n return cls(\n message=err.get('message'),\n code=err.get('code'),\n detail=err.get('detail'),\n )", "def test_constructor_invalid():\n with pytest.raises(TypeError, match='missing 1 required positional argument'):\n PseudoPotentialData() # pylint: disable=no-value-for-parameter", "def test_invalid_input_data(self):\n self.data.diffusion_data = self.data.diffusion_data[0]\n self.assertRaises(ValueError, module_05.run_module,\n self.data)", "def test_from_object_fail(self):\n class InvalidClass(object):\n pass\n Invalid_object = InvalidClass()\n with self.assertRaises(TypeError):\n BaseDataClass.from_object(Invalid_object)", "def test_fieldname_exc(self):\n ds = self.f.create_dataset('foo', (100,), 'f')\n self.assertRaises(ValueError, ds.__getitem__, (0, 'a'))", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepSample('Not_a_Sample', self.prep_template)", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n Sample('Not_a_Sample', self.sample_template)", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate(2)", "def testConstructorValueError(self):\n test_cases = [\n 'these',\n 'are',\n 'bad',\n 'data',\n 'types',\n 'FILE',\n 'STRING',\n 'JSON',\n ]\n for bad_data_type in test_cases:\n with self.assertRaises(ValueError):\n ASCIITransportFormat(bad_data_type, '')", "def test_create_error(self):\n metadata_dict = {\n '1.SKB8.640193': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status_id': 1,\n 'str_column': 'Value for sample 1'},\n '1.SKD8.640184': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status_id': 1,\n 'str_column': 'Value for sample 2'},\n '1.SKB7.640196': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status_id': 1,\n 'str_column': 'Value for sample 3'}\n }\n metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')\n with self.assertRaises(QiitaDBColumnError):\n PrepTemplate.create(metadata, self.new_raw_data, self.test_study,\n self.data_type)", 
"def test_create_a_recommendation_missing_data(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=None, relationship=Type.UP_SELL)\n self.assertRaises(DataValidationError,recommendation.create)", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def test_narrative_data_missing(self, config_handler):\n spec = Spec.from_dict(\n {\"name\": \"homogeneity_coefficient\", \"unit\": \"percentage\", \"dtype\": \"float\"}\n )\n with raises(SmifDataNotFoundError):\n config_handler.read_narrative_variant_data(\"does not exist\", spec)", "def testBadDataToToken(self):\n key = createKey()\n self.assertRaises(ValueError, dataToToken, key, data=self)", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def test_invalid_data_raises_error(self):\n with self.assertRaises(ValueError):\n PoincareModel([(\"a\", \"b\", \"c\")])\n with self.assertRaises(ValueError):\n PoincareModel([\"a\", \"b\", \"c\"])\n with self.assertRaises(ValueError):\n PoincareModel(\"ab\")", "async def test_create_missing_field(self):\n # the \"value\" field is missing\n data = {'id': 'foo'}\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.create(data)\n self.assertEqual(\n 'Error: \"value\": Required', str(cm.exception))", "def test_invalid_data_construction(self):\n with self.assertRaises(Exception):\n LongDecimalEuler(term=-1)\n with self.assertRaises(Exception):\n LongDecimalEuler(term=\"aaa\")\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=-1)\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=\"aaa\")", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate(2)", "def test_name_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_name('Missing ObservationType Name')", "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def test_bad_data_fail2(self):\n with self.assertRaises(ValueError):\n mapreader.get_data(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data_corrupt_header.map'))", "def __init__(self, *args):\n this = _ida_hexrays.new_vd_failure_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, datatype, stage=\"\", context=\"\"):\n filler = \"unspecified\"\n if isinstance(datatype, str):\n typename = datatype\n else:\n try:\n typename = datatype.__name__\n except AttributeError:\n typename = str(datatype)\n explanation = \"Error creating {dt}; stage: {s}; context: {c}\".\\\n format(dt=typename, s=stage or filler, c=context or filler)\n super(ModelConstructionException, self).__init__(explanation)", "def create_error(test, time, error):\n info = _TestInfo(test, time)\n info._error = error\n return info", "def test_deserialize_missing_data(self):\n data = {\"product_id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def test_001_init(self):\n self.assertRaises(TypeError,rawdata.rawdata)", "def error(self, **data):\n template_specification = dict(mainContent=\"../error\", title=\"Error page\", data=data)\n template_specification = self._fill_user_specific_attributes(template_specification)\n return self.fill_default_attributes(template_specification)", "def __init__(self, message=\"\"):\n super(ElementNotFoundError, 
self).__init__(message)", "def test_missing_dataset_label():\n svl_string = \"HISTOGRAM X temperature\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test_error_initialisation_from_xdmf_missing_label():\n with pytest.raises(ValueError, match=r\"label\"):\n festim.InitialCondition(value=\"my_file.xdmf\", label=None, time_step=1)", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def test_empty_input_data(self):\n self.data.diffusion_data = np.array([])\n self.assertRaises(ValueError, module_05.run_module,\n self.data)", "def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))", "def test_id_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_id(-1)", "def test_empty_source_constructor_exception():\n with pytest.raises(robox.RDJParameterErr):\n test01 = Source()", "def test_ensure_data_no_totalseconds(self):\n album = Album(artist='Artist', album='Album', totaltracks=2)\n with self.assertRaises(Exception):\n album.ensure_data()", "def test_002_init(self):\n self.assertRaises(TypeError,rawdata.rawdata,\"id\")", "def test_get_error_data_when_no_errors(self):\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def __init__(self, name):\n super(NodeExistsError, self).__init__(name)\n self.name = name", "def __init__(self,data_name):\n\t\tif data_name.lower().strip() not in DATASETS.keys():\n\t\t\tprint(f\"{data_name} isn't a valid data name! 
One of \"+\", \".join(DATASETS.keys()))\n\t\t\traise Exception\n\n\t\tself.data_name = data_name.lower().strip()", "def test_get_error_data_table_when_no_errors(self):\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with self.assertRaises(TypeError):\n knxipframe.init(23)", "def __init__(self, obj, path, notes=()):\n format_dict = {'index': path[-1], 'object_name': obj._name}\n message = (\"Invalid entry found in '{object_name}' at index, '{index}'\"\n .format(**format_dict))\n note = \"It's invalid because it doesn't contain a valid 'type' value.\"\n notes = [note] + list(notes)\n super(PlotlyDataTypeError, self).__init__(\n message=message, path=path, notes=notes\n )", "def test_cannot_instantiate(self):\n with self.assertRaises(TypeError):\n Distribution()", "def __init__(self, message):\n ModelException.__init__(self, message)", "def test_init_value_error(self):\n data = [[0, 0], [0, 0], [0, 0]]\n with self.assertRaises(ValueError):\n Board(data)", "def test_create_error_template_special(self):\n metadata_dict = {\n '1.SKB8.640193': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 1',\n 'barcodesequence': 'GTCCGCAAGTTA'},\n '1.SKD8.640184': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 2',\n 'barcodesequence': 'CGTAGAGCTCTC'},\n '1.SKB7.640196': {'center_name': 'ANL',\n 'center_project_name': 'Test Project',\n 'ebi_submission_accession': None,\n 'EMP_status': 'EMP',\n 'str_column': 'Value for sample 3',\n 'barcodesequence': 'CCTCTGAGAGCT'}\n }\n metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')\n with self.assertRaises(QiitaDBColumnError):\n PrepTemplate.create(metadata, self.new_raw_data, self.test_study,\n self.data_type)", "def testUidMissingError(self):\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', None,\n '4.5.6')\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', 'stuid',\n None, '7.8.9')", "def initialize(self):\n self.data = None\n self.errors = []", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def test_150(self):\n self.assertRaises(\n exceptions.DataONEExceptionException, exceptions.deserialize,\n INVALID_ERROR_DOC[0]\n )", "def test_init_throws_missing_argument_exception(self):\n with self.assertRaises(Exception) as ex:\n MarkerId() # trying to create MarketId objectand waits for Exception\n\n self.getLogger().warning(\"Exception: %s\", ex.exception)", "def test_no_data(self):\n self.assertRaises(NoDataError, lambda: GroupLinearRegression([], []))", "def test_bad_data(self):\n # bad data file has:\n # 1 bad status\n # particle A has bad timestamp\n # particle B has bad dark fit\n # particle C has bad frame type\n # particle D has bad year\n stream_handle = open(os.path.join(RESOURCE_PATH,\n 'bad_SNA_SNA.txt'), MODE_ASCII_READ)\n\n 
self.create_parser(stream_handle, telem_flag=False)\n\n # get E, since it is first it will generate a metadata\n particles = self.parser.get_records(2)\n\n # check all the values against expected results.\n self.assert_particles(particles, 'last_and_meta_SNA_recov.yml', RESOURCE_PATH)\n\n # should have had 5 exceptions by now\n self.assertEqual(len(self.exception_callback_value), 5)\n\n for exception in self.exception_callback_value:\n self.assert_(isinstance(exception, RecoverableSampleException))", "def test_error(self):\n metric = self.metric()\n measurement = self.measurement(metric, sources=[self.source(metric, parse_error=\"error\")])\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def error(self):\n pass", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def test_error_initialisation_from_xdmf_missing_time_step():\n\n with pytest.raises(ValueError, match=r\"time_step\"):\n festim.InitialCondition(value=\"my_file.xdmf\", label=\"my_label\", time_step=None)", "def test_creation_throws_error_on_missing_fields(self, test_domain):\n with pytest.raises(ValidationError) as err:\n test_domain.repository_for(Person)._dao.create(last_name=\"Doe\")\n\n assert err.value.messages == {\"first_name\": [\"is required\"]}", "def _validate_create_data(self, data):\n return", "def test_create_investigation_type_error(self):\n with self.assertRaises(QiitaDBColumnError):\n PrepTemplate.create(self.metadata, self.new_raw_data,\n self.test_study, self.data_type_id,\n 'Not a term')", "def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )", "def __init__(self, error: List[Error] = None, info: Dict = None):\n self.error = error if error is not None else []\n self.info = info if info is not None else {}\n self.is_valid = True", "def test_call_incompatible_data(self):\r\n self.cs_overview.DistanceMatrices = [self.single_ele_dm,\r\n self.single_ele_dm]\r\n self.assertRaises(ValueError, self.cs_overview)", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def test_bad_data_fail3(self):\n with self.assertRaises(ValueError):\n # create a map file with a header larger than 1024 to see the exception\n map = mapreader.get_data(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.map'))\n for i in range(map._nlabl):\n label = getattr(map, '_label_{}'.format(i))\n y = 11\n for j in range(1, y):\n setattr(map, '_label_{}'.format(j), label)\n map._nlabl = y\n with open('rm.map', 'w') as f:\n map.write(f)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, err_code=None):\n self.err_code = err_code\n if self.err_code in [member.value for name, member in NrfjprogdllErr.__members__.items()]:\n err_str = 'An error was reported by NRFJPROG DLL: {} {}.'.format(self.err_code, NrfjprogdllErr(self.err_code).name)\n else:\n err_str = 'An error was reported by NRFJPROG DLL: {}.'.format(self.err_code)\n\n Exception.__init__(self, err_str)", "def test_ensure_data_no_totaltracks(self):\n album = Album(artist='Artist', album='Album', totalseconds=120)\n with self.assertRaises(Exception):\n album.ensure_data()", "def __init__(self, code, reason):\n super(RequestError, self).__init__(code, reason)", "def __init__(self):\n raise", "def test_incorrect_creation_1(input_data, exact):\n with pytest.raises(ValueError):\n TaxCalcIO(input_data=input_data,\n tax_year=2013,\n 
reform=None,\n assump=None,\n growdiff_response=None,\n aging_input_data=False,\n exact_calculations=exact)", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def test_003_init(self):\n self.assertRaises(TypeError,rawdata.rawdata,\"id\",testRawdata.ioc)", "def test_missing_dataset_definition():\n svl_string = \"\"\"\n DATASETS\n bigfoot\n BAR bigfoot\n X classification\n Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test_init(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n MetadataTemplate(1)", "def __init__(self, data: dict):\n # Check if all elements are in the passed dict, else raise an Error\n if any(k not in data for k in [\"locations\", \"info\"]):\n raise DIRECTVError(\n \"DirecTV data is incomplete, cannot construct device object\"\n )\n self.update_from_dict(data)", "def __init__(self, message, fatal, error_num=None):\n Exception.__init__(self, message)\n self.fatal = fatal\n self.errno = error_num", "def test_invalid_value(self):\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.length('25a', LENGTH_KILOMETERS)\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.temperature('50K', TEMP_CELSIUS)", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def test_create_metric_using_invalid_type(self):\n with self.assertRaises(Exception) as context:\n pmp.utils.create_metric_family(\n self.counter_metric_name,\n self.counter_metric_help,\n 7,\n self.counter_metric_data,\n )\n self.assertIn(\"Invalid metric_type\", str(context.exception))", "def missing(self, value):\n self.MISSING = value", "def __init__(self, *args):\n this = _libsbml.new_SBMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_exc(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n with pytest.raises(TypeError):\n grp.create_dataset('foo', (10,), dtype=\"float32\", fillvalue={\"a\": 2})", "def __init__(self, *args):\n this = _ida_hexrays.new_hexrays_failure_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))", "def test_required_dataset_missing(self):\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n bar_inst1._Bar__data = None # make data dataset None\n\n msg = \"Bar 'my_bar1' is missing required value for attribute 'data'.\"\n with self.assertWarnsWith(MissingRequiredBuildWarning, msg):\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n )\n self.assertBuilderEqual(expected, builder)" ]
[ "0.7335149", "0.65505475", "0.65356326", "0.62742686", "0.62742686", "0.62742686", "0.6204105", "0.6189645", "0.6147461", "0.6143845", "0.6142353", "0.6132027", "0.60576224", "0.60308665", "0.6008944", "0.5981049", "0.5965471", "0.5946241", "0.5938257", "0.59336597", "0.5889658", "0.5877268", "0.58754295", "0.5857168", "0.58362436", "0.5835265", "0.58256334", "0.5819807", "0.58162624", "0.58080584", "0.5774647", "0.5765732", "0.5738157", "0.57305425", "0.5713041", "0.57113254", "0.57106453", "0.5710531", "0.57051", "0.5700688", "0.57001287", "0.5698023", "0.56896186", "0.56884414", "0.5686954", "0.5661931", "0.5658174", "0.5652455", "0.5646198", "0.56435394", "0.56370056", "0.5636277", "0.5623208", "0.56225264", "0.56213534", "0.56209534", "0.56178945", "0.5612639", "0.5610787", "0.5608723", "0.55923414", "0.5590561", "0.5587113", "0.55804586", "0.556758", "0.5561555", "0.5561476", "0.55532926", "0.5547425", "0.5547053", "0.5545752", "0.5542812", "0.5534859", "0.55295974", "0.55218107", "0.55202407", "0.55147207", "0.55127543", "0.55114406", "0.55003476", "0.5498887", "0.5480965", "0.54804313", "0.54789585", "0.5474846", "0.54722786", "0.54701823", "0.5465587", "0.54581165", "0.5455733", "0.5452783", "0.5450874", "0.54426146", "0.54359823", "0.543441", "0.543312", "0.5427322", "0.5421449", "0.5414431", "0.5411322" ]
0.60894585
12
Instantiate a resolution mismatch error.
def __init__(self, message: str) -> None:
    super().__init__(
        "Due to the nature of the load data, an integer multiple of resolutions, "
        "or divsions of resolutions, must be supplied with the '--resolution' or "
        "'-r' flag.\nI appreciate that this is poor coding, but at least I took "
        "the time to write a custom exception for it :p .\n Error message: "
        f"{message}"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, name, pattern_factory):\n\t\tsuper(AlreadyRegisteredError, self).__init__(name, pattern_factory)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def test_cannot_instantiate(self):\n with self.assertRaises(TypeError):\n Distribution()", "def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')", "def resolve_failure(self):\n\t\tpass", "def __init__(self, *args):\n this = _ida_hexrays.new_vd_failure_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, uri):\n super(ResolutionError, self).__init__(\n \"{0} is not an absolute URI.\".format(uri.unsplit()))", "def _create_violation_error(contract: Contract, resolved_kwargs: Mapping[str, Any]) -> BaseException:\n exception = None # type: Optional[BaseException]\n\n if contract.error is None:\n try:\n msg = icontract._represent.generate_message(contract=contract, resolved_kwargs=resolved_kwargs)\n except Exception as err:\n parts = [\"Failed to recompute the values of the contract condition:\\n\"]\n if contract.location is not None:\n parts.append(\"{}:\\n\".format(contract.location))\n\n if contract.description is not None:\n parts.append(\"{}: \".format(contract.description))\n\n parts.append(icontract._represent.represent_condition(condition=contract.condition))\n\n raise RuntimeError(''.join(parts)) from err\n\n exception = ViolationError(msg)\n elif inspect.ismethod(contract.error) or inspect.isfunction(contract.error):\n assert contract.error_arg_set is not None, (\"Expected error_arg_set non-None if contract.error a function.\")\n assert contract.error_args is not None, (\"Expected error_args non-None if contract.error a function.\")\n\n error_kwargs = select_error_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)\n\n exception = cast(BaseException, contract.error(**error_kwargs))\n\n if not isinstance(exception, BaseException):\n raise TypeError(\n \"The exception returned by the contract's error {} does not inherit from BaseException.\".format(\n contract.error))\n elif isinstance(contract.error, type):\n if not issubclass(contract.error, BaseException):\n raise TypeError(\n \"The exception class supplied in the contract's error {} is not a subclass of BaseException.\".format(\n contract.error))\n\n msg = icontract._represent.generate_message(contract=contract, resolved_kwargs=resolved_kwargs)\n exception = contract.error(msg)\n elif isinstance(contract.error, BaseException):\n exception = contract.error\n else:\n raise NotImplementedError(\n (\"icontract does not know how to handle the error of type {} \"\n \"(expected a function, a subclass of BaseException or an instance of BaseException)\").format(\n type(contract.error)))\n\n assert exception is not None\n return exception", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def test_constructor_wrong_options(self):\n with self.assertRaises(TypeError):\n base = BaseExporter(something_else=6)", "def test_invalid_instantiation(invalid_instance):\n with pytest.raises(ValueError):\n invalid_instance()", "def test_class_errored(self, cls, exception):", "def __init__(self, *args):\n this = _libsbml.new_SBMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = 
KNXIPFrame(xknx)\n with self.assertRaises(TypeError):\n knxipframe.init(23)", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")", "def __init__(self, *args):\n this = _ida_hexrays.new_hexrays_failure_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_lookup_exception(self):\n self.assertIsInstance(BuildGraph.TransitiveLookupError(), AddressLookupError)", "def test_from_object_fail(self):\n class InvalidClass(object):\n pass\n Invalid_object = InvalidClass()\n with self.assertRaises(TypeError):\n BaseDataClass.from_object(Invalid_object)", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def create_error(test, time, error):\n info = _TestInfo(test, time)\n info._error = error\n return info", "def __init__(self, msg):\n super(CpoSolverException, self).__init__(msg)", "def __init__(self, *args):\n this = _libsbml.new_XMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def test_instantiate_7():\n with raises(ValueError):\n FixedPoint(1.5, 'Q20.204')", "def __init__(self, message, repl):\n super(LinterFailure, self).__init__()\n self.message = message\n self.replacement = repl", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))", "def test_DistanceMatrices_setter_mismatched_labels(self):\r\n mismatch = DistanceMatrix(array([[0]]), ['s2'])\r\n\r\n self.assertRaises(ValueError, setattr, self.cs, 'DistanceMatrices',\r\n [self.single_ele_dm, mismatch])\r\n # Also test that constructor raises this error.\r\n self.assertRaises(ValueError, CorrelationStats, [self.single_ele_dm,\r\n mismatch])", "def __init__(self, err_code=None):\n self.err_code = err_code\n if self.err_code in [member.value for name, member in NrfjprogdllErr.__members__.items()]:\n err_str = 'An error was reported by NRFJPROG DLL: {} {}.'.format(self.err_code, NrfjprogdllErr(self.err_code).name)\n else:\n err_str = 'An error was reported by NRFJPROG DLL: {}.'.format(self.err_code)\n\n Exception.__init__(self, err_str)", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate(2)", "def create_exception(self, msg: str):", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate(2)", "def test_init_wrong_ap_type(self):\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\",\n ip=\"3.3.3.3\", ap_info=ap_wrong)", "def test_create_unexpected_problem(self):\n pass", "def test_broken_error_descriptor(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def test_duplicate_names_fail(self):\n name = 'some_name'\n instance_types.create(name, 
256, 1, 120, 200, 'flavor1')\n self.assertRaises(exception.InstanceTypeExists,\n instance_types.create,\n name, 256, 1, 120, 200, 'flavor2')", "def test_invalid_type_cr_name(self):\n self.assertRaises(QISKitError, ClassicalRegister, size=3, name=1)", "def test_init_invalid_retire_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date='invalid')", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def _TypeMismatch(a, b):\n return 'Types do not match, %s v. %s' % (str(a), str(b))", "def failure(self: _UnwrappableType) -> _SecondType:", "def test_component_loading_instantiation_exception(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=AEAInstantiationException(\"Generic exception\"),\n ):\n with pytest.raises(AEAInstantiationException):\n load_component_from_config(component_configuration)", "def __init__(self, errors):\n strerrors = \"\\n - \".join(errors)\n text = tr(\n \"Application error occurred on secondary appliance. \"\n \"Please read logs on the secondary appliance.\"\n )\n HAError.__init__(\n self,\n SECONDARY_FAILED_TO_APPLY,\n \"%s\\n - %s\" % (text, strerrors)\n )", "def test_init_Error(self):\r\n with self.assertRaises(ValueError):\r\n int.IDWInterpolator(-1)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def test_create_data_lookup_error(self):\n with self.assertRaises(LookupError):\n _ = create_data({\"name\": \"fake_data\"})", "def __init__(self, real_path, first_path, second_path):\n\t\tsuper(RecursionError, self).__init__(real_path, first_path, second_path)", "def unexpected_error(self, exception):", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with pytest.raises(AttributeError):\n knxipframe.init(23)\n\n with pytest.raises(CouldNotParseKNXIP):\n # this is not yet implemented in xknx\n knxipframe.init(KNXIPServiceType.SEARCH_REQUEST_EXTENDED)", "def test_empty_source_constructor_exception():\n with pytest.raises(robox.RDJParameterErr):\n test01 = Source()", "def __init__(self, port):\n super(InvalidPort, self).__init__(\n 'The port (\"{0}\") is not valid.'.format(port))", "def __init__(self, name):\n super(NodeExistsError, self).__init__(name)\n self.name = name", "def from_d(d):\n return SMRTServiceBaseError(\n d['httpCode'], d['errorType'], d['message'])", "def __init__(self):\n raise Exception(\"Cannot create this object\")", "def __init__(self):\n raise Exception(\"Cannot create this object\")", "def test_type(self):\n assert issubclass(Error, Exception)\n assert Error.__name__ == \"Error\"", "def testRaisesErrorValueMismatch(self):\n c = Simulation(logging_level=logging.CRITICAL)\n c.set_simulation_parameters(\n seed=4,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse_zeros.tif\",\n 
dispersal_map=\"sample/dispersal_fine_cumulative.tif\",\n )\n with self.assertRaises(RuntimeError):\n c.run()", "def __init__(self, message=\"\"):\n super(ElementNotFoundError, self).__init__(message)", "def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def __init__(self, msg=None, wrapped=None):\n if not msg:\n name= None\n if wrapped:\n msg = \"Access error due to %s: %s\" % \\\n (self._excname(wrapped), str(wrapped))\n else:\n msg = \"Unknown error during blackboard access\"\n \n Exception.__init__(self, msg)\n\n # the wrapped exception\n self.wrapped = None", "def test__init__raise_exception(self):\n self.assertRaises(TypeError, MasterNodeInterface)", "def test_base_class_expection():\n with pytest.raises(TypeError):\n cardinal.CardinalPoints()", "def test_prevent_wrong_type(self):\n self.assertRaises(cinv.host.Error, self.wrong_host_type)", "def __init__(self, *args):\n this = _libsbml.new_SBMLErrorLog(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_value_init11(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(10, 2, {})\n msg = \"x must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_invalid_type_circuit_name(self):\n qr = QuantumRegister(size=3)\n cr = ClassicalRegister(size=3)\n self.assertRaises(QISKitError, QuantumCircuit, qr, cr, name=1)", "def test_get_other_typeerror_2(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, '1')", "def test_broken_module(self):\r\n module = self.descriptor._xmodule\r\n self.assertIsInstance(module, ErrorModule)", "def test_constructor_wrong_parameter_type(self):\n\n for invalid in (None, 1):\n with self.assertRaises(TypeError):\n group_tr = OCIO.FixedFunctionTransform(invalid)", "def test_errors(self):\n DifferentRangePassage = self.text.getPassage(\n MyCapytain.common.reference.Reference(\"1.pr.2-1.2\")\n )\n with self.assertRaises(MyCapytain.errors.InvalidSiblingRequest, msg=\"Different range passage have no siblings\"):\n a = DifferentRangePassage.next\n\n with self.assertRaises(MyCapytain.errors.InvalidSiblingRequest, msg=\"Different range passage have no siblings\"):\n a = DifferentRangePassage.prev", "def test_constructor_invalid():\n with pytest.raises(TypeError, match='missing 1 required positional argument'):\n PseudoPotentialData() # pylint: disable=no-value-for-parameter", "def test_errorfornoarguments(self):\n Square.reset_objects()\n with self.assertRaises(TypeError) as e:\n s1 = Square()\n self.assertEqual(\n str(e.exception),\n \"__init__() missing 1 required positional argument: 'size'\")", "def test_get_other_typeerror(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, 3.4)", "def test_trestle_not_found_error() -> None:\n msg = 'Custom not found error'\n try:\n raise TrestleNotFoundError(msg)\n except TrestleNotFoundError as err:\n assert str(err) == msg\n assert err.msg == msg", "def test_registering_an_instance_as_concrete_is_exception():\n container = Container()\n writer = MessageWriter()\n\n with pytest.raises(InvalidRegistrationError):\n container.register(writer)", "def test_declare_error(self):\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n pass\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n _id = IDField()\n _id_2 = IDField()", "def test_value_init17(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(1, 2, 3, 
\"hi\")\n msg = \"y must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_load_raise_constructor_error(\n self, mocker: MockerFixture, tmp_path: Path\n ) -> None:\n config = Mock(load=Mock(side_effect=ConstructorError(problem=\"something else\")))\n get_config = mocker.patch.object(CFNgin, \"_get_config\", return_value=config)\n with pytest.raises(ConstructorError, match=\"something else\"):\n assert CFNgin(ctx=self.get_context(), sys_path=tmp_path).load(tmp_path)\n get_config.assert_called_once_with(tmp_path)", "def __init__(self):\n raise", "def test_check_if_error_two(self):\n with self.assertRaises(MyError):\n OtherErrorsChecking(OK_RETURN_CODE, ERROR_MESSAGE_HOST2) \\\n .check_if_error()", "def test_create_bad_layout(self):\n with self.assertRaises(AssertionError):\n layout = Layout(width=10, height=5)", "def test_make_plot_invalid_plot_type(self):\n print(sys._getframe().f_code.co_name)\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n self.assertRaises(Exception,pp.make_plot,x,y,plot_type='wrong',msg='Invalid plot type')", "def test_construct_with_invalid(self):\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 100, 500, 50) # xmin > xmax\r\n\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 100, 200, 500) # ymin > ymax\r\n\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 100, 300, 500) # ymin == ymax\r\n\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(300, 500, 400, 500) # ymin == ymax\r\n\r\n # unconvertalbe string\r\n with pytest.raises(ValueError):\r\n rect = Rectangle(\"GG\", 500, 400, 500)", "def test_failure(t):\n objmap = ObjectMap({}, modname=\"py.module.name\", classname=\"ClassName\")\n ret = _create_object(objmap)\n t.assertIsNone(ret)", "def test_controller_status_from_knx_wrong_value(self):\n with pytest.raises(ConversionError):\n DPTControllerStatus.from_knx((1, 2))", "def __init__(self, datatype, stage=\"\", context=\"\"):\n filler = \"unspecified\"\n if isinstance(datatype, str):\n typename = datatype\n else:\n try:\n typename = datatype.__name__\n except AttributeError:\n typename = str(datatype)\n explanation = \"Error creating {dt}; stage: {s}; context: {c}\".\\\n format(dt=typename, s=stage or filler, c=context or filler)\n super(ModelConstructionException, self).__init__(explanation)", "def test_str6(self):\n with self.assertRaises(TypeError):\n r6 = Rectangle(\"hey\", \"there\")", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def test_error_on_incorrect_version(self):\n config = dict(nodes={}, version=2)\n parser = Configuration()\n exception = self.assertRaises(ConfigurationError,\n parser._deployment_from_configuration,\n config, set())\n self.assertEqual(\n \"Deployment configuration has an error. 
\"\n \"Incorrect version specified.\",\n exception.message\n )", "def test_runtime_errors(self, graph_entry_class):\n graph_entry_class.return_value.state = \"Pending\"\n graph_entry_class.return_value.path = \"foo/app1\"\n graph_entry_class.return_value.execute.return_value = (0, ['Failure'], False)\n\n graph = ApplyGraph('plan', self.graph, self.post_graph, \"foo\")\n\n self.assertRaises(RuntimeError, graph.execute_graph())\n self.assertRaises(RuntimeError, graph.execute_post_graph())", "def test_simple_source_constructor_exception():\n TESTPATH = \"/usr/local/share/testfile.mp3\"\n with pytest.raises(robox.RDJResourceErr):\n test01 = Source(path=TESTPATH, exist=True)", "def test_cclerror_not_equal():\n e = pyccl.CCLError(\"blah\")\n e2 = pyccl.CCLError(\"blahh\")\n assert e is not e2\n assert e != e2\n assert hash(e) != hash(e2)", "def test_verify_dicom_instance_exception(\n mock_engine: DicomImagePiiVerifyEngine,\n get_mock_dicom_instance: pydicom.dataset.FileDataset,\n):\n with pytest.raises(Exception) as exc_info:\n # Arrange\n padding_width = 25\n test_instance = deepcopy(get_mock_dicom_instance)\n del test_instance.PixelData\n expected_error_type = AttributeError\n\n # Act\n _, _, _ = mock_engine.verify_dicom_instance(test_instance, padding_width)\n\n # Assert\n assert expected_error_type == exc_info.typename", "def _softwareInstanceError(self, compute_node_id,\n compute_partition_id, error_log=\"\"):\n instance = self._getSoftwareInstanceForComputePartition(\n compute_node_id,\n compute_partition_id)\n \n if error_log is None:\n error_log = \"\"\n instance.setErrorStatus(\n 'while instanciating: %s' % error_log[-80:], reindex=1)" ]
[ "0.64293504", "0.62001663", "0.62001663", "0.62001663", "0.61242396", "0.61132437", "0.60633713", "0.5971559", "0.589876", "0.5896457", "0.58818203", "0.587032", "0.5862494", "0.5797569", "0.57908887", "0.57661104", "0.57357174", "0.57350785", "0.57019705", "0.5677138", "0.5672282", "0.5670192", "0.56640035", "0.5646662", "0.56457627", "0.5644268", "0.5626649", "0.5623513", "0.56187963", "0.55825484", "0.5564232", "0.55636674", "0.55601734", "0.55432916", "0.55341315", "0.55331564", "0.55271864", "0.5526699", "0.55139947", "0.55045515", "0.54986334", "0.5483997", "0.5477328", "0.54716504", "0.5465817", "0.54633325", "0.5461417", "0.54599917", "0.54523754", "0.5447538", "0.5442357", "0.54398274", "0.5434503", "0.5402393", "0.53992766", "0.5389835", "0.5381656", "0.53728944", "0.5372508", "0.5365087", "0.5365087", "0.535898", "0.53488773", "0.53456736", "0.5334179", "0.53340805", "0.5327211", "0.5317975", "0.53124917", "0.5309729", "0.53064525", "0.5304895", "0.52976656", "0.52868146", "0.5286438", "0.52854365", "0.528234", "0.52815944", "0.5279867", "0.5278234", "0.52779496", "0.5276091", "0.5265581", "0.52622527", "0.5260527", "0.5258145", "0.52524894", "0.5251485", "0.5248653", "0.524545", "0.52424365", "0.52367586", "0.52334017", "0.52322274", "0.523219", "0.52315825", "0.5231365", "0.5227205", "0.5225923", "0.5223959" ]
0.5793322
14
Instantiate the daily profile class.
def __init__(self, profile: Dict[datetime.time, float] = None) -> None:
    if profile is None:
        profile = dict()
    if not isinstance(profile, dict):
        raise ProgrammerJudgementFault(
            "The input daily profile provided is not a mapping of the correct type."
        )
    self._profile = profile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, name):\n self.__username = name\n self.__startDate = datetime.now().date().today() # This attributes will not be change ever once it has been initialized.", "def __init__(self, dt=60*60*24):\n pass", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance", "def __init__(self):\n super(Profile, self).__init__()", "def __init__(self, period=None, date=None):\r\n self.period = period\r\n self.date = date", "def __init__(self, profile: AskarProfile):\n self._profile = profile", "def __init__(self):\n self.users = {}\n self.tweetTime = {}\n self.recentMax = 0\n self.time = 0", "def __init__(self, subject_id, gender, dob, dod, dod_hosp, dod_ssn, expire_flag):\n self.id = int(subject_id)\n self.gender = gender\n self.dob = utils.convert_to_date_time_object(dob)\n self.dod = utils.convert_to_date_time_object(dod)\n self.dod_hosp = utils.convert_to_date_time_object(dod_hosp)\n self.dod_ssn = utils.convert_to_date_time_object(dod_ssn)\n self.expire_flag = expire_flag\n\n self.hospital_visits = {}\n self.num_of_hospital_visits = 0\n self.total_num_of_icu_stays = 0", "def __init__(self, day, hour, minute):\n self.day = day\n self.hour = hour\n self.minute = minute", "def __init__(self,\n day=None,\n end_time=None,\n start_time=None,\n ):\n\n # Initialize members of the class\n self.day = day\n self.end_time = end_time\n self.start_time = start_time", "def create_instance(self, date):\n raise NotImplementedError", "def __init__(self, year, month, day):", "def __init__(self, Date, TimeOfDay):\n self.date = Date\n self.time_of_day = TimeOfDay", "def __init__(self):\n\n from dateutil import tz\n\n # Get timezone descriptions\n self.UTC = tz.tzutc()\n self.LOCAL = tz.gettz(\"Europe/Berlin\")\n\n # Lookup FOOD event type_id\n table = current.s3db.dvr_case_event_type\n query = (table.code == \"FOOD\") & \\\n (table.deleted != True)\n row = current.db(query).select(table.id, limitby=(0, 1)).first()\n self.FOOD = row.id if row else None\n\n self.SURPLUS_MEALS = s3_str(current.T(\"Surplus Meals\"))", "def __init__(self):\n self.now = datetime.now()", "def __init__(self, month, day, year):", "def __init__(self):\r\n super(ProfileParser, self).__init__([self.ProfileEntryHandler()])", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.creation_date = datetime.now()", "def __init__(self):\n self.site = ('http://vortex.plymouth.edu/cgi-bin/gen_statlog-u.cgi')\n \"\"\"Root of URL to query for data.\"\"\"\n yesterday = datetime.today() - timedelta(days=1)\n self.year = yesterday.year\n \"\"\"Year to get data for.\"\"\"\n self.month = yesterday.month\n \"\"\"Month to get data for.\"\"\"\n self.day = yesterday.day\n \"\"\"Day to get data for.\"\"\"\n self.stns = dict(yvr=\"CYVR\",\n sandheads=\"CWVF\")\n \"\"\"Mapping of common station names to official station IDs.\"\"\"", "def setUpClass(cls):\n now = timezone.now()\n cls.expired_dt = now + timedelta(days=-10)\n cls.current_dt = now + timedelta(days=90)", "def __init__(self, rate, from_weekday, to_weekday, from_hour, to_hour):\n self.from_weekday = from_weekday\n self.to_weekday = to_weekday\n self.from_hour = from_hour\n self.to_hour = to_hour\n self.rate = rate", "def __init__(self, className, name, title=None, unit=None):\n self.className = 
className\n filename = os.path.join(DATA, name + '.csv')\n with open(filename,encoding='utf8') as csv_file:\n reader = csv.reader(csv_file)\n for row_number, row in enumerate(reader):\n if row[1] == filename:\n continue\n if row[0] == '':\n self.title = row[1]\n continue\n if row[0] == 'unit':\n self.unit = row[1]\n continue\n try:\n datetime.strptime(row[0], \"%Y-%m-%d\")\n break\n except: ValueError\n super().__init__(name, title, unit)\n with open(filename,encoding='utf8') as csv_file:\n reader = csv.reader(csv_file)\n for skip in range(row_number): # row_number is first data line\n next(reader)\n for row in reader:\n try:\n self.data[datetime.strptime(row[0], \"%Y-%m-%d\")]=float(row[1])\n except: ValueError\n self.first_date=min(self.data.keys())\n self.last_date=max(self.data.keys())", "def __init__(self, security_identifier, profile_path):\n super(UserProfile, self).__init__()\n self.profile_path = profile_path\n self.security_identifier = security_identifier", "def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }", "def __init__(self, profile):\n self.subject_name = \"playbook\"\n Subject.__init__(self, profile, self.subject_name)", "def __init__(self, date_time, diastolic):\n Encounter.__init__(self, date_time)\n self.__diastolic = diastolic", "def __init__(self):\n # 保存用户推特数据\n self.user_pool = defaultdict(UserInfo)\n self.twitter_pool = defaultdict(list)\n self.time = 0", "def __init__(self, name, title=None, unit=None):\n super().__init__(name.lower(), title, unit)\n filename = os.path.join(DATA, name + '.csv')\n with open(filename) as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n try:\n value = float(row[name])\n except ValueError:\n continue\n date = datetime.strptime(row['DATE'], \"%Y-%m-%d\")\n self.data[date] = value\n self.first_date = min(self.data)\n self.last_date = max(self.data)", "def __init__(self, *args):\n this = _libsbml.new_Date(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, d, m, y):\n\n self.set_calendar(d, m, y)", "def __init__(self, name, value, start_date, end_date, period, interest): \n SavingPlan.__init__(self, name, value, start_date, end_date, period)\n self.interest = interest", "def __init__(self):\n super(GenomicSetMemberGen, self).__init__(load_data=False)\n\n self.set_dao = GenomicSetDao()\n self.member_dao = GenomicSetMemberDao()\n\n # Genomic attributes\n self.OUTPUT_CSV_TIME_FORMAT = \"%Y-%m-%d-%H-%M-%S\"\n self.DRC_BIOBANK_PREFIX = \"Genomic-Manifest-AoU\"\n\n self.nowts = clock.CLOCK.now()\n self.nowf = _UTC.localize(self.nowts).astimezone(_US_CENTRAL) \\\n .replace(tzinfo=None).strftime(self.OUTPUT_CSV_TIME_FORMAT)", "def __init__(self, name, birthday, premium):\n self.name = name\n self.birthday = birthday\n self.premium = premium", "def __init__(self, x, y, date):\n super().__init__(x, y)\n self.date = date", "def __init__(\n self, datetime,\n provider, asset_license,\n ext_properties\n ):\n self.ext_properties = ext_properties\n self.license = asset_license\n self.provider = provider\n self.datetime = datetime", "def __init__(self):\n 
NodeObject.__init__(self)\n self.priv = None\n self.sex = 0\n self.confidence = \"\"\n self.sortname = \"\"\n self.dates = None # Daterange: Estimated datetype, date1, date2\n\n self.birth_low = None\n self.death_low = None\n self.birth_high = None\n self.death_high = None", "def __init__(self):\n super(ManDaySerie, self).__init__()", "def __init__(self, *args, **kwargs):\n if kwargs:\n for key, value in kwargs.items():\n if key in ('created_at', 'updated_at'):\n date = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')\n setattr(self, key, date)\n elif key != '__class__':\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n models.storage.new(self)", "def __init__(self, prefix, date, county):\n# self._dummyivo = DUMMYIVO\n\n # this to keep pylint happy\n self._ballots = []\n self._filename = ''\n self._pctname = ''\n self._pctnumber = ''\n self._registered = 0\n\n self.readdata(prefix, date, county)", "def __init__(self):\n self.tweets = {}\n self.followees = {}\n self.timestamp = 0", "def __init__(self, profile):\n\n self.subject_name = \"user\"\n Subject.__init__(self, profile, self.subject_name)\n self.api_base_url = self.profile.platform_url + \"/api/v1/\"", "def __init__(self, firstname, lastname):\r\n\t\tsuper().__init__(firstname, lastname)\r\n\t\tself.privelages= Privelages()", "def __init__(self, first, last, email, grade):\n super().__init__(first, last, email, grade)\n self.sessions = []", "def setUpClass(self):\n self.q = query(parse(self.date1).date(), parse(self.date1).date())\n self.fnames = download_granule(self.q[0])", "def __init__(self):\n self.stats = None\n self.ticker = None", "def __init__(self,i,gender, age, days, sexual_activity):\n \n self.identifier = i\n self.gender = gender\n self.days = days\n self.age = age\n self.sexual_activity = sexual_activity\n self.disease_status = 0\n self.time_since_infection = -1\n self.number_of_partners = 0\n self.current_partners = set()", "def __init__(self, students, end_date, section, start_date=None,\n num_weeks=2):\n self.students = students\n self.section = section\n self.end_date = end_date\n self.start_date = start_date\n self.num_weeks = num_weeks\n self.display_end_date = \\\n AttendanceTableCreator.compute_display_end_date(self.end_date)\n if not self.start_date:\n self.start_date = \\\n AttendanceTableCreator.compute_default_start_date(\n self.display_end_date, self.num_weeks)\n self.days_count = self.compute_real_days()\n self.total_days_count = SchoolDB.models.get_num_days_in_period(\n self.start_date, self.display_end_date)\n self.dayperiod_type = []\n self.date_list = []\n self.day_description = []\n self.html_table = '<table id=\"headerTable\" class=\"simple\">'\n self.html_pretty = True\n self._load_days_lists()", "def __init__(self, data=None, **kw):\n def _get_class_by_id(profile_id):\n from solariat_bottle.db.user_profiles.social_profile import DELIMITER, TwitterProfile, FacebookProfile\n pos = unicode(profile_id).rfind(DELIMITER) + 1\n if pos == 0:\n return self.__class__\n platform = None\n try:\n index = int(profile_id[pos:])\n except ValueError:\n logger.info(u\"Could not obtain platform from profile id: {}\".format(profile_id))\n else:\n platform = PLATFORM_BY_INDEX.get(index)\n class_ = {\n TwitterProfile.platform: TwitterProfile,\n FacebookProfile.platform: FacebookProfile\n }.get(platform, self.__class__)\n return class_\n\n if data:\n profile_id = data.get('_id')\n else:\n profile_id = kw.get('id')\n if 
isinstance(profile_id, basestring):\n self.__class__ = _get_class_by_id(profile_id)\n super(UserProfile, self).__init__(data, **kw)", "def __init__(self, paymentDict):\n self.createdTime = calendar.timegm(\n time.strptime(paymentDict['created_time'], '%Y-%m-%dT%XZ'))\n self.actor = paymentDict['actor']\n self.target = paymentDict['target']", "def __init__(self):\n self.meeting_DAO = MeetingDAO()\n self.meeting_person_DAO = MeetingPersonDAO()\n self.person_DAO = PersonDAO()\n self.date_format = \"%d-%m-%Y %H:%M\"", "def daily(self):\r\n return RecordsDaily(self)", "def __init__(self, first_name, last_name, birthday, username):\n self.first_name = first_name\n self.last_name = last_name\n self.birthday = birthday\n self.username = username\n self.login_attempts = 0\n self.age = self.set_birthday()", "def __init__(self):\r\n self.postgres = PostgreSQL()\r\n self.couch_query = Queries()\r\n self.unit_conversion = UnitConversion()\r\n self.calc = calculate_alarm_trigger.CalculateAlarmTrigger()\r\n super(DeviceOverview, self).__init__()", "def __init__(self):\n self.annual_interest_rate = 10.0 / 100.0\n self.initial_loan_date = date(2014, 12, 1)\n self.currency = 'HKD'\n self.total_loan_amount = Money('100000.00', 'HKD')\n self.final_payment_date = self.initial_loan_date + \\\n relativedelta(years=1)", "def __init__(self, cohorts_number, time_zone):\n\t\tself.cohorts_number = cohorts_number\n\t\tself.time_zone = time_zone", "def __init__(self, start: datetime.date) -> None:\n self.start = start\n self.bill = None", "def __init__(self):\n self._calendars = {}\n self._scopes = SCOPES\n self._client_id = CLIENT_ID\n self._auth_type = AUTH_TYPE\n self._credentials = (\n self._client_id,\n )\n self.account = Account(\n credentials=self._credentials,\n auth_flow_type=self._auth_type,\n token_backend=WorkdayTokenBackend(),\n )\n self.itinerary_map = {\n MULTIPLE_EVENTS: self.generate_itinerary_multiple_events,\n SINGLE_EVENT: self.generate_itinerary_single_event,\n }", "def __init__(self):\n self._profiling_mode = False\n self._total_time_ms = 0.0\n self._traced_records = []\n self._statistical_results = {}", "def __init__(self, list_of_availabilities, date):\n Schedule.__init__(self)\n\n self.daily_schedules = {}\n\n number_of_days_in_month = monthrange(date.get_year(), date.get_month())[1]\n\n # The minimum and maximum keys of the dictionary of DailySchedules\n self.min_date_key = 1\n\n self.max_date_key = number_of_days_in_month\n\n # Generates all dictionary keys for all days in the month\n for day in range(self.min_date_key, self.max_date_key + 1):\n self.daily_schedules[day] = None\n\n self.date_of_monthly_schedule = date\n\n self.create_all_daily_schedules(list_of_availabilities)", "def __init__(self, velocity, vorticity, prof_coords, \n direction, beginMeanComput, **kwds):\n assert 'variables' not in kwds, 'variables parameter is useless.'\n super(Profiles, self).__init__(variables=[velocity, vorticity],\n **kwds)\n ## velocity field\n self.velocity = velocity\n ## vorticity field\n self.vorticity = vorticity\n ## X and Y coordinates of the profile\n self.prof_coords = prof_coords\n ## profile direction (0, 1 or 2)\n self.direction = direction\n ## time at which the computation of mean profile must begin\n self.beginMeanComput = beginMeanComput\n self.input = [velocity, vorticity]\n self.output = []", "def __init__(self, miser, fromdt, todt):\r\n self.miser = miser\r\n print self.summary(fromdt, todt)", "def grabDaily(self):\n raise NotImplemented(\"method should be redefined in a subclass\")", 
"def setUpClass(cls):\n cls.w = pd.read_csv(_df.WEATHER_TWO_WEEK, index_col='time',\n parse_dates=True)", "def __init__(self, current_user, produce_name, quantity, price_range):\n self.current_user = current_user\n self.produce_name = produce_name\n self.quantity = quantity\n self.price_range = price_range\n self.date_created = datetime.datetime.utcnow()\n self.date_modified = datetime.datetime.utcnow()", "def __init__(self,\r\n pay_period=None,\r\n billable=None,\r\n asset_id=None,\r\n pay_date=None,\r\n start_date=None,\r\n end_date=None,\r\n net_pay_current=None,\r\n net_pay_ytd=None,\r\n gross_pay_current=None,\r\n gross_pay_ytd=None,\r\n payroll_provider=None,\r\n employer=None,\r\n employee=None,\r\n pay_stat=None,\r\n deductions=None,\r\n direct_deposits=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.pay_period = pay_period\r\n self.billable = billable\r\n self.asset_id = asset_id\r\n self.pay_date = pay_date\r\n self.start_date = start_date\r\n self.end_date = end_date\r\n self.net_pay_current = net_pay_current\r\n self.net_pay_ytd = net_pay_ytd\r\n self.gross_pay_current = gross_pay_current\r\n self.gross_pay_ytd = gross_pay_ytd\r\n self.payroll_provider = payroll_provider\r\n self.employer = employer\r\n self.employee = employee\r\n self.pay_stat = pay_stat\r\n self.deductions = deductions\r\n self.direct_deposits = direct_deposits\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def __init__(self):\n\n self.start_datetime_wallclock = None;\n self.end_datetime_wallclock = None;\n self.action_count = 0;\n self.reward_cumulative = 0;\n self.mission_xml = None;\n self.mission_type = None;\n self.mission_seed = None;\n self.student_guid = None;\n self.mission_xml_as_expected = None;\n self.is_goal = None;\n self.is_timeout = None;", "def __init__(self, principle=0, interest_rate=0.0, year=0):\n\n super().__init__(principle, interest_rate, year)\n self.principle = principle\n self.interest_rate = interest_rate\n self.year = year\n # private variable\n self.__date_of_calc = datetime.datetime.now()\n self.__percentage_interest = self.interest_rate / 100\n self.__months = self.year * 12\n # assert validation for the interest rate\n assert isinstance(interest_rate, float), 'is a not a float'", "def __init__(self):\n self.time = 0\n self.tweets = {}\n self.follows = {}", "def __init__(self, checkpoint_id, student_id, date, title, card):\n self.id = checkpoint_id\n self.student = models.users.User.get_user_by_id(student_id)\n self.date = date\n self.title = title\n self.card = card", "def __init__(self, args, config_file):\n super(Timesheet, self).__init__()\n self.configure_attr(args, config_file)", "def __init__(self, *args, **kwargs):\n if kwargs:\n for key, value in kwargs.items():\n if key != \"__class__\":\n if key == \"created_at\":\n self.created_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"updated_at\":\n self.updated_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"id\":\n self.id = value\n else:\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()", "def __init__(self):\n self.born = None\n self.sex = None\n self.dnp = None\n self.grid_queue = None\n self.name = None\n \n self.time_of_infection = np.Inf \n self.last_match = -np.Inf\n self.attributes = {}", "def __init__(self):\n self.time = 0\n self.tweets = {}\n self.followstar = {}", 
"def __init__(self, file_path, day, exec_time):\n\n self.file_path = file_path\n self.day = day\n self.exec_time = exec_time", "def __init__(self):\r\n self.id = random.randint(0, 1 * (10 ** 9))\r\n self.title = None\r\n self.date = None\r\n self.time = None\r\n self.datetime = None\r\n self.duration = None\r\n self.notes = None\r\n self.recurring = None\r\n self.rec_interval = {\r\n \"unit\": None, \"skip\": None, \"days\": None, \"ordinal\": None,\r\n \"dates\": None, \"end\": None}\r\n self.rec_total = None\r\n self.rec_child_seq = None\r\n self.rec_parent = None\r\n self.info = {}", "def __init__(self, timestamp, rider):\n super().__init__(timestamp)\n self.rider = rider", "def __init__(self, timestamp, rider):\n super().__init__(timestamp)\n self.rider = rider", "def set_heritage(self):\n if self.has_non_empty_attribute(\"registration_date\"):\n try:\n iso_date = JalaliCalendar(self.registration_date).get_date()\n except TypeError:\n print(\"dateparser.JalaliCalendar could not handle: {}\".format(\n self.registration_date))\n iso_date = None\n\n if iso_date:\n date_dict = utils.datetime_to_dict(\n iso_date.get('date_obj'), \"%Y%m%d\")\n qualifier = {\"start_time\": utils.package_time(date_dict)}\n heritage = self.mapping[\"heritage\"][\"item\"]\n self.add_statement(\"heritage_status\", heritage, qualifier)\n else:\n self.add_to_report(\n \"registration_date\", self.registration_date, \"start_time\")\n else:\n super().set_heritage()", "def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None", "def __init__(self):\n self.__dao = None\n self._id: int = None\n self._frequency = None\n self._from_date = None\n self._to_date = None\n self._payment_account_id = None", "def __init__(self, timestamp, driver):\n super().__init__(timestamp)\n self.driver = driver", "def __init__(self, *args, **kwargs):\r\n if kwargs:\r\n for key, value in kwargs.items():\r\n\r\n if key == \"created_at\" or key == \"updated_at\":\r\n setattr(self, key, datetime.strptime(value,\r\n \"%Y-%m-%dT%H:%M:%S.%f\"))\r\n\r\n elif key != \"__class__\":\r\n setattr(self, key, value)\r\n\r\n else:\r\n self.id = str(uuid.uuid4())\r\n self.created_at = datetime.now()\r\n self.updated_at = datetime.now()\r\n models.storage.new(self)", "def __init__(self, date, latitude, longitude, timezone):\n self.name = \"Sunrise Sunset Calculator\"\n self.date = date\n self.latitude = latitude\n self.longitude = longitude\n self.timezone = timezone\n return", "def __init__(self):\n self.date = str(date.today())\n today_date = str(date.today())\n today_date = today_date.split(\"-\")\n self.curr_year = int(today_date[0])\n self.curr_month = int(today_date[1])\n self.curr_date = int(today_date[2])", "def init():\n global last_datetime\n global data_file\n global last_minute\n\n # Atualiza as variáveis 'last_datetime' e 'last_minute'\n last_datetime = datetime.datetime.today()\n last_minute = last_datetime.minute\n\n # Define o diretório do arquivo\n data_file_path = 'data/' + get_date(last_datetime)\n\n # Gera o diretório\n try:\n os.makedirs(data_file_path)\n\n except FileExistsError:\n pass\n\n # Arbre o arquivo de dados\n data_file = open(\n data_file_path + '/' + get_time(last_datetime) + '.csv', 'w'\n )", "def __init__(self, code, start_date=\"1900-01-01\", end_date=\"2020-01-01\"):\n base = Base()\n self.datas = base.getData(\n code=code, start_date=start_date, end_date=end_date)\n self._index = 0\n self.period = 14", "def __init__(self, *args, **kwargs):\n self.id = str(uuid4())\n self.created_at = 
datetime.today()\n self.updated_at = datetime.today()\n\n format = \"%Y-%m-%dT%H:%M:%S.%f\"\n if len(kwargs) != 0:\n \"\"\"Conditionals for kwargs\"\"\"\n for ky, val in kwargs.items():\n if ky == \"created_at\" or ky == \"updated_at\":\n self.__dict__[ky] = datetime.strptime(val, format)\n else:\n self.__dict__[ky] = val\n else:\n models.storage.new(self)", "def __init__(self, simulated_cohort):\n\n self._eclampsiaTimes = [] # patients' eclampsia times\n self._utilities = []\n self._costs = []\n \n \n\n # eclampsia curve\n self._eclampsiaCurve = \\\n PathCls.SamplePathBatchUpdate('Population size over time', id, simulated_cohort.get_initial_pop_size())\n\n # find patients' ec times\n for patient in simulated_cohort.get_patients():\n\n # get the patient EC time\n eclampsia_time = patient.get_eclampsia_time()\n if not (eclampsia_time is None):\n self._eclampsiaTimes.append(eclampsia_time) # store the EC time of this patient\n self._eclampsiaCurve.record(eclampsia_time, -1) # update the EC curve\n\n self._costs.append(patient.get_total_discounted_cost())\n self._utilities.append(patient.get_total_discounted_utility())\n\n # summary statistics\n self._sumStat_ECTime = StatCls.SummaryStat('Patient Eclampsia time', self._eclampsiaTimes)\n self._sumStat_cost = StatCls.SummaryStat('Patient discounted cost', self._costs)\n self._sumStat_utility = StatCls.SummaryStat('Patient discounted utility', self._utilities)", "def __init__(self, month, day, year):\n self.month = month\n self.day = day\n self.year = year", "def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):", "def _createModuleObj(self):\n ModuleTimeHistory.__init__(self)", "def __init__(self, timestamp):\n self.timestamp = timestamp", "def __init__(self, **kwargs):\n self._defined = dict(_DEFAULT_TASKS_CHARACTERISTICS)\n self._defined.update(kwargs)\n self.clean_dates()\n self._parent = None", "def __init__(self, time, metadata):\n self.time = time\n self.metadata = metadata", "def __init__(self, sleep=1, path_driver=None, headless=True, date_format='%Y-%m-%d'):\n # Current directory\n self.dir = os.getcwd()\n # Define download folder for browser:\n if os.name == 'nt':\n self.download_path = self.dir + r'\\tmp'\n else:\n self.download_path = self.dir + '/tmp'\n # Create a temporary folder in case it does not exist yet\n if not os.path.isdir('tmp'):\n os.mkdir('tmp')\n # Define the path to the downloaded csv-files (this is where the trends are saved)\n if os.name == 'nt':\n self.filename = 'tmp\\\\multiTimeline.csv'\n else:\n self.filename = './tmp/multiTimeline.csv'\n # Whether the browser should be opened in headless mode\n self.headless = headless\n # Path to the driver of Google Chrome\n self.path_driver = path_driver\n # Initialize the browser variable\n self.browser = None\n # Sleep time used during the scraping procedure\n self.sleep = sleep\n # Maximal number of consecutive days scraped\n self.max_days = 200\n # Format of the date-strings\n self.date_format = date_format\n # Format of dates used by google\n self._google_date_format = '%Y-%m-%d'\n # Lunch the browser\n self.start_browser()", "def __init__(self, clock=proctime):\n self._clock = clock", "def __init__(self):\n self._data = None\n self._forecast_data = None\n self._today_data = None\n self.last_updated = None", "def __init__(self, f=None, date='2020-12-11'):\n super().__init__()\n self.freq = f\n self.date = date # Not used, but could if wanted \"real\" datetimes.", "def __init__(self, timestamp, rider, driver):\n 
super().__init__(timestamp)\n self.driver, self.rider = driver, rider", "def __init__(self, timestamp, rider, driver):\n super().__init__(timestamp)\n self.driver, self.rider = driver, rider" ]
[ "0.6303617", "0.6276865", "0.61957604", "0.6129965", "0.60512453", "0.5994838", "0.59334636", "0.5906946", "0.57980454", "0.57551837", "0.5752836", "0.57355404", "0.5726834", "0.5710793", "0.5709311", "0.56886977", "0.5673901", "0.5655608", "0.5648033", "0.5615956", "0.55905086", "0.5588783", "0.5588267", "0.5580841", "0.5574352", "0.55732423", "0.55709463", "0.5559031", "0.5553749", "0.55436355", "0.5543592", "0.5539966", "0.55385375", "0.5530021", "0.5529546", "0.5525701", "0.55252904", "0.5518925", "0.5516282", "0.551616", "0.5514034", "0.55067587", "0.5503319", "0.5499573", "0.5491251", "0.5487815", "0.5487709", "0.5483828", "0.5473526", "0.54695606", "0.54615754", "0.5457805", "0.5453458", "0.544981", "0.54362226", "0.5434501", "0.5433852", "0.5427858", "0.5413772", "0.54091644", "0.54058206", "0.5404817", "0.5402217", "0.53776205", "0.5371445", "0.53659827", "0.53566295", "0.5356357", "0.53558505", "0.535098", "0.5346226", "0.53453547", "0.5343523", "0.5340735", "0.5308813", "0.53041005", "0.53041005", "0.5302624", "0.530186", "0.53007305", "0.52930367", "0.52916014", "0.52883375", "0.5287221", "0.52856046", "0.52855617", "0.52765507", "0.52757555", "0.5275501", "0.5274395", "0.5272864", "0.526705", "0.52626383", "0.5262447", "0.5256377", "0.52563286", "0.52533346", "0.52480036", "0.52461696", "0.52461696" ]
0.6949065
0
Return an irradiance value from the profile.
def __getitem__(self, index: datetime.time) -> float:
    # If the index is in the profile, return the index.
    if index in self._profile:
        return self._profile[index]
    # If the index is not in the profile, then the closest value needs to be
    # determined. If there is a tie, this does not matter.
    delta_t_to_t_map = {
        (
            abs(
                time.hour * 3600
                + time.minute * 60
                + time.second
                - (index.hour * 3600 + index.minute * 60 + index.second)
            )
        ): time
        for time in self._profile
    }
    return self._profile[delta_t_to_t_map[min(delta_t_to_t_map)]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def irradiance(self) -> float:\n\n if self.declination > 0:\n return self._irradiance\n return 0", "def internal_rate_of_return(proforma):\n return np.irr(proforma['Yearly Net Value'].values)", "def getIR2() -> int:\n pass", "def emissivity_profile(R):\n E = R**(-3.0)*(1 - (R_in/R)**(1.0/2.0))\n return E", "def getRA(self):\n return self._ra", "def getIR1() -> int:\n pass", "def radiant_score(self):\n return self._get(\"radiant_score\")", "def __get_suggested_risk_score(user_risk_profile):\n try:\n return get_risk_profile(user_risk_profile.user)['risk_profile'][\n 'value']\n except ReportWasNotGenerated:\n return user_risk_profile.risk_profile.value", "def _get_fpr(self, arg):", "def getIntensitiy(self, which_photocell):\n if which_photocell == \"A\":\n return self.A_intensity\n elif which_photocell == \"B\":\n return self.B_intensity\n elif which_photocell == \"C\":\n return self.C_intensity\n elif which_photocell == \"D\":\n return self.D_intensity\n else:\n exit(f\"Wrong photocell ID given: <<{which_photocell}>>. Only (A, B, C or D) allowed\")", "def _do_get_rate(self):\n rate = {\n 1: \"1 : Helium Probe in FAST rate\",\n 0: \"0 : Helium Probe in SLOW rate\"\n }\n result = self._execute('X')\n return rate.get(int(format(int(result[5:7]), '08b')[6]), \"Unknown\")", "def getIR3() -> int:\n pass", "def get_patient_race(patient_resource):\n for ext in patient_resource['extension']:\n if (ext['url'] == 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-race'):\n for map in ext['extension']:\n if 'valueCoding' in map:\n vc = map['valueCoding']\n my_tuple = (vc['system'], vc['code'], vc['display'])\n return my_tuple", "def get_pir_status(self):\n response = self.parent.pir.status()\n return response[0]", "def test_get_risk_profile_using_get(self):\n pass", "def risk_score(self):\n return GoalMetric.objects.filter(group=self.metric_group,\n type=GoalMetric.METRIC_TYPE_RISK_SCORE).values_list('configured_val',\n flat=True).first()", "def get_firerate(self):\n return self._firerate", "def rsi(date):\n\n # print(float(r_json['Technical Analysis: RSI'][date]['RSI']))\n return float(r_json['Technical Analysis: RSI'][date]['RSI'])", "def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")", "def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")", "def real_retrieve_irr(self, irr, duration=rospy.Duration(600, 0)):\n start = rospy.Time.now()\n wp = self._current_wp\n rospy.loginfo(\"%s is approaching side of %s ...\" %\n (self.namespace, irr))\n maximum_distance = 5\n pickup_radius = 0.12\n approaching = self.follow_target(irr, False,\n [0., -1 * maximum_distance, 1.6], 0.,\n None, rospy.Duration(3, 0), duration)\n duration = duration - (rospy.Time.now() - start)\n start = rospy.Time.now()\n rospy.loginfo(\"%s is waiting for engagement ...\" % self.namespace)\n while (self._irr_ready_to_be_picked == 0) and not (\n rospy.is_shutdown()) and (rospy.Time.now() - start < duration\n ) and (not self.external_intervened):\n self._rate.sleep()\n duration = duration - (rospy.Time.now() - start)\n start = rospy.Time.now()\n if self._irr_ready_to_be_picked > 0:\n rospy.loginfo(\"%s is trying to engage with %s ...\" %\n (self.namespace, irr))\n engaging = self.irr_follow_engagement(irr, pickup_radius, duration)\n duration = duration - (rospy.Time.now() - start)\n start = rospy.Time.now()\n else:\n engaging = self.ACTION_FAIL\n self._irr_ready_to_be_picked = 0\n back_to_wp = self.goto(wp, duration, True, False)\n return np.min([approaching, 
engaging, back_to_wp])", "def profile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"profile\")", "def Insurance(Md,X):\n u = X[iu]\n b = Md.b()\n return u/b - u/(1-u+u*b)", "def gas_profile(self, R, potential='NFW'):\n \n if 'potential' in self.ic.keys():\n potential = self.ic['potential']\n \n if 'potential_type' in self.ic.keys():\n potential = self.ic['potential_type']\n \n if potential == 'NFW':\n dens = prof.NFW_isothermal_gas(R, r_s=self.ic['b'],\n M200=self.ic['M200'],\n T = self.ic['T_dwarf'], \n n_o = self.ic['n_o'],\n mu=self.ic['mu_dwarf'],\n rho_crit=self.ic['rho_crit'],\n Pcorona = self.ic['n_halo']*cgs.kb*self.ic['T_halo'])\n elif potential == 'Burkert' or potential == 'Burkert_isothermal':\n dens = prof.Burkert_isothermal_gas(R, r_s = self.ic['b'], M200 = self.ic['M200'],\n T = self.ic['T_dwarf'], n_o = self.ic['n_o'],\n mu = self.ic['mu_dwarf'],\n rho_crit = self.ic['rho_crit'])\n \n return dens", "def _risco(self):\n return self.coordinator.risco", "def getResistence(self):\n return self.resistence", "def getIntensityP(self):\n return self._Epi.intensity()", "def calc_collector_irradiance(irradiance_on_collector, cleanliness):\n collector_irradiance = irradiance_on_collector * cleanliness**1.5\n collector_irradiance[collector_irradiance < 0] = 0\n collector_irradiance = collector_irradiance.fillna(0)\n\n return collector_irradiance", "def effecticeInterestRate():\n rate = float(input(\"What is your interest rate:\\n\"))\n compound = int(input(\"How many times in a year you give interest:\\n\"))\n\n EIR = (1 + ((rate/100)/compound))**compound - 1\n eir = EIR*100\n return \"Your effective interest rate is: %.3f\" % eir", "def get_damage():\n\n return character['Damage']", "def get_age(self):\n return self.glb[iage]", "def getIR4() -> int:\n pass", "def getPapiInterpretationById(self,score):\n #logging.info('UserController.getUser(%s)', user_id)\n uraian = interpret_service.getPapiInterpretationById(score)\n return uraian", "def RA(self):\n return self.meta[\"header\"][\"OBJRA\"] * u.deg", "def get_inj_rate(inj_type, inj_fac):\n\n def inj_rate(rs):\n if inj_type == 'sWave':\n return inj_fac*(rs**6)\n elif inj_type == 'decay': \n return inj_fac*(rs**3)\n\n return inj_rate", "def profile(self):\n return self._profile", "def profile(self):\n return NumericStatsMixin.profile(self)", "def profile(self) -> AskarProfile:\n return self._profile", "def raw_value(self):\n if self.__trigger == gyro_trigger_mode.GET_RAW_VALUE_TRIGGER_READ:\n self.read_and_update_angle()\n return self.__value", "def _accident_insurance(total_salary, industry_risk_rate):\n industry_type_rate = industry_risk_rate / 100\n\n accident_insurance_rate = industry_type_rate\n accident_insurance = accident_insurance_rate * total_salary\n\n return round(accident_insurance, 1)", "def get_report_tau_or_i(self):\r\n return self._arm.get_report_tau_or_i()", "def profile(x):\n return x", "def getIR5() -> int:\n pass", "def getIntensity(self):\n return self.__intensity", "def _baseline_value(self):\n t = self['primary']\n return np.median(t.data[:int(10e-3/t.dt)])", "def LoadRateValue(self):\n\t\treturn self._get_attribute('loadRateValue')", "def tirageR1(self):\n\n proba = np.zeros(shape=(3))\n proba[0] = self.__alpha0 + self.__beta + self.__eta/2.\n proba[1] = self.__alpha1 + self.__beta + self.__eta/2.\n proba[2] = 1. 
- (proba[0]+proba[1])\n # typeSample = np.random.choice(a=['0.', '1.', 'F'], size=1, p=proba)[0]\n typeSample = random.choices(population=['0.', '1.', 'F'], weights=proba)[0]\n if typeSample != 'F':\n r1 = float(typeSample)\n else:\n r1 = self.__rv_parab.rvs()\n\n return r1", "def intensity(self) -> int:", "def getIntensity(self):\n return self.getIntensityS() + self.getIntensityP()", "def getLoanInterest(self, credit):\n if credit >= 720:\n return 0.11\n elif credit >= 680:\n return 0.14\n elif credit >= 640:\n return 0.19\n else:\n return -1", "def aire(self, r):\r\n self.r_num(r)\r\n return self.r**2 * pi", "def testPsychIrritation(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"irritation\")\n\n self.util.intPropertyTest(self, attr, \"irritation\")", "def get_grating(self):\n grating = c_int()\n self._dll.ShamrockGetGrating(self._device, byref(grating))\n return grating.value", "def get_icc_profile(decoded_data):\n # fixme: move this function somewhere?\n icc_profiles = [res.data for res in decoded_data.image_resource_blocks\n if res.resource_id == ImageResourceID.ICC_PROFILE]\n\n if not icc_profiles:\n return None\n\n icc_profile = icc_profiles[0]\n\n if isinstance(icc_profile, bytes): # profile was not decoded\n return None\n\n return icc_profile", "def incomeBar(self):\r\n return self._createTextProfile(self.income)", "def getAI(self):\n device = self.reducetoolbar.detectorcombobox.currentText()\n ai = self.calibrationsettings.AI(device)\n return ai", "def brightness(self):\n return self.get_value('bri')", "def EstimateIlluminationProfile(image, kernel_sigma): \n \n illumination_profile = Denoising(image, kernel_sigma);\n \n map_corr = np.ones(image.shape);\n map_corr = Denoising(map_corr, kernel_sigma);\n \n illumination_profile = illumination_profile / map_corr;\n \n return illumination_profile;", "def get_pir_mode(self):\n return self.parent._usa_pir", "def _death_insurance(self, total_salary):\n return int(total_salary * self.death_insurance_rate)", "def get_utility_value(self):\n raise AIError(\"Must be implemented in child class!\")", "def get_profile_stats():\n return p_stats", "def profile(self):\n\n def _flatten(f):\n return [coeffifient for value in f.values()\\\n for coeffifient in value.coefficients()]\n\n elements = _flatten(self.domain().j) +\\\n _flatten(self.codomain().j) +\\\n _flatten(self)\n\n\n profile = enveloping_profile_elements(elements)\n\n # Avoid returning the zero profile because it triggers a corner case\n # in FP_Module_class.resolution().\n # \n # XXX: Fix FP_Module_class.resolution().\n #\n return (1,) if profile == (0,) else profile", "def getRandomRarity():\n r = random.randint(1,100)\n if r <= Rarities.IMPOSIBIL:\n return \"IMPOSIBIL\"\n elif r <= Rarities.LEGENDAR:\n return \"LEGENDAR\"\n elif r <= Rarities.EPIC:\n return \"EPIC\"\n else:\n return \"COMUN\"", "def profile(self):\n return self.__profile", "def getCurrentResourceValue(self):\n AL = self.myDesign.costAL - self.repairCost[1]\n EC = self.myDesign.costEC - self.repairCost[2]\n IA = self.myDesign.costIA - self.repairCost[3]\n return (AL, EC, IA)", "def get_risk_status(reg_type, mom_dob, edd):\n\n # high risk if postbirth registration\n if \"postbirth\" in reg_type:\n return \"high\"\n\n # high risk if age < 18\n age = utils.get_mom_age(utils.get_today(), mom_dob)\n if age < 18:\n return \"high\"\n\n # high risk if registering after 20 weeks pregnant\n weeks = utils.get_pregnancy_week(utils.get_today(), edd)\n if weeks >= 20:\n return \"high\"\n\n 
# otherwise normal risk\n return \"normal\"", "def rsi(self) -> float:\n return self._rsi", "def read_ir(self):\n return self._read16(0x24, little_endian=True)", "def getprofile(): # real signature unknown; restored from __doc__\n pass", "def _get_isis_level(self):\n return self.__isis_level", "def profile_pic(self):\n raise AttributeError('profile_pic is not a readable attribute')", "def get_grade(self) -> int :\n return self.grade", "def reader_score():\n reader_ranef_negative, reader_ranef_positive = sigma_r * rng.randn(2)\n error_term = np.sqrt(1 - sigma_c**2) * rng.randn(num_cases)\n reader_score = (mu + delta_mu) * disease\n reader_score += reader_ranef_negative * (1 - disease)\n reader_score += reader_ranef_positive * disease\n reader_score += sigma_c * case_random_effect\n reader_score += error_term\n return reader_score", "def get_isolar(self):\n return self.read_register(4099, 1, 3)", "def get_r_score(self):\n return self.r_score", "def get_bg(bg_reader):\n print('Type your current bg.')\n bg = bg_reader() # Type your current bg\n try:\n bg_as_integer = int(bg)\n return bg_as_integer\n except Exception as e:\n return math.nan", "def personal_allowance(self):\n\t\treturn self._personal_allowance", "def get_pulse_value(object = pulse_value_req):\n try:\n response = urllib2.urlopen(object).read()\n pulse_value = json.loads(response)\n return pulse_value['GetSensorValue'][0]\n except URLError, e:\n print 'Error: No Heartrate Value.'", "async def rain_rate(self, value):\n if not value:\n return 0\n return await self.rain(value * 60)", "def get_HRRR_value(getthisDATE):\n print getthisDATE\n H = get_hrrr_variable(getthisDATE, variable, fxx=fxx, model='hrrr', field='sfc', value_only=True, verbose=False)\n if H['value'] is None:\n print \"!! WARNING !! 
COULD NOT GET\", getthisDATE\n return H['value']", "def get_rate(parent=None):\n dialog = RateDialog(parent)\n dialog.exec_()\n rate = dialog.rate\n return rate", "def _get_profile(self, chrom, start, end, hdf5_reader):\n if self.profile_size:\n start, end = CoordsToVals._resize_interval(\n start, end, self.profile_size\n )\n return hdf5_reader[chrom][start:end]", "def get_rz(self):\n return self.rz", "def humidity(self):\n names = ['anc_air_relative_humidity']\n return self.sensor.get_with_fallback('humidity', names)", "def get_alicuota(self, txt_line):\n return int(txt_line.tax_wh_iva_id.tax_id.amount * 100)", "def ir(irspec):\n\tcalendar = None\n\ttokens = irspec.split()\n\tfor tok in tokens:\n\t\tm = re.match('^(\\d+)(\\.\\d+)?$', tok)\n\t\tif m:\n\t\t\trate = float(m.group())\n\t\telif tok in Compounding.names:\n\t\t\tcompounding = Compounding(tok)\n\t\telif tok in DayCount.names:\n\t\t\tdaycount = DayCount(tok)\n\t\telif tok in Frequency.names:\n\t\t\tfrequency = Frequency(tok)\n\t\telif tok.startswith('cal'):\n\t\t\tcalendar = Calendar(tok.replace('cal', ''))\n\treturn InterestRate(rate, frequency, compounding, daycount, calendar)", "def get_roll_value_for_rating(self) -> int:\n if self.rating:\n return super().get_roll_value_for_rating()\n self.rating = self.check.get_difficulty_rating(self.target, **self.roll_kwargs)\n return self.rating.value", "def get_rain():\n global rain\n\n # Report rain only if the condition is 'rainy' (and not always).\n if weather_condition == CONDITION_RAINY and random.random() > 0.7:\n rain += round(random.random(), 2)\n return rain", "def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)", "def analize(self, text):\n\n try:\n profile = self.personality_insights.profile(text,\n raw_scores=True,\n consumption_preferences=True)\n except Exception as e:\n print(\"Error during API call\", e)\n profile = \"\"\n\n return profile", "def _getLilyAccidental(self):\n s = \"\"\n if self.accidental == 0:\n return \"\"\n elif self.accidental < 0:\n s = _LILYFLAT\n elif self.accidental > 0:\n s = _LILYSHARP\n return s * abs(self.accidental)", "def getInteractionRate(self):\n m = mctal.MCTAL(self.name+'.m')\n t = m.tallies[4]\n # Returing the total\n return t.data[-1],t.errors[-1]", "def getValue(self):\n if self.__success:\n return self.__personalCode\n else:\n return self.__success", "def profile_status(self) -> Optional[str]:\n return pulumi.get(self, \"profile_status\")", "def relative_rate_i(self) -> \"uint64_t\":\n return _beamforming_swig.doaesprit_sptr_relative_rate_i(self)", "def getRaiz(self):\n return self.__raiz", "def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)", "def read_scored_qr(profilefn, phenofile, alpha, nsnps, score_type='sum'):\n if score_type == 'sum':\n col = 'SCORESUM'\n else:\n col = 'SCORE'\n # Read the profile\n sc = pd.read_table(profilefn, delim_whitespace=True)\n # Read the phenotype file\n pheno = pd.read_table(phenofile, delim_whitespace=True, header=None, names=[\n 'FID', 'IID', 'pheno'])\n # Merge the two dataframes\n sc = sc.merge(pheno, on=['FID', 'IID'])\n # Compute the linear regression between the score and the phenotype\n lr = linregress(sc.pheno, sc.loc[:, col])\n # Return results in form of dictionary\n dic = {'File': profilefn, 'alpha': alpha, 'R2': lr.rvalue ** 2,\n 'SNP kept': nsnps}\n return dic", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def getRate(self) -> int:\n if (self._total_stake.get() + self._daily_reward.get()) == 
0:\n rate = DENOMINATOR\n else:\n rate = (self._total_stake.get() + self._daily_reward.get()) * DENOMINATOR // self.sICX_score.totalSupply()\n return rate" ]
[ "0.68686956", "0.5816468", "0.5590658", "0.5589434", "0.5456386", "0.5432771", "0.5328536", "0.5325384", "0.52494484", "0.52432454", "0.5228382", "0.5184835", "0.5157474", "0.5141578", "0.5102126", "0.5093269", "0.5086786", "0.5064944", "0.5061839", "0.5061839", "0.50400925", "0.50385624", "0.5038496", "0.5016203", "0.50140893", "0.4994967", "0.49897972", "0.49851453", "0.49758354", "0.4975678", "0.49681172", "0.4953339", "0.4943794", "0.4932322", "0.49071568", "0.49039954", "0.48956856", "0.4892107", "0.4885312", "0.4882682", "0.48792616", "0.4870447", "0.48627606", "0.48589128", "0.48568276", "0.48522905", "0.48511356", "0.4849416", "0.48394796", "0.4828775", "0.48198456", "0.4818042", "0.48114616", "0.4799851", "0.4796497", "0.4791946", "0.47854686", "0.47776794", "0.47765422", "0.47752538", "0.4769914", "0.476159", "0.47599167", "0.47585258", "0.4758283", "0.47455683", "0.47447497", "0.4744233", "0.47428373", "0.4739907", "0.4738319", "0.4733329", "0.4714163", "0.46990594", "0.4695292", "0.46885327", "0.46870062", "0.46832007", "0.46809822", "0.46785685", "0.46775144", "0.46731538", "0.46728098", "0.46723452", "0.46698618", "0.46635324", "0.46574143", "0.4655005", "0.46491295", "0.46482953", "0.4644685", "0.46437904", "0.4641408", "0.46383816", "0.46286628", "0.46232983", "0.46216938", "0.4620931", "0.46204528", "0.46201804", "0.4618988" ]
0.0
-1
Sets an item in the profile.
def __setitem__(self, index: datetime.time, value: float) -> None:
    self._profile[index] = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_item(self, item):\n self.item = item", "def set_item(self, item):\n self.item = item", "def SetItem(self, item):\r\n\r\n self._item = item", "def setItem(self, item):\n self.setItem(0, item)", "def set(self, item, value):\r\n raise NotImplementedError", "def set(self, value):\n self._storage.set(self._item, value)", "def _set_item(self, index, item):\r\n switched_item = self._items[index]\r\n if switched_item:\r\n switched_item.on_taken_from_bag(self)\r\n self.set_switched_item(switched_item)\r\n if item:\r\n item.on_put_into_bag(self)\r\n self._items[index] = item", "def __setitem__(self, key, item):\n self.set_field(key, item)", "def assignValue(self,value):\n self.itemset(value)", "def assignValue(self,value):\n self.itemset(value)", "def set(self, item, **attrs):\n attrs[self.type] = item\n self.graph._setattrs(self.parent.handle, **attrs)", "def setItem(self, item: Optional[items.Item]):\n previous = self.getItem()\n if previous is not None:\n previous.sigItemChanged.disconnect(self.__itemChanged)\n\n self.__itemRef = None if item is None else weakref.ref(item)\n if item is not None:\n if isinstance(item, self._SUPPORTED_ITEM_CLASS):\n # Only listen signal for supported items\n item.sigItemChanged.connect(self.__itemChanged)\n self._updateFromItem()", "def set_item(self, key, value):\n # TODO: Add self.prefix\n self.table.putValue(key, value)", "def give_item(self,item):\n self.inv[item.alias] = item.desc", "def _single_setitem(self, key, item):\n getattr(self._cpp_obj, self._setter)(key, item)", "def _single_setitem(self, key, item):\n self._dict[key] = item", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def updateItem(self, object):\n pass", "def __setitem__(self, item, value):\n self.vars[item] = value", "def updateColorItem(self, item, itemColor): \n self.avatarConfiguration[item] = itemColor\n self.paintAvatarItem(item)", "def __setitem__(self, item, value):\r\n debug.write(\"[SourceRPG] Assigning attribute %s with the value of %s to player %s\" % (item, value, self.name), 3)\r\n if item in self.currentAttributes:\r\n debug.write(\"Value is in current attributes, assign to the currentAttributes dict\", 4)\r\n self.currentAttributes[item] = value\r\n elif item in self.currentSkills or item in skills:\r\n debug.write(\"Value is in skills, assign to the currentSkills dict\", 4)\r\n self.currentSkills[item] = value\r\n else:\r\n debug.write(\"Value is not in any dictionary, assign to the custom playerAttributes dict\", 4)\r\n self.playerAttributes[item] = value\r\n debug.write(\"[SourceRPG] Value updated\", 3)", "def item_info(self, item_info):\n\n self._item_info = item_info", "def __setitem__(self, item, val):\r\n item.set_value(val, borrow=True)", "def setitem_key_value(self):\n raise NotImplementedError", "def __setitem__(self, key, val):\n self._user_data[key] = val", "def setitem(obj, attr, value):\n from functools import partial\n _setattr = partial(setattr, obj)\n setter = getattr(obj, '__setitem__', None) or _setattr\n setter(attr, value)", "def set(self, name1, natl, item):\n if name1 not in self.data: self.data[name1] = {}\n self.data[name1][natl] = item", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def item_id(self, item_id):\n\n self._item_id = item_id", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def setitem(obj, idx, value):\n obj.__setitem__(idx, value)", "def set_item(self, row, 
col, value):\n self.board[row][col] = value", "def setItem(item, **kwargs):\n item.wdgEnabled.setChecked(kwargs['enabled'])\n item.wdgLabel.setText(kwargs['label'])\n item.wdgType.setCurrentIndex(item.wdgType.findText(kwargs['type']))\n item.wdgValue.setText(kwargs['value'])\n item.wdgComment.setText(kwargs['comment'])", "def set_profile(self, profile: str):\n self._profile = profile", "def store_item(self, item_in_json): # pragma: no cover\n raise NotImplementedError", "def update_item(self, table, item):", "def set_item_selection(self, item):\n self._set_item_selection(item.encode())", "def __setitem__(self, key, value):\n self.settings.set(key, value)", "def set(self, key, value):\r\n self.set_many({key: value})", "def remember(self, item):\n self.memory.append(item)", "def set_followups(self, elem_name, item_name):\n self.elems[elem_name].followup = self.items[item_name]", "def put(self, item, value, set_doc):\n if item is None:\n raise Exception(\"call __setitem__ with None argument\")\n else:\n self.size += 1\n self.root = self.set(self.root, item, int(value), 0, set_doc)", "def set_ixtools_account(self, item_name):\n self.single_selection_from_kendo_dropdown(self.ixtools_account_kendo_dropdown_locator, item_name)\n self.wait_for_ajax_spinner_load()", "def set(self, value):\n self._nsObject.selectItemAtIndex_(value)", "def _set_item(dic: dict, keys: list, value):\n\tdic = _get_item(dic, keys[:-1])\n\tdic[keys[-1]] = value", "def __setitem__(self, key, value):\n self.set_attribute(key, value)", "def __setitem__(self, key, value):\n self.set(key, value)", "def __setitem__(self, key, value):\n self.set(key, value)", "def __setitem__(self, key, value):\n self.set(key, value)", "def setprofile(variable, value, account, pair):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n keys = []\n values = []\n if pair:\n for p in pair:\n key, value = p.split(\"=\")\n keys.append(key)\n values.append(value)\n if variable and value:\n keys.append(variable)\n values.append(value)\n\n profile = Profile(keys, values)\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n\n json_metadata = Profile(acc[\"json_metadata\"] if acc[\"json_metadata\"] else {})\n json_metadata.update(profile)\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)", "def put(self, item): \n self.__db.rpush(self.key, item)", "def _update_item(self, item, user):\n item.user_modified = user\n try:\n item.panel = item.panel\n item.item_priority = item.priority\n except AttributeError:\n pass\n item.is_packed = True\n item.save()\n return item", "def SetOldItem(self, item):\r\n \r\n self._itemOld = item", "def item_read(self, item):\n self.update_item(item)", "def __set__(self, page, value):\n element = self.get(page)\n element.value = value", "def __setitem__(self, item_index: Index, new_item: Item) -> None:\n raise NotImplementedError(\"__setitem__\")", "def setItem(self, column_number, row_number, value):\n self.data[column_number, row_number] = value\n return", "def SetPyData(self, item, data):\r\n\r\n item.SetData(data)", "def SetItemType(self, item, ct_type):\r\n\r\n item.SetType(ct_type)\r\n self.CalculatePositions()\r\n self.Refresh()", "def f_set_single(self, name, item):\n\n if self.v_stored:\n self._logger.debug(\n \"You are changing an already stored result. 
If \"\n \"you not explicitly overwrite the data on disk, this change \"\n \"might be lost and not propagated to disk.\"\n )\n\n if self._supports(item):\n\n # self._check_if_empty(item, name) # No longer needed\n\n if name in self._data:\n self._logger.debug(\n \"Replacing `%s` in result `%s`.\" % (name, self.v_full_name)\n )\n\n self._data[name] = item\n else:\n raise TypeError(\n \"Your result `%s` of type `%s` is not supported.\"\n % (name, str(type(item)))\n )", "def __setitem__(self, index, newItem):\r\n self._items[index] = newItem", "def _put(self, item: T) -> None:\n ...", "def set_user(self, user):\r\n self.user = user", "def setUser(self, value):\n return self._set(user=value)", "def save_user_profile(user_profile):\r\n config_settings = config.get_config()\r\n db_client = boto3.resource('dynamodb', region_name = config_settings['region'])\r\n table = db_client.Table(config_settings['table_name'])\r\n\r\n try:\r\n table.put_item(Item=user_profile)\r\n except ClientError as e:\r\n logger.error(\"Failed to save profile {}:{}\"\r\n .format(jsons.dumps(user_profile), e.response['Error']['Message']))", "def SetUserInformation(self, user_info):\n self._session[_USER_INFO_KEY] = user_info", "def __setitem__(self,i,v):\n _items[i] = v", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item", "def __setitem__(self, key, value):\n self.list[key] = value", "def set_card_profile_by_index(self, index, profile):\n self._set_card_profile_by_index = pa_context_success_cb_t(self._context_success_cb)\n pa_context_set_card_profile_by_index(self._context,\n index,\n profile,\n self._set_card_profile_by_index,\n None)", "def setvalue(self, index, value):\n self._checkIndex(index)\n self._items[index].value = value", "def __setitem__(self, index, value):\n self._update_value_at(index, value)", "def item_shared(self, item):\n self.update_item(item)", "def __setitem__(self, name, value):\n if self.count() == 0:\n return\n index = self.currentIndex()\n tab_data = self.tabData(index)\n tab_data[name] = value\n return self.setTabData(index, tab_data)", "def __setitem__(self, key, value):\n Identifier.checkIdentifier(key)\n self.graph.saveExtendedAttributes(self.entityId, {key: value})", "def testSetItem(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g[6] = 7\n\t\tc.replay()\n\t\tx.g[6] = 7\n\t\tc.verify()", "def set_user(self, user):\n self._user = user", "def __setitem__(self, key, value):\n index=self._index(key)\n if index==-1:\n self._item.append(Item(key,value))\n self._size+=1\n else:\n self._item[index].value=value", "def setItem(self,row,column,value):\n data = self.data\n if row not in data:\n data[row] = {}\n data[row][column] = value\n self.hasChanged = True", "def __setitem__(self, key, item):\n if key>=len(self.trained_rqrmi):\n raise KeyError('Stage index invalid')\n self.trained_rqrmi[key]=item\n self.rqrmi_state_changed=True", "def set_item(self, key, value):\n key, value = str(key), str(value)\n key = self.locate.match_context_key(key)\n replaced = self.selector.get(key, None)\n self.selector[key] = value\n return key, replaced", "def __setitem__(self, index: int, value: object) -> None:\n self.set_at_index(index, value)", "def setContents(self, item):\n if item == None:\n self.pot.a(None, 0)\n else:\n self.pot.a(CraftMagicNumbers.getItem(item.getItemType()), item.getData())\n # PAIL: rename", "def set_value ( self, object, value ):\n object[ self.index ] = value", 
"def f_set_single(self, name, item):\n\n if self.v_stored:\n self._logger.debug(\n \"You are changing an already stored result. If \"\n \"you not explicitly overwrite the data on disk, this change \"\n \"might be lost and not propagated to disk.\"\n )\n\n if name == PickleResult.PROTOCOL:\n raise AttributeError(\n \"You cannot name an entry `%s`\" % PickleResult.PROTOCOL\n )\n\n self._data[name] = item", "def __setitem__(self, key, val):\n self.set[key] = val", "def profile_data(self, profile_data):\n\n self._profile_data = profile_data", "def SetUserData(self, key, data):\n self._userdata[key] = data", "def save_user_profile(instance, **_):\n instance.profile.save()", "def __setitem__(self, item: str, value: int) -> None:\n self.stoi[item] = value", "async def ws_set_preferred_item(\n self,\n hass: HomeAssistant,\n connection: websocket_api.ActiveConnection,\n msg: dict[str, Any],\n ) -> None:\n try:\n self.storage_collection.async_set_preferred_item(msg[self.item_id_key])\n except ItemNotFound:\n connection.send_error(\n msg[\"id\"], websocket_api.const.ERR_NOT_FOUND, \"unknown item\"\n )\n return\n connection.send_result(msg[\"id\"])", "def setSuit(self, arg):\n self.suit = arg", "def set_name(self, item_name):\r\n self.name = item_name", "def __setitem__(self, idx: int, value: Card):\n self.deck[idx] = value", "def __setitem__(self, key, value):\r\n if key == 'name':\r\n self.name = value\r\n elif key == 'tab_id':\r\n self.tab_id = value\r\n else:\r\n raise KeyError('Key {0} cannot be set in tab {1}'.format(key, self.to_json()))", "def item_starred(self, item):\n self.update_item(item)", "def __setitem__(self, item, value):\n index = self.reindex(item)\n self.parent.__setitem__(index, value)" ]
[ "0.77816904", "0.77816904", "0.7673996", "0.7493998", "0.72386533", "0.68001693", "0.66733706", "0.6579954", "0.6452752", "0.6452752", "0.6386416", "0.62725997", "0.6261532", "0.6182471", "0.616033", "0.6158893", "0.6145247", "0.6110622", "0.60548663", "0.6047665", "0.60314333", "0.60142857", "0.59918827", "0.5946632", "0.59213793", "0.5885403", "0.58426964", "0.5826014", "0.5826014", "0.5826014", "0.5822565", "0.5822565", "0.58218026", "0.5800833", "0.57960826", "0.5763051", "0.57627684", "0.575689", "0.5751306", "0.5734876", "0.5719871", "0.5717504", "0.568859", "0.56722105", "0.5668885", "0.56593746", "0.56562644", "0.5654884", "0.56393474", "0.56393474", "0.56393474", "0.563473", "0.56307065", "0.5619425", "0.5604709", "0.56005675", "0.5597878", "0.5590935", "0.55884844", "0.5578213", "0.5568265", "0.55617744", "0.55601585", "0.55574846", "0.55502653", "0.5549816", "0.55299854", "0.55203706", "0.55192244", "0.55188066", "0.55188066", "0.55167526", "0.55140984", "0.5510231", "0.5508848", "0.55086166", "0.5508471", "0.5507553", "0.5505694", "0.55052626", "0.55043167", "0.54939514", "0.5486575", "0.54836136", "0.54828775", "0.5482873", "0.54804784", "0.5478984", "0.54732186", "0.5468072", "0.5455152", "0.545482", "0.54540205", "0.5450503", "0.545048", "0.54437333", "0.543115", "0.54308796", "0.5425082", "0.5416451" ]
0.5924909
24
Updates the internal profile with the mapping provided.
def update(self, profile: Dict[datetime.time, float]) -> None:
    if self._profile is None:
        self._profile = profile
    else:
        self._profile.update(profile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, mapping):\n if not ismapping(mapping):\n raise TypeError(\"mapping type required\")\n field_names = getpyattr(type(self), 'field_names')\n for key, value in mapping.items():\n if key in field_names:\n setattr(self, key, value)", "def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)", "def update(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('update',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })", "def update(self, profiles, matches):\n raise NotImplementedError()", "def update_profile(self, channels=None): # pragma: no cover\n pass", "def mapping(self, mapping):\n self.set_mapping(mapping)", "def update(self, mapItem: MapItem):\n pass", "def update(\n self,\n mapping: Mapping | Iterable[tuple[str, Any]] | None = None,\n **kwargs: Any,\n ) -> None:\n with self.changed.blocked():\n if mapping:\n items = mapping.items() if isinstance(mapping, Mapping) else mapping\n for key, value in items:\n getattr(self, key).value = value\n for key, value in kwargs.items():\n getattr(self, key).value = value\n self.changed.emit()", "def setMappedInfo(self, mapped_info):\n \n self.mapped_info = mapped_info", "def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)", "def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )", "def update(self, upddict):\n\t\tfor (key, value) in upddict.iteritems():\n\t\t\tsetattr(self, key, value)", "def profile_data(self, profile_data):\n\n self._profile_data = profile_data", "def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)", "def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)", "def put(self, request, flavor_profile_id):\n update_flavor_profile(request)", "def update(self, key, value):\n if key in self.map:\n self.map[key] = value", "def current_mapping(self, current_mapping):\n self._current_mapping = current_mapping", "def update(self, other_cmap):\r\n if not isinstance(other_cmap, CorrectMap):\r\n raise Exception('CorrectMap.update called with invalid argument %s' % other_cmap)\r\n self.cmap.update(other_cmap.get_dict())\r\n self.set_overall_message(other_cmap.get_overall_message())", "def update(self):\n self.send_tf_msg()\n super(Map).update()", "def update(self, mapper_info: dict):\n self.update_from_dict(\n [\n \"form_id\",\n \"form_name\",\n \"form_revision_number\",\n \"process_key\",\n \"process_name\",\n \"status\",\n \"comments\",\n \"modified_by\",\n ],\n mapper_info,\n )\n self.commit()", "def applyMapping(self):\n pass", "def _update(self, other):\n # NOTE: detail map properties should NEVER be overridden. NEVER. EVER. 
kthx.\n if other.use_alpha:\n self.use_alpha = True\n if other.mipmap:\n self.mipmap = True", "def test_update_risk_profile_using_put(self):\n pass", "def update_dict(new,old):", "def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass", "def update(self, obj):\n self.identity_map[obj._instance_key] = obj\n self.register_dirty(obj)", "def set_profile(self, profile: str):\n self._profile = profile", "def recursive_update(\n base_dict: typing.Dict[typing.Any, typing.Any],\n new_dict: typing.Mapping[typing.Any, typing.Any],\n ) -> None:\n for key, value in new_dict.items():\n if isinstance(value, collections.Mapping) and (\n base_dict.get(key) is not None\n ):\n TrainingConfig.recursive_update(base_dict[key], value)\n else:\n base_dict[key] = value", "def update_map(mapping, map_file):\n #Replace commas in mapping string with newlines\n mapping = mapping.replace(',', '\\n')\n\n try:\n with open(map_file, 'w') as f:\n f.write(mapping)\n except IOError as e:\n logging.error(\"Can not write %s\", map_file)\n logging.error(e)", "def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)", "def update(self,\n port_mirroring_profile_id,\n port_mirroring_profile,\n ):\n return self._invoke('update',\n {\n 'port_mirroring_profile_id': port_mirroring_profile_id,\n 'port_mirroring_profile': port_mirroring_profile,\n })", "def update_combo_profile(self):\n self._get_selected_model().metadata[\"profile\"] \\\n = self.combo_profile.currentText().lower()\n return None", "def update(self, **kwargs: Any):\n if not kwargs:\n return False\n for key, value in kwargs.items():\n if key.lower() == _PROFILE.lower():\n self._set_profile(value)\n else:\n try:\n self._config_parser.set(self.profile, key, str(value))\n except NoSectionError:\n # Create and set default profile if it does not exist in .bonsaiconfig\n self._set_profile(self.profile)\n self._config_parser.set(self.profile, key, str(value))\n\n if not self._write_dot_bonsaiconfig():\n return False\n\n self._parse_config(self.profile)\n\n return True", "def update_map(user, main_topic, subtopic, url=None):\n # does the knowledge map exists?\n new = False\n the_map = get_map(user, main_topic)\n\n if the_map is None:\n the_map = KnowledgeMap(main_topic)\n new = True\n\n the_map.update(subtopic, url)\n save_map(user, main_topic, the_map)\n return new", "def put(self, entity, schema):\n profile = entity.profiles.get_or_404(schema=schema)\n try:\n update_data = json.loads(request.data)\n except json.JSONDecodeError as e:\n raise APIBadRequest(str(e))\n\n if 'identity' in update_data:\n profile.identity = update_data['identity']\n if 'servers' in update_data:\n profile.servers = update_data['servers']\n\n profile.save()\n\n return jsonify(profile.to_json()), 200", "def __profileChanged(self, inst, topic, value):\n\n old, new = value\n\n if new is orthoeditprofile.OrthoEditProfile:\n self.__addEditMenu()\n elif old is orthoeditprofile.OrthoEditProfile:\n self.__removeEditMenu()", "def _1_profile(self, _1_profile):\n\n self.__1_profile = _1_profile", "def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover", "def setprofile(variable, value, account, pair):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n keys = []\n values = []\n if pair:\n for p in pair:\n key, value = p.split(\"=\")\n keys.append(key)\n values.append(value)\n if variable 
and value:\n keys.append(variable)\n values.append(value)\n\n profile = Profile(keys, values)\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n\n json_metadata = Profile(acc[\"json_metadata\"] if acc[\"json_metadata\"] else {})\n json_metadata.update(profile)\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)", "def update_segmentation_map(segmap, object_map):\n obj_pix = object_map != 0\n segmap[obj_pix] = object_map[obj_pix]\n return segmap", "def setup(self, profile_map=None):\n if profile_map:\n self.map_profile(profile_map)\n self.op_setup()\n self._setup()", "def update_profile(profile_id):\n \n profile = mongo.db.profiles\n profile.find_one_and_update({'_id': ObjectId(profile_id)},\n {'$set': {'date': datetime.utcnow(),\n 'headline': request.form.get('headline'),\n 'bio': request.form.get('bio'),\n 'xp': request.form.get('xp'),\n 'interests': request.form.get('interests'),\n 'stack': request.form.get('stack'),\n 'languages': request.form.get('languages'),\n 'frameworks': request.form.get('frameworks'),\n 'github': request.form.get('github'),\n 'linkedin': request.form.get('linkedin')\n }\n }\n )\n return redirect(url_for('dashboard'))", "def test_update_hyperflex_node_profile(self):\n pass", "def update_profile(username):\n\n description = request.json.get('description')\n token = request.headers.get('token')\n\n if description is None:\n return jsonify({'message': 'New description not provided'}), 404\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != username:\n return jsonify({'message': \"You may not edit others profiles\"}), 404\n\n if username not in Profiles.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n Profiles[username]['description'] = description\n return Profiles[username]", "def _update(self):\n path = \"/members/%s\" % self._dict['member_id']\n data = self.extract()\n if self._dict['member_status_id'] in (\n MemberStatus.Active, MemberStatus.Error, MemberStatus.OptOut):\n data['status_to'] = self._dict['member_status_id']\n if not self.account.adapter.put(path, data):\n raise ex.MemberUpdateError()", "def updateMap(self) :\n\t\tself.dot.setPos( \\\n\t\t (self.avatarNP.getX()/(self.modelSizeX))*0.79+0.4, 0, \\\n\t\t (self.avatarNP.getY()/(self.modelSizeY))*0.79+0.21)\n\t\tfor id in self.remoteMap:\n\t\t\tself.remoteMap[id][3].setPos( \\\n\t\t\t\t(self.remoteMap[id][0].getX() / \\\n\t\t\t\t\tself.modelSizeX)*0.79+0.4, \\\n\t\t\t\t0, (self.remoteMap[id][0].getY() / \\\n\t\t\t\t\tself.modelSizeY)*0.79+0.21)", "def update_profile(orcid_id, data=None):\n \n u = db.session.query(User).filter_by(orcid_id=orcid_id).first()\n if u:\n u.updated = datetime.utcnow()\n if data:\n u.profile = data\n # save the user\n db.session.begin_nested()\n try:\n db.session.add(u)\n db.session.commit()\n except exc.IntegrityError as e:\n db.session.rollback()\n # per PEP-0249 a transaction is always in progress \n db.session.commit()", "def update_user_profile(user_info):\n user_id = user_info[\"USER_ID\"]\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"username\": user_info[\"username\"],\n \"email\": user_info[\"email\"],\n \"avatar\": user_info[\"avatar\"],\n \"githubURL\": user_info[\"githubURL\"],\n \"linkedinURL\": 
user_info[\"linkedinURL\"],\n \"stackoverflowURL\": user_info[\"stackoverflowURL\"],\n \"skills\": user_info[\"skills\"],\n }\n },\n upsert=False,\n )", "def mapper_updated(self):\n self.invalidate()\n return", "def update_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass", "def change_map_up(self):\n if self.current_map_idx > 0:\n self.change_map(self.current_map_idx + 1)", "async def test_update(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps({'name': 'new name'}).encode('utf-8')", "def update_flavor_profile(request, **kwargs):\n data = request.DATA\n flavor_profile_id = data['flavor_profile']['id']\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.update_flavor_profile(\n flavor_profile_id,\n name=data['flavor_profile'].get('name'),\n provider_name=data['flavor_profile'].get('provider_name'),\n flavor_data=data['flavor_profile'].get('flavor_data'),\n )\n\n return _get_sdk_object_dict(flavor_profile)", "def update(self, mappings):\n self._mappings.update(mappings)\n return self", "def do_update(self):\n params = self.inputs\n new_profile_id = params.get('new_profile_id', None)\n if new_profile_id and new_profile_id == self.entity.profile_id:\n params.pop('new_profile_id')\n\n if not params:\n return self.RES_OK, 'No property to update.'\n\n res = self.entity.do_update(self.context, params)\n if res:\n return self.RES_OK, 'Node updated successfully.'\n else:\n return self.RES_ERROR, 'Node update failed.'", "def put(mapper_id):\n application_json = request.get_json()\n\n try:\n mapper_schema = FormProcessMapperSchema()\n dict_data = mapper_schema.load(application_json)\n sub = g.token_info.get('preferred_username')\n dict_data['modified_by'] = sub\n\n FormProcessMapperService.update_mapper(mapper_id, dict_data)\n\n return 'Updated successfully', HTTPStatus.OK\n except ValidationError as mapper_err:\n return {'systemErrors': mapper_err.messages}, HTTPStatus.BAD_REQUEST", "def _profile(self) -> None:\n if self.use_case.profile:\n if self._profile_stats is None:\n self._profile_stats = pstats.Stats()\n if self._current_profiler is not None:\n self._current_profiler.disable()\n self._profile_stats.add(self._current_profiler)\n # TODO: use clear() instead of always creating a new profile\n self._current_profiler = cProfile.Profile()\n self._current_profiler.enable()", "def partial_update(self, request, *args, **kwargs):\n profile = self.get_object()\n metadata = profile.metadata or {}\n if request.data.get(\"overwrite\") == \"false\":\n if isinstance(request.data.get(\"metadata\"), str):\n metadata_items = json.loads(request.data.get(\"metadata\")).items()\n else:\n metadata_items = request.data.get(\"metadata\").items()\n\n for key, value in metadata_items:\n if check_if_key_exists(key, metadata):\n metadata = replace_key_value(key, value, metadata)\n else:\n metadata[key] = value\n\n profile.metadata = metadata\n profile.save()\n return Response(data=profile.metadata, status=status.HTTP_200_OK)\n\n return super().partial_update(request, *args, **kwargs)", "def updateMap(self):\n self.clearMap()\n self.neofetchwin, 
self.neofetch, self.values = self.detect_neofetch()\n self.neofetch_parser(self.values)", "def test_update_payment_profile(self):\n self.cim.update_payment_profile(\n customer_profile_id=u\"122\",\n customer_payment_profile_id=u\"444\",\n card_number=u\"422222222222\",\n expiration_date=u\"2009-10\"\n )", "def put(self, request):\n profile = Profile.get_by_id(request.user.id)\n if not profile:\n return HttpResponse(status=403)\n user = CustomUser.objects.get(id=request.user.id)\n update_data = json.loads(request.body.decode('utf-8'))\n user.update(first_name=update_data.get('first_name'),\n last_name=update_data.get('last_name'))\n profile.update(\n birthday=update_data.get('birthday'),\n gender=update_data.get('gender'),\n hobbies=update_data.get('hobbies'),\n facebook=update_data.get('facebook'))\n data = profile.to_dict()\n return JsonResponse(data, status=200)", "def update_attr_map(self, attr, val):\n lock = threading.Lock()\n lock.acquire()\n self.attr_map[attr] = val\n lock.release()", "def update_attr_map(self, attr, val):\n lock = threading.Lock()\n lock.acquire()\n self.attr_map[attr] = val\n lock.release()", "def update_attributes_map(self, extended_attributes,\n extension_attrs_map=None):\n if not extension_attrs_map:\n return\n\n for resource, attrs in extension_attrs_map.items():\n extended_attrs = extended_attributes.get(resource)\n if extended_attrs:\n attrs.update(extended_attrs)", "def update(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('update',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })", "def update(self,\n ipfix_l2_profile_id,\n i_pfix_l2_profile,\n ):\n return self._invoke('update',\n {\n 'ipfix_l2_profile_id': ipfix_l2_profile_id,\n 'i_pfix_l2_profile': i_pfix_l2_profile,\n })", "def update_my_profile(\n body: Optional[UserProfileUpdate] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = UpdateMyProfile.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def include(self, map):\n self.map.update(map)", "def fusion_api_edit_server_profile_template(self, body, uri, api=None, headers=None):\n return self.profile_template.update(body, uri, api, headers)", "def put(self, id ):\n adm = Administration()\n print(api.payload)\n p = Profile.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_profile(p)\n return p, 200\n else:\n return '', 500", "def test_update_hyperflex_cluster_profile(self):\n pass", "def updateLoadInfo(self, loadInfo):\n self._loadInfo = loadInfo", "def updateMap(toname, fromname, map):\n fromobj = map[fromname]\n if isinstance(fromobj, ForcefieldResidue):\n if toname not in map:\n newres = ForcefieldResidue(fromname)\n map[toname] = newres\n for atomname in fromobj.atoms:\n map[toname].atoms[atomname] = fromobj.atoms[atomname]\n elif isinstance(fromobj, ForcefieldAtom):\n map[toname] = fromobj", "def _update_object(self, data_dict):\r\n pass", "def test_map_update_updates(self):\r\n partition = uuid4()\r\n cluster = 1\r\n TestQueryUpdateModel.objects.create(\r\n partition=partition, cluster=cluster,\r\n text_map={\"foo\": '1', \"bar\": '2'})\r\n TestQueryUpdateModel.objects(\r\n partition=partition, cluster=cluster).update(\r\n text_map__update={\"bar\": '3', \"baz\": '4'})\r\n obj = 
TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})", "def testUpdate(self):\n gen = self.gen\n prof = self.profile\n\n # Make sure attributes get updated with a change in the calculation\n # points.\n x = arange(0, 9, 0.1)\n prof.setCalculationPoints(x)\n self.assertTrue(gen._value is None)\n val = gen.value\n self.assertTrue(array_equal(x, prof.ycalc))\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n self.assertTrue(array_equal(val, prof.ycalc))\n self.assertTrue(array_equal(gen._value, prof.ycalc))\n\n # Make sure attributes get updated with a new profile.\n x = arange(0, 8, 0.1)\n prof = Profile()\n prof.setCalculationPoints(x)\n gen.setProfile(prof)\n self.assertTrue(gen._value is None)\n val = gen.value\n self.assertTrue(array_equal(x, prof.ycalc))\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n self.assertTrue(array_equal(val, prof.ycalc))\n self.assertTrue(array_equal(gen._value, prof.ycalc))\n return", "def modify_map(self, position):\n old_x, old_y = self.user.position\n news_x, news_y = position\n if self.check_position(position):\n self.object_in_case(news_x, news_y)\n if self.full_map[news_x][news_y] == \"A\":\n if self.user.objects_collect < self.objects_numbers:\n self.user.dead = True\n self.user.end = True\n else:\n self.full_map[old_x][old_y] = \"_\"\n self.full_map[news_x][news_y] = \"M\"\n self.user.position = position", "def visit_dict(self, sydict):\n self.current.update(sydict)", "def update_sp_profile(self,\n settings=None,\n headers=None,\n payload=None,\n active_validation=True,\n **request_parameters):\n check_type(headers, dict)\n check_type(payload, dict)\n if headers is not None:\n if 'X-Auth-Token' in headers:\n check_type(headers.get('X-Auth-Token'),\n basestring, may_be_none=False)\n\n _params = {\n }\n _params.update(request_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n }\n _payload = {\n 'settings':\n settings,\n }\n _payload.update(payload or {})\n _payload = dict_from_items_with_values(_payload)\n if active_validation:\n self._request_validator('jsd_e22c99a82f5764828810acb45e7a9e_v2_2_1')\\\n .validate(_payload)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n\n e_url = ('/dna/intent/api/v1/service-provider')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n json_data = self._session.put(endpoint_full_url, params=_params,\n json=_payload,\n headers=_headers)\n else:\n json_data = self._session.put(endpoint_full_url, params=_params,\n json=_payload)\n\n return self._object_factory('bpm_e22c99a82f5764828810acb45e7a9e_v2_2_1', json_data)", "def test_map_update_updates(self):\n partition = uuid4()\n cluster = 1\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster,\n text_map={\"foo\": '1', \"bar\": '2'})\n TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(\n text_map__update={\"bar\": '3', \"baz\": '4'})\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n self.assertEqual(obj.text_map, {\"foo\": '1', \"bar\": '3', \"baz\": '4'})", "def profile(self, profile):\n\n self.width = profile['width']\n self.height = profile['height']\n self.crs = profile['crs']\n self.interleave = profile['interleave']\n self.resampling = profile['resampling']", "def update_user_profile(req_data):\n 
logger.debug(\"entering function update_user_profile\")\n\n update_fields = {}\n for field in req_data:\n update_fields[field] = req_data[field]\n if \"password\" in req_data:\n update_fields[\"password\"] = generate_password_hash(req_data[\"password\"])\n\n find_query = {\"user_id\": current_user.id}\n update_query = {\"$set\": update_fields}\n run_update_one_query(config.USERS_COL, find_query, update_query,\n error=True, error_msg=PROFILE_UPDATE_FAILED_ERR_MSG)\n logger.info(\"Profile update success for %s\", current_user.id)\n\n logger.debug(\"exiting function update_user_profile\")\n return get_success_response(PROFILE_UPDATE_SUCCESS_MSG)", "def update(self, info):\n self.is_active = info.p.active\n self.rev_info = info.rev_info", "def test_update_zr_location_profile(self):\n pass", "def update(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('update',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })", "def patch(self, username, level):\n try:\n UserService.set_user_mapping_level(username, level)\n return {\"Success\": \"Level set\"}, 200\n except UserServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def change_map(self, map_name):\n cmd = '{}changeMap {}'.format(self.console, map_name)\n self.write_command(cmd)", "def update(self, obs, shared):\n self._last_obs[shared['env'].timestamp % IdleTracker._MINIMAP_IDLE_STEPS] = obs\n\n if self._idle_units_map is None:\n self._idle_units_map = np.zeros((shared['minimap'].width(obs), shared['minimap'].height(obs)))\n if self._blacklist_map is None:\n self._blacklist_map = np.zeros((shared['minimap'].width(obs), shared['minimap'].height(obs)))\n\n self._update_idle_units_map(obs, shared)\n self._update_blacklist_map(obs, shared)", "def test_mapfield_update(self):\n\n class Member(EmbeddedDocument):\n gender = StringField()\n age = IntField()\n\n class Club(Document):\n members = MapField(EmbeddedDocumentField(Member))\n\n Club.drop_collection()\n\n club = Club()\n club.members[\"John\"] = Member(gender=\"M\", age=13)\n club.save()\n\n Club.objects().update(set__members={\"John\": Member(gender=\"F\", age=14)})\n\n club = Club.objects().first()\n assert club.members[\"John\"].gender == \"F\"\n assert club.members[\"John\"].age == 14", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')", "def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = 
None\n mc.api_server_access_profile = profile_holder\n return mc", "def add_profile(self, profile):\r\n self.profiles.append(profile)", "def patch(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('patch',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })", "def update_properties(self, update_dict: Dict[str, Any]):\n self.event_properties.update(update_dict)", "def edit_profile():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify that user has logged in and the request is legit\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified != True: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # part2: check json\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_CREATE_USER_NO_JSON)\n if checked_json != True: return response\n # part3: verify json data\n try:\n user_email = login_session[\"login_user_email\"]\n except KeyError:\n # key error means we are offline til this far\n user_email = requested_json[\"email\"]\n # design decision: if there are invalid field names, only update the valid fields.\n # check updates keys and formats\n try:\n update_pairs = convert_to_underscore(requested_json[\"updates\"])\n \n if isinstance(update_pairs,dict) != True:\n response = generate_message(MESSAGE_UPDATE_PROFILE_NON_DICT,400)\n else:\n correct_format,valid_update_pairs, response = process_request_json(User,update_pairs)\n if correct_format == True: \n update_field(User, session, {\"email\": user_email},valid_update_pairs)\n response = generate_message(MESSAGE_UPDATE_PROFILE_SUCCESS,200)\n except KeyError:\n response = generate_message(MESSAGE_UPDATE_PROFILE_NO_ENTRY,400)\n return response", "def _update_bio_collection(self, player_bio_profile):\n self.db_sync(dataset=player_bio_profile, db_name=self.db_name, db_collection=self.player_bio_collection,\n db_operation=self.DB_UPDATE_ONE)", "def users_profile_update(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"email\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"email\"), 400\n token = auth.current_user()[1]\n content = request.form\n password = content[\"password\"] if \"password\" in content else None\n fullname = content[\"fullname\"] if \"fullname\" in content else None\n phone_number = content[\"phone_number\"] if \"phone_number\" in content else None\n photo = Photo.from_bytes(request.files['photo'].stream) if 'photo' in request.files else None\n try:\n self.auth_server.profile_update(email=email_query, user_token=token,\n password=password, fullname=fullname,\n phone_number=phone_number, photo=photo)\n except UnauthorizedUserError:\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n except UnexistentUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % email_query)\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % email_query), 
404\n return messages.SUCCESS_JSON, 200", "def _mark_study_mapping_update(self, unit_id: str) -> None:\n self._last_study_mapping_update_times[unit_id] = time.monotonic()", "def test_patch_user_identity_mapping(self):\n pass" ]
[ "0.63624704", "0.609009", "0.60749924", "0.5952606", "0.59126085", "0.5860821", "0.5851326", "0.58165675", "0.57689905", "0.5755722", "0.57045287", "0.56417894", "0.5599517", "0.55890405", "0.5579434", "0.55684793", "0.55682325", "0.5558063", "0.55190516", "0.5518738", "0.54925174", "0.5489183", "0.5453527", "0.54371965", "0.5415451", "0.54064655", "0.540205", "0.5388321", "0.53722847", "0.5354438", "0.5343568", "0.53415215", "0.5323078", "0.5319654", "0.53180367", "0.5317012", "0.5310054", "0.5293597", "0.5291977", "0.5291947", "0.5288331", "0.5275547", "0.52611345", "0.5254509", "0.5231084", "0.52176255", "0.52172476", "0.5217118", "0.5213316", "0.5210923", "0.5205952", "0.52049536", "0.520437", "0.5193771", "0.51908284", "0.51907825", "0.51901865", "0.51886153", "0.5179587", "0.51752454", "0.51699674", "0.5169626", "0.5169567", "0.5169567", "0.5169051", "0.51618546", "0.51585144", "0.51564807", "0.5156406", "0.5155162", "0.51447475", "0.5128026", "0.5126243", "0.5123565", "0.5123063", "0.5122471", "0.51133996", "0.5105834", "0.5098118", "0.5094894", "0.50922734", "0.5088384", "0.50605667", "0.5050708", "0.5043662", "0.503937", "0.5038381", "0.5037154", "0.503269", "0.502507", "0.501947", "0.5015899", "0.5014278", "0.50107145", "0.50094104", "0.50062746", "0.50061625", "0.5002488", "0.49985155", "0.4998469" ]
0.7125943
0
The density of air varies as a function of temperature.
def density_of_air(self) -> float: return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def air_density(self):\n return self.flow_field.air_density", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. Check the sensor\")", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho", "def density(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_p = iceair_h(0,0,1,wair,pres,temp=temp,airf=airf,dhum=dhum)\n dens = h_p**(-1)\n return dens", "def fGasDensity(GasGravity, Temperature, Pressure):\n\tGasConstant = 8.314\n\tPress = Pressure / 145.038 # MPa\n\tTemp = Temperature + 273.16 # Deg K\n\tPr = Press / (4.892 - (0.4048 * GasGravity))\n\tTr = Temp / (94.72 + (170.75 * GasGravity))\n\tA = 0.03 + 0.00527 * ((3.5 - Tr)**3)\n\tB = (0.642 * Tr) - (0.007 * (Tr**4)) - 0.52\n\tC = 0.109 * ((3.85 - Tr)**2)\n\tD = exp(-((0.45 + (8 * ((0.56 - (1 / Tr))**2))) * ((Pr**1.2) / Tr)))\n\tZ = (A * Pr) + B + (C * D)\n\treturn (28.8 * GasGravity * Press) / (Z * GasConstant * Temp)", "def fWaterDensity(Salinity, GasWaterRatio, Temperature, Pressure):\n\tTemp = Temperature\n\tPress = Pressure / 145.038\n\tSal = Salinity / 1000\n\tA = (-80 * Temp) + (-3.3 * (Temp**2)) + (0.00175 * (Temp**3))\n\tB = (489 * Press) + (-2 * Temp * Press) + (0.016 * (Temp**2) * Press)\n\tC = (-0.000013 * (Temp**3) * Press) + (-0.333 * (Press**2)) + (0.002 * Temp * (Press ** 2))\n\tPureWaterDensity = 1 + ((A + B + C) * 1e-6)\n\tA = 80 + (3 * Temp) + (-3300 * Sal) + (-13 * Press) + (47 * Press * Sal)\n\tB = (300 * Press) + (-2400 * Press * Sal)\n\tC = 0.000001 * (B + (Temp * A))\n\tD = 0.668 + (0.44 * Sal)\n\treturn PureWaterDensity + (Sal * (D + C))", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def temperature() -> float:", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def density(self):\n return self.get_density()", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def density_from_pressure(temperature, pressure, RH):\n # R = specific gas constant , J/(kg*degK) = 287.05 for dry air\n Rd = 287.05\n # http://www.baranidesign.com/air-density/air-density.htm\n # http://wahiduddin.net/calc/density_altitude.htm\n # Evaporation into the Atmosphere, Wilfried Brutsaert, p37\n # saturation vapor pressure is a polynomial developed by Herman Wobus\n e_so = 6.1078\n c0 = 0.99999683\n c1 = -0.90826951e-2\n c2 = 0.78736169e-4\n c3 = -0.61117958e-6\n c4 = 0.43884187e-8\n c5 = -0.29883885e-10\n c6 = 0.21874425e-12\n c7 = -0.17892321e-14\n c8 = 0.11112018e-16\n c9 = -0.30994571e-19\n \n p = (c0 + temperature*(\n c1 + 
temperature*(\n c2 + temperature*(\n c3 + temperature*(\n c4 + temperature*(\n c5 + temperature*(\n c6 + temperature*(\n c7 + temperature*(\n c8 + temperature*(\n c9)))))))))) \n \n sat_vp = e_so / p**8\n Pv = sat_vp * RH\n density = (pressure / (Rd * temperature)) * (1 - (0.378 * Pv / pressure))\n return density", "def getDensityEstimate(self):\n return self.density", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def density(self):\n return self._density", "def __density(self, x):\n\n z = np.power(self.rate, x) / m.factorial(x)\n return z * np.exp(-self.rate)", "def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9", "def density(self, alt):\n (Z, T, CN2, CO2, CO, CAr, CHe, CH, CM, WM) = self.altitude_profile(alt)\n\n # using eqn(42) of COESA for multiple gases\n M_i = [wmN2, wmO2, wmO, wmAr, wmHe, wmH] << (u.g / u.mol)\n n_i = [\n CN2.to_value(u.m**-3),\n CO2.to_value(u.m**-3),\n CO.to_value(u.m**-3),\n CAr.to_value(u.m**-3),\n CHe.to_value(u.m**-3),\n CH.to_value(u.m**-3),\n ] << (1 / u.m**3)\n rho = (n_i @ M_i) / Na\n return rho.to(u.kg / u.m**3)", "def calc_air_density(temperature, pressure, elevation_ref=None, elevation_site=None, lapse_rate=-0.113,\n specific_gas_constant=286.9):\n\n temp = temperature\n temp_kelvin = temp + 273.15 # to convert deg C to Kelvin.\n pressure = pressure * 100 # to convert hPa to Pa\n ref_air_density = pressure / (specific_gas_constant * temp_kelvin)\n\n if elevation_ref is not None and elevation_site is not None:\n site_air_density = round(ref_air_density + (((elevation_site - elevation_ref) / 1000) * lapse_rate), 3)\n return site_air_density\n elif elevation_site is None and elevation_ref is not None:\n raise TypeError('elevation_site should be a number')\n elif elevation_site is not None and elevation_ref is None:\n raise TypeError('elevation_ref should be a number')\n else:\n return ref_air_density", 
"def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def getDensity(h, R_w, R_sun): # k is a fitting constant\n\n R = np.sqrt(R_w**2+h**2)\n r = R/R_sun # units need to be in solar radii \n a = 77.1\n b = 31.4\n c = 0.954\n d = 8.30\n e = 0.550\n f = 4.63\n\n return (a*r**(-b) + c*r**(-d) + e*r**(-f))*10**8 #[cm-3]", "def get_flux_density(self):\n if self.no_flux is False:\n return self.snu_at_1GHz\n else:\n return -1", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def get_air_density(self, altitude):\n\n altitude /= 1000 # convert to kilometers\n\n return self.get_air_density_from_model(altitude)", "def Density(self, *args):\n return _gmat_py.AtmosphereModel_Density(self, *args)", "def density(self):\n return self.num_arcs() / (self.nframes / FRATE)", "def density(self):\n return _cantera.reactor_density(self.__reactor_id)", "def Density(material):\n if material == \"mild\":\n return 7850.0\n else:\n if material == \"al\":\n return 2700.0\n else:\n raise ValueError(\"Invalid material `\"+material+\"'\")", "def num_dens(self, temperature):\n return self.pressure/temperature*self.Const_N", "def electron_density(self):\n return N_avo * self.num_electrons * self.density / self.molar_mass", "def get_density(element):\n return pt.elements.isotope(element).density", "def x_density_function(self, x):\n return self.wavefunction(x) * self.wavefunction(x)", "def get_fiber_density_average():\n return Global_Module.global_fiber_density_with_average", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def local_density_mean(self):\n\n # the simulation units are msun / kpc ^3\n local = np.mean(self.dens)\n\n return local", "def get_fiber_density():\n return Global_Module.global_fiber_density", "def current_density(self, xyz_m, xyz_n=None):\n\n j = self.electric_field(xyz_m, xyz_n=xyz_n) / self.rho\n return j", "def get_density(self, asset=None):\n if asset is None or 'pc:density' not in asset.properties:\n return self.item.properties.get('pc:density')\n else:\n return asset.properties.get('pc:density')", "def t_rh_2_dewT(ds, var):\n ds['dew'] = 243.04 * (np.log(ds[var['rh']] / 100) + ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))/\\\n (17.625-np.log(ds[var['rh']] / 100) - ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))\n return ds", "def get_production_factor(self, temp_atmosphere):\n return 1.", "def test_density_multiple(self):\n earth = CoreMantleCrustModel()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)", "def rate_density(x, a):\n return a * x", "def tensor_density(self):\r\n from .converter import Converter\r\n return Converter.convert_density(self)", "def number_density(pressure, temperature, volume_mixing_ratio):\n return pressure*volume_mixing_ratio/(kb*temperature)", "def Ag_density():\n # initialise no infection default for the number of infections required\n agcurves = [np.zeros(cf.endtime + 1) for inf in cf.tinf]\n # for every infection, calculate its individual effect per timepoint\n for i in range(len(cf.tinf)):\n pag = cf.dose[i] # peak\n tai = 0 # tnow after infection\n while pag > 
0.01:\n pag = cf.dose[i] * math.exp(-float(tai) / cf.tdecay)\n agcurves[i][cf.tinf[i] + tai] = pag\n tai += 1\n if cf.tinf[i] + tai >= cf.endtime:\n break\n # sum up all effects\n agcurve_uncapped = np.sum(agcurves, axis=0)\n # set all values above 100% to 100%\n agcurve = [np.min([val, 1]) for val in agcurve_uncapped]\n\n return agcurve", "def potdensity(wair,temp,pres,ppot,airf=None,dhum=None,apot=None,\n tpot=None,dhpot=None,chkvals=False,chktol=_CHKTOL,airf0=None,\n dhum0=None,apot0=None,tpot0=None,dhpot0=None,chkbnd=False,\n mathargs=None):\n airf, dhum, apot, tpot, dhpot = eq_pot(wair,temp,pres,ppot,airf=airf,\n dhum=dhum,apot=apot,tpot=tpot,dhpot=dhpot,chkvals=chkvals,chktol=chktol,\n airf0=airf0,dhum0=dhum0,apot0=apot0,tpot0=tpot0,dhpot0=dhpot0,\n chkbnd=chkbnd,mathargs=mathargs)\n hp_p = iceair_h(0,0,1,wair,ppot,temp=tpot,airf=apot,dhum=dhpot)\n dpot = hp_p**(-1)\n return dpot", "def magnetic_flux_density(self, xyz):\n return self.mu * self.magnetic_field(xyz)", "def magnetic_flux_density(self, xyz):\n return self.mu * self.magnetic_field(xyz)", "def density_by_ideal_gas_law(\n p: tf.Tensor,\n r: tf.Tensor,\n t: tf.Tensor,\n ) -> tf.Tensor:\n return p / r / t", "def daily_temp(dft):\n\n tavg = (dft[\"T_Max\"] + dft[\"T_Min\"]) / 20 # tenths of degree C\n return tavg", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def test_density_multiple(self):\n earth = PREM()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def electron_density_per_m3(self):\n return self.electron_density * 1e6", "def population_density(self) -> float:\n return self.population / self.area", "def h_func(air_density, dt, rah):\n h = np.array(air_density, copy=True, ndmin=1)\n h *= 1004.\n h *= dt\n h /= rah\n return h", "def get_specific_heat() -> float:\n return 1006.0", "def number_density(self) -> u.m**-3:\n return self._number_density", "def fOilDensity(APIGravity, GasOilRatioOFU, GasGravity, Temperature, Pressure):\t\n\tT = Temperature\n\tP = Pressure / 145.038 # converts psia to MPa.\n\tGasOilRatio=GasOilRatioOFU*(28.3168/158.9873) # Converts scf/bbl to l/l\n\n\t# A reference density that can be used to characterize an oil Rho_0 is measured\n\t# at 15.6 degC and atmospheric pressure.\n\tRho_0 = 141.5 / (APIGravity + 131.5)\n\n\t# B_0 is a volume factor derived by Standing (1962)\n\tB_0 = 0.972 + 0.00038 * ((2.4 * GasOilRatio * ((GasGravity/Rho_0)**0.5) + T + 1.78)**1.175)\n\n\t# True densities of live oils are also calculated using B_0, but\n\t# the mass of dissolved gas must be included.\n\tRho_G = (Rho_0 + 0.0012*GasGravity*GasOilRatio) / B_0\n\n\t# The pressure dependence is comparatively small and the published data for density at\n\t# pressure pp can be described by the polynomial\n\tRho_GP = Rho_G + (0.00277*P - 1.71e-7*(P**3)) * ((Rho_G - 1.15)**2) + (3.49e-4*P)\n\n\t# The effect of temperature is larger, and one of the most\n\t# common expressions used to calculate the in-situ density\n\t# was developed by Dodson and Standing (1945).\n\t# Rho_T = Rho_P / (0.972 + 0.000381 * ((T + 17.78) ** 1.175))\n\t# This is accounted for in the B_0 and Rho_G terms which collapse when GasOilRation = 0\n\n\treturn Rho_GP", "def dx(self):\n values = self._interpolate_table(\"dx\")\n return values", "def calculate_dew_point(temp, hum):\n return temp - (100 - hum) / 5", "def PowerSpectralDensity(f):\n 
sky_averaging_constant = (20/3) # Sky Averaged <--- I got this from Jonathan's notes but I need\n # to check where he got it...\n L = 2.5*10**9 # Length of LISA arm\n f0 = 19.09*10**-3 \n\n Poms = ((1.5*10**-11)**2)*(1 + ((2*10**-3)/f)**4) # Optical Metrology Sensor\n Pacc = (3*10**-15)**2*(1 + (4*10**-3/(10*f))**2)*(1 + (f/(8*10**-3))**4) # Acceleration Noise\n Sc = 9*10**(-45)*f**(-7/3)*np.exp(-f**0.171 + 292*f*np.sin(1020*f)) * (1 \\\n + np.tanh(1680*(0.00215 - f))) \n\n PSD = (sky_averaging_constant)* ((10/(3*L**2))*(Poms + (4*Pacc)/((2*np.pi*f))**4)*(1 + 0.6*(f/f0)**2) + Sc) # PSD\n return PSD", "def gas_zfactor(T_pr, P_pr):\n # T_pr : calculated pseudoreduced temperature\n # P_pr : calculated pseudoreduced pressure \n from scipy.optimize import fsolve # non-linear solver\n import numpy as np\n\n a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475\n a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210\n\n def f(y):\n rho_pr, z = y\n c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))\n c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))\n c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))\n c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))\n\n f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1\n f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))\n return[f1, f2]\n\n solve = fsolve(f, [1, 1]) # initial guess\n return(solve[0], solve[1]) # result is density, z-factor", "def testDensityCalculation(self):\n known_densities = np.array([1.76776695e-01, 1.76776695e-01, 1.76776695e-01,\n 4.59619433e-01, 4.59619433e-01, 1.76776695e-01, 5.00000000e-01, \n 8.84538011e-02, 3.40206909e-02, 2.26040275e-04])\n densities = nb._get_local_densities() \n np.testing.assert_allclose(densities, known_densities)", "def density(x, kind=\"geopotential\"):\n\n rho = table(x, kind)[3]\n return rho", "def density(self, psi):\n return np.square(np.abs(psi))", "def air_humidity_method_qsat26air(air_temperature,surface_air_pressure,relative_humdity):\n es = vapor_pressure(air_temperature,surface_air_pressure)\n em = 0.01*relative_humdity*es\n air_humidity = 622.*em/(surface_air_pressure-0.378*em)\n return air_humidity", "def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def debye_length_m(electron_density, electron_temperature):\n return 0.069 * np.sqrt(electron_temperature / electron_density)", "def compute_dewpoint(temperature, humidity):\n\n temp_C = (temperature - 32) * 5 / 9 # Convert temperature from deg F to deg C\n rh = humidity / 100\n\n b = 18.678\n c = 257.14 # deg C\n\n gamma = math.log(rh) + (b * temp_C) / (c + temp_C)\n tdp = c * gamma / (b -gamma)\n\n tdp_F = 9 / 5 * tdp + 32 # Convert temperature from deg C to deg F\n return tdp_F;", "def density(R, Rs, rho0, gamma_inner, gamma_outer):\n x = R/Rs\n outer_slope = (gamma_outer-gamma_inner)/2\n return rho0 / (x**gamma_inner * (1 + x ** 2) ** outer_slope)", "def get_energy_density(self, obj):\n row = CreditCalculationService.get(\n category_id=obj.energy_density_category_id,\n effective_date=self.effective_date,\n model_name=\"EnergyDensity\"\n )\n\n return row.density if row else None", "def density(self):\n return self.nnz/self.dim", "def density(self) -> float:\n if self.is_directed():\n factor = 1\n else:\n factor = 2\n\n num_e = self._Impl.number_of_edges(directed_edges=True)\n num_v = self._Impl.number_of_vertices()\n\n density = (factor 
* num_e) / (num_v * (num_v - 1))\n return density", "def update_density(\n self,\n states: FlowFieldMap,\n additional_states: FlowFieldMap,\n ) -> FlowFieldVal:\n zz = additional_states.get('zz', [tf.constant(0, dtype=TF_DTYPE)] *\n self._params.nz)\n\n if 'T' in states:\n t = states['T']\n elif 'theta' in states:\n t = self._potential_temperature_to_temperature(states['theta'], zz)\n else:\n raise ValueError(\n 'Either temperature or potential temperature is required for the '\n 'ideal gas law.'\n )\n\n scalars = {\n sc_name: thermodynamics_utils.regularize_scalar_bound(states[sc_name])\n for sc_name in self._molecular_weights.keys()\n if sc_name != INERT_SPECIES\n }\n\n if scalars:\n scalars.update({\n INERT_SPECIES:\n thermodynamics_utils.compute_ambient_air_fraction(scalars)\n })\n sc_reg = thermodynamics_utils.regularize_scalar_sum(scalars)\n else:\n sc_reg = {\n INERT_SPECIES: [\n tf.ones_like(sc_i, dtype=TF_DTYPE)\n for sc_i in list(states.values())[0]\n ]\n }\n\n mixture_molecular_weight = (\n thermodynamics_utils.compute_mixture_molecular_weight(\n self._molecular_weights, sc_reg))\n\n return [\n self.density_by_ideal_gas_law(p_i, R_U / w_mix_i, t_i)\n for p_i, w_mix_i, t_i in zip(\n self.p_ref(zz, additional_states), mixture_molecular_weight, t)\n ]", "def get_production_factor(self, temp_atmosphere):\n a1 = self.damages_terms[0]\n a2 = self.damages_terms[1]\n a3 = self.damages_terms[2]\n pf = self.params.prod_frac\n return ne.evaluate('1 - pf * (1 - 1 / (1 + a1 * temp_atmosphere + a2 * temp_atmosphere ** a3))')", "def sat_vap_dens(nz, T, SWVD, plot=False):\r\n rho_v = np.zeros(nz)\r\n rho_v_dT = np.zeros(nz)\r\n if SWVD == \"Libbrecht\":\r\n rho_v = (\r\n np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)\r\n ) # [kg/m^3] Water vapor density\r\n rho_v_dT = (\r\n np.exp(-T_ref_L / T)\r\n / (f * T ** 2)\r\n * (\r\n (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)\r\n + (a1 - a2 * 2 * 273) * T_ref_L\r\n + a2 * T ** 2 * (T_ref_L / T + 1)\r\n )\r\n ) # [kg/m^3/K]\r\n elif SWVD == \"Calonne\":\r\n x = (L_Cal * mH2O) / (rho_i * kB)\r\n rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n elif SWVD == \"Hansen\":\r\n\r\n rho_v = (\r\n (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))\r\n * c6\r\n / R_v\r\n / T\r\n )\r\n rho_v_dT = (\r\n rho_v\r\n * np.log(10)\r\n * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)\r\n - rho_v / T\r\n )\r\n else:\r\n raise ValueError(\"Saturation water vapor density not available\")\r\n if plot:\r\n fig1 = plt.plot(T, rho_v)\r\n plt.title(\"Water vapor density with respect to temperature\")\r\n plt.show(fig1)\r\n fig2 = plt.plot(T, rho_v_dT)\r\n plt.title(\"Derivative of water vapor density with respect to temperature\")\r\n plt.show(fig2)\r\n return rho_v, rho_v_dT", "def _ion_densities(self):\n nD = self.ne_in*(6-self.zeff_in)/(5.)\n nC = self.ne_in*(self.zeff_in-1)/(30.)\n nC[np.where(nC<0)]=0.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def calculate_density(composition):\n density = 0.0\n\n for z, fraction in composition.items():\n density += fraction / ep.mass_density_kg_m3(z)\n\n return 1.0 / density", "def ambient_temperature(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"F\":\r\n return 
self.ambient_temperature_f\r\n elif self.temperature_scale == \"C\":\r\n return self.ambient_temperature_c\r\n else:\r\n return self._ambient_temperature", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def specificHeatCapacity(d, d_iso, density, cp):\n d_t = min(0.5 * np.sum(d), d_iso , 0.1)\n sum_d_i = d[0]\n i = 0 \n kappa = 0 \n while sum_d_i <= d_t:\n kappa += d[i] * density[i] * cp[i]\n i += 1\n sum_d_i += d[i]\n else:\n sum_d_i -= d[i]\n d_part = d_t - sum_d_i \n kappa += d_part * density[i] * cp[i]\n\n return kappa", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def density(self):\n self.convert_window(\"Density\", \"kilograms/liter\", [\"grains/gallon(UK)\", \"grains/gallon(US)\", \"grams/cubic centimeters\", \"grams/liter\", \"grams/millimeters\", \"kilograms/cubic meters\", \"kilograms/liter\", \"megagrams/cubic meter\", \"milligrams/liter\", \"milligrams/millimeters\", \"ounces/cubic inch\", \"ounces/gallon(UK)\", \"ounces/gallon(US)\", \"pounds/cubic foot\", \"pounds/cubic inch\", \"pounds/gallon(UK)\", \"pounds/gallon(US)\", \"slugs/cubic foot\", \"tonnes/cubic meter\", \"tons(UK)/cubic yard\", \"tons(US)/cubic yard\"])", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def calc_density(self, density_standard=None):\r\n\r\n if density_standard is not None:\r\n return density.ansi_density(self, density_standard)\r\n else:\r\n return density.auto_density(self)", "def air_dps(self) -> Union[int, float]:\n return self.air_weapon and (self.air_weapon.damage * self.air_weapon.attacks) / self.air_weapon.speed", "def airfoilEffT(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)" ]
[ "0.7871237", "0.77620345", "0.73548687", "0.7349471", "0.6937895", "0.6883913", "0.6820493", "0.6713705", "0.6696069", "0.66546506", "0.6654112", "0.6648508", "0.6641304", "0.6609871", "0.65910405", "0.6577765", "0.6577765", "0.6577765", "0.6552875", "0.654293", "0.65178937", "0.65178937", "0.6496522", "0.6484519", "0.64358747", "0.6431408", "0.6410963", "0.638376", "0.634123", "0.62969977", "0.62762845", "0.626123", "0.6247469", "0.6226004", "0.6225423", "0.621847", "0.61842096", "0.61727744", "0.6153972", "0.61362576", "0.6107171", "0.6095128", "0.60777414", "0.60749173", "0.60644174", "0.6051623", "0.6028602", "0.6018837", "0.60187936", "0.60048187", "0.5983411", "0.59827834", "0.59796697", "0.5978204", "0.5971733", "0.59459233", "0.5944215", "0.5944215", "0.5937146", "0.5935657", "0.59354603", "0.59293073", "0.5927464", "0.5923077", "0.5913391", "0.5911639", "0.59093046", "0.59072566", "0.5899808", "0.5891819", "0.58873737", "0.58869946", "0.58840734", "0.58800787", "0.58715373", "0.5857371", "0.58556217", "0.58471674", "0.5844613", "0.5828698", "0.5820803", "0.58207935", "0.5815455", "0.5798204", "0.57890844", "0.57815456", "0.57582355", "0.5740868", "0.5726576", "0.570988", "0.5686071", "0.5679464", "0.5672203", "0.56672615", "0.56620544", "0.5633769", "0.5621237", "0.5609811", "0.560808", "0.5601864" ]
0.7765341
1
The dynamic viscosity of air varies as a function of temperature.
def dynamic_viscosity_of_air(self) -> float: return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / ( self.ambient_temperature + 110.4 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kinematic_viscosity_of_air(self) -> float:\n\n return self.dynamic_viscosity_of_air / self.density_of_air", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def air_density(self):\n return self.flow_field.air_density", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def variable_vis(self):\n return self._variable_vis", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def liquid_viscosity(id, temperature=298.15, pressure=constants.atm): # noqa: A002\n return rx._misc._get_chemical(id, temperature, pressure).mul # noqa: SLF001", "def viscosity(altitude):\n t_ref = temperature(0) # R\n t = temperature(altitude) # R\n s = 198.72 # R\n mu_ref = 3.737 * 10 ** (-7) # [slug/(ft*s)]\n mu = mu_ref*((t/t_ref)**(3/2))*(t_ref + s)/(t + s) # [slug/(ft*s)]\n return mu", "def calculate_visibility(qv,qc,qr,qi,qs,T,p):\n Rd = 287.\n COEFLC = 144.7\n COEFLP = 2.24\n COEFFC = 327.8\n COEFFP = 10.36\n EXPLC = 0.88\n EXPLP = 0.75\n EXPFC = 1.\n EXPFP = 0.7776\n\n Tv = T * (1+0.61*qv) # Virtual temperature\n\n rhoa = p/(Rd*Tv) # Air density [kg m^-3]\n rhow = 1e3 # Water density [kg m^-3]\n rhoi = 0.917e3 # Ice density [kg m^-3]\n\n vovmd = (1+qv)/rhoa + (qc+qr)/rhow + (qi+qs)/rhoi\n\n conc_lc = 1e3*qc/vovmd\n conc_lp = 1e3*qr/vovmd\n conc_fc = 1e3*qi/vovmd\n conc_fp = 1e3*qs/vovmd\n\n # Make sure all concentrations are positive\n conc_lc[conc_lc < 0] = 0\n conc_lp[conc_lp < 0] = 0\n conc_fc[conc_fc < 0] = 0\n conc_fp[conc_fp < 0] = 0\n\n betav = COEFFC*conc_fc**EXPFC\\\n + COEFFP*conc_fp**EXPFP\\\n + COEFLC*conc_lc**EXPLC\\\n + COEFLP*conc_lp**EXPLP+1E-10\n\n vis = -np.log(0.02)/betav # Visibility [km]\n vis[vis > 24.135] = 24.135\n\n return vis", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)", "def temperature() -> float:", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. 
Check the sensor\")", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first term should be negative\n # - second term should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)", "def _CloudVar(self): \n # q is MA order of ARMA(1,q)\n q = int(round(self.lambda_avg/self.lambda_s))\n a = exp(-self.lambda_s / self.lambda_p) \n (var, var_ratio) = self._ARMAvar(q, a)\n # This variance is a multiple of the variance of the noise driving the\n # AR(1) model. This variance, in turn, is a multiple of the underlying\n # measurement variance, with the relationship given in Gillespie 96\n var = var * (1. - exp(-2*self.lambda_s / self.lambda_p))/2\n # print q, a\n return var", "def is_artificial(self):\n\t\treturn 0", "def get_specific_heat() -> float:\n return 1006.0", "def heat_vaporization_func(ts):\n heat_vaporization = np.copy(ts).astype(np.float64)\n heat_vaporization -= 273.15\n heat_vaporization *= -0.00236\n heat_vaporization += 2.501\n heat_vaporization *= 1E6\n return heat_vaporization.astype(np.float32)", "def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. 
Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def get_vsolar(self):\n return self.read_register(4098, 1, 3)", "def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f", "def get_production_factor(self, temp_atmosphere):\n return 1.", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def air_humidity_method_qsat26air(air_temperature,surface_air_pressure,relative_humdity):\n es = vapor_pressure(air_temperature,surface_air_pressure)\n em = 0.01*relative_humdity*es\n air_humidity = 622.*em/(surface_air_pressure-0.378*em)\n return air_humidity", "def simulated_reflectivity(pressure, temperature, vapor_mixing_ratio, liquid_mixing_ratio, snow_mixing_ratio=None,\n graupel_mixing_ratio=None, use_varint=False, use_liqskin=False):\n # Set values for constants with variable intercept\n R1 = 1e-15\n RON = 8e6\n RON2 = 1e10\n SON = 2e7\n GON = 5e7\n RON_MIN = 8e6\n RON_QR0 = 0.00010\n RON_DELQR0 = 0.25*RON_QR0\n RON_CONST1R = (RON2-RON_MIN)*0.5\n RON_CONST2R = (RON2+RON_MIN)*0.5\n\n # set constant intercepts\n rno_l = 8e6\n rno_s = 2e7\n rno_g = 4e6\n\n qvapor = da.clip(vapor_mixing_ratio, 0., None)\n qliquid = da.clip(liquid_mixing_ratio, 0., None)\n\n # If qgraupel but not qsnow, set qgraupel = qsnow\n if snow_mixing_ratio is None:\n if graupel_mixing_ratio is None:\n qsnow = da.zeros_like(qliquid)\n qgraupel = da.zeros_like(qliquid)\n else:\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n qsnow = da.zeros_like(graupel_mixing_ratio)\n qsnow[temperature <= 273.15] = qgraupel[temperature <= 273.15]\n else:\n qsnow = da.clip(snow_mixing_ratio, 0., None)\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n\n # density for liquid, snow, and graupel (kg m-3)\n rho_l = 1000. # liquid\n rho_i = 100. # snow\n rho_g = 400. 
# graupel\n\n # constant evaluation of gamma distribution\n gamma = 720.\n\n # Alpha constant\n alpha = 0.224\n\n # constant multiplication factors\n factor_l = gamma * 1e18 * (1./(np.pi*rho_l))**1.75\n s = gamma * 1e18 * (1./(np.pi*rho_i))**1.75 * (rho_i/rho_l)**2 * alpha\n g = gamma * 1e18 * (1./(np.pi*rho_g))**1.75 * (rho_g/rho_l)**2 * alpha\n\n # calculate virtual temperature\n virtual_t = virtual_temperature(temperature, qvapor)\n\n # dry gas constant\n Rd = 287.\n rho_air = pressure/(Rd*virtual_t)\n\n # adjust for brightband if use_liqskin=True\n if use_liqskin:\n raise NotImplementedError('Liquid skin correction not implemented')\n # factor_s = da.full_like(temperature, s)\n # factor_g = da.full_like(temperature, g)\n # try:\n # factor_s[temperature >= 273.15] = factor_s[temperature >= 273.15] / da.array([alpha])\n # factor_g[temperature >= 273.15] = factor_g[temperature >= 273.15] / da.array([alpha])\n # except ValueError:\n # factor_s = s\n # factor_g = g\n else:\n factor_s = s\n factor_g = g\n\n # calculate variable intercept if use_varint=True\n if use_varint:\n raise NotImplementedError('Variable intercepts not yet implemented')\n # temp_c = da.clip(temperature-273.15, temperature.min(), -0.001)\n # sonv = MIN(2.0D8, 2.0D6*EXP(-0.12D0*temp_c))\n #\n # gonv = gon\n # IF (qgr(i,j,k) .GT. R1) THEN\n # gonv = 2.38D0 * (PI*RHO_G/(rhoair*qgr(i,j,k)))**0.92D0\n # gonv = MAX(1.D4, MIN(gonv,GON))\n # END IF\n #\n # ronv = RON2\n # IF (qra(i,j,k) .GT. R1) THEN\n # ronv = RON_CONST1R*TANH((RON_QR0 - qra(i,j,k))/RON_DELQR0) + RON_CONST2R\n # END IF\n else:\n ronv = rno_l\n sonv = rno_s\n gonv = rno_g\n\n # Total equivalent reflectivity factor (z_e, in mm^6 m^-3) is\n # the sum of z_e for each hydrometeor species:\n z_e = (((factor_l*(rho_air*qliquid)**1.75)/(ronv**.75)) +\n ((factor_s*(rho_air*qsnow)**1.75)/(sonv**.75)) +\n ((factor_g*(rho_air*qgraupel)**1.75)/(gonv**.75)))\n\n # Adjust small values of Z_e so that dBZ is no lower than -30\n z_e = da.clip(z_e, .001, None)\n\n # Convert to dBZ\n dbz = 10.*da.log10(z_e)\n return dbz", "def atmosphereVariation(img, header, chanInfo, airmass=1.5, pwv=-1, removeSlope=True):\n freqs, values = CalcAtmTransmissionForImage(img, header, chanInfo, airmass=airmass, pwv=pwv, value='transmission')\n if removeSlope:\n slope, intercept = linfit(freqs, values, values*0.001)\n casalogPost(\"Computed atmospheric variation and determined slope: %f per GHz (%.0f,%.2f)\" % (slope,freqs[0],values[0]))\n values = values - (freqs*slope + intercept) + np.mean(values)\n maxMinusMin = np.max(values)-np.min(values)\n percentage = maxMinusMin/np.mean(values)\n freqs, values = CalcAtmTransmissionForImage(img, header, chanInfo, airmass=airmass, pwv=pwv, value='tsky')\n if removeSlope:\n slope, intercept = linfit(freqs, values, values*0.001)\n values = values - (freqs*slope + intercept) + np.mean(values)\n TmaxMinusMin = np.max(values)-np.min(values)\n Tpercentage = TmaxMinusMin*100/np.mean(values)\n stdValues = np.std(values)\n return(maxMinusMin, percentage, TmaxMinusMin, Tpercentage, stdValues)", "def visc(s, t, p):\n s, t, p = map(np.asanyarray, (s, t, p))\n return (1e-4 * (17.91 - 0.5381 * t + 0.00694 * t ** 2 + 0.02305 * s) /\n sw.dens(s, t, p))", "def intensity(self) -> int:", "def test_virtual_temperature():\n t = 288. 
* units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n tv = virtual_temperature(t, qv)\n assert_almost_equal(tv, 288.2796 * units.kelvin, 3)", "def sat_vap_dens(nz, T, SWVD, plot=False):\r\n rho_v = np.zeros(nz)\r\n rho_v_dT = np.zeros(nz)\r\n if SWVD == \"Libbrecht\":\r\n rho_v = (\r\n np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)\r\n ) # [kg/m^3] Water vapor density\r\n rho_v_dT = (\r\n np.exp(-T_ref_L / T)\r\n / (f * T ** 2)\r\n * (\r\n (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)\r\n + (a1 - a2 * 2 * 273) * T_ref_L\r\n + a2 * T ** 2 * (T_ref_L / T + 1)\r\n )\r\n ) # [kg/m^3/K]\r\n elif SWVD == \"Calonne\":\r\n x = (L_Cal * mH2O) / (rho_i * kB)\r\n rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n elif SWVD == \"Hansen\":\r\n\r\n rho_v = (\r\n (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))\r\n * c6\r\n / R_v\r\n / T\r\n )\r\n rho_v_dT = (\r\n rho_v\r\n * np.log(10)\r\n * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)\r\n - rho_v / T\r\n )\r\n else:\r\n raise ValueError(\"Saturation water vapor density not available\")\r\n if plot:\r\n fig1 = plt.plot(T, rho_v)\r\n plt.title(\"Water vapor density with respect to temperature\")\r\n plt.show(fig1)\r\n fig2 = plt.plot(T, rho_v_dT)\r\n plt.title(\"Derivative of water vapor density with respect to temperature\")\r\n plt.show(fig2)\r\n return rho_v, rho_v_dT", "def velocity(n_core, q, beta_invariant, material_dispersion=None):\n c = scipy.constants.speed_of_light\n if material_dispersion is None:\n A = 2 / c / (2 + q)\n B = q * n_core**2 / c / (2 + q)\n else:\n N1 = n_core + material_dispersion\n y = 2 * n_core / N1\n A = 2 * N1 / n_core * (1 + 0.25 * y) / c / (q + 2)\n B = q * n_core**2 * A - 1 / 4 / c * N1 * n_core * y\n\n return A * beta_invariant + B / beta_invariant", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def vorticity(self):\n \n ux,_ = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n _,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n # self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y'])\n self._obj['w'] = xr.DataArray(vy - 
ux, dims=['x', 'y','t'])\n \n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append('1/dt')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = ('1/dt')\n\n\n return self._obj", "def acc_visc(j,rA,vA,mA,rhoA,PA,hA,dW=kernel.dW_M4):\n assert rA.shape[0] == vA.shape[0] == mA.shape[0] == rhoA.shape[0] == hA.shape[0], \"arrays are not matched\"\n N = len(mA)\n c_j = c_gas(j,rhoA,PA)\n\n tot = 0\n for i in range(N):\n if i != j:\n\n r_ij = rA[j,:] - rA[i,:]\n r_ij1 = np.linalg.norm(r_ij)\n v_ij = vA[j,:] - vA[i,:]\n m_i = mA[i]\n c_i = c_gas(i,rhoA,PA)\n c_ij = 0.5 * (c_i + c_j)\n h_ij = 0.5 * (hA[i] + hA[j])\n rho_ij = 0.5 * (rhoA[i] + rhoA[j])\n\n c = np.dot(v_ij,r_ij)\n mu_ij = ( c * h_ij ) / ( r_ij1**2 + 0.01*h_ij**2 )\n\n a = ( -alpha * mu_ij * c_ij + beta * mu_ij**2 ) / rho_ij\n b = 0\n Pi_ij = a*dm.heavi(-c) + b*dm.heavi(c)\n\n # if Pi_ij == 0:\n # print(\"i,j:\",i,j)\n # print(\"c:\",c)\n # print(\"c_ij\",c_ij)\n # print(\"\")\n # assert Pi_ij != 0\n\n tot += m_i * h_ij**(-4) * Pi_ij * dW(r_ij1,h_ij) * (r_ij/r_ij1)\n\n return - tot", "def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho", "def _get_scalar_bar_visibility(self) :\n \n return self._scalar_bar_visibility", "def fGasAcousticVelocity(GasGravity, Temperature, Pressure):\n\tGasBulkModulus = fGasBulkModulus(GasGravity, Temperature, Pressure) # Pascals\n\tGasDensity = fGasDensity(GasGravity, Temperature, Pressure) * 1000 # Kg\n\treturn (GasBulkModulus / GasDensity)**0.5 # m/s", "def molar_mass_dry_air():\n return 28.9647", "def vaporPressure(temp: float) -> float:\n exponent = (17.27*temp)/(temp + 237.3)\n vp = 611*np.exp(exponent)\n\n return vp", "def heat_capacity_of_air(self) -> float:\n\n return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2", "def max_angular_acceleration():", "def neutrino_thermal_velocity(self, z):\n fac = 5./3.*5.*ss.zeta(5.)/ss.zeta(3.)\n vel = np.zeros(self.N_nu)\n vel = fac**.5*(const.kB*self.T_nu/self.M_nu)*(1.+z)*const.c\n return vel", "def test_virtual_potential_temperature():\n p = 999. * units.mbar\n t = 288. * units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n theta_v = virtual_potential_temperature(p, t, qv)\n assert_almost_equal(theta_v, 288.3620 * units.kelvin, 3)", "def get_dispersion_virial(self):\n if self._dispersion_virial is None:\n self._dispersion_virial = self._get_potential(self._system._dispersion)\n return self._dispersion_virial", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def determine_analytic_solution(self):\n\n self._Janalytic = np.where(self.xr <= self.xint, self.S, 0.5 * self.S)\n self._Hanalytic = np.where(self.xr <= self.xint, 0, 0.25 * self.S)\n self._Kanalytic = np.where(self.xr <= self.xint, 1./3. * self.S,\n 1./6. 
* self.S)", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j", "def RemoteCavity(T_in, p_in, m_dot, d_inner, l_pipe, Q_ex, N):\r\n\r\n ## Estimation of the influence of the arcs\r\n # Amount of 180° arcs: 5\r\n # Resistance coefficient for the 180° arc equal to 2*90° arc value according to VDI Heatatlas!\r\n f_arc = 2 * 1.3\r\n # Calculation according to VDI Heatatlas 2013\r\n # Assumption isoenthalpic flow\r\n state_Arc = FlowRestriction(T_in, p_in, m_dot, d_inner, 5*f_arc)\r\n p_Arc = state_Arc.get(\"p\")\r\n T_Arc = state_Arc.get(\"T\")\r\n\r\n ## Estimation of the external heat load on a compressible flow\r\n # Preparation of the variables to use the SimplePipe function\r\n # Heat transfer area of one pipe. Attention: d_inner is used!\r\n A_pipe = np.pi * d_inner * l_pipe #m²\r\n # Specific external heat load\r\n q_pipe = Q_ex/A_pipe #W/m²\r\n\r\n # Calling of the function SimplePipe\r\n state_out = SimplePipe(T_Arc, p_Arc, m_dot, d_inner, l_pipe, N, 0, q_pipe)\r\n #Transfer results\r\n p_out = state_out.get(\"p\")\r\n T_out = state_out.get(\"T\")\r\n h_out = state_out.get(\"h\")\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n\r\n return state_out", "def constant_temp(self, numIterations):\n return 1 + self.alpha", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def func_d23_318(n, series):\n if series == \"3D3\":\n try: \n return np.sqrt((3*os_3D3[str(n)]*wl_3D3[str(n)]*1e-9*hbar*e**2)/(4*np.pi*m_e*c))\n except:\n return 0", "def get_D_C3H8_air(self, T):\n\n self.set_TempPres_dependents(T)\n\n self.D_C3H8_air = (\n 2. / 3. * np.sqrt(const.k_B * T / np.pi * 0.5 * (1. /\n self.air.m + 1. / self.fuel.m)) / (np.pi * (0.5 *\n (self.air.d + self.fuel.d)) ** 2.) 
/ self.air.n\n )\n\n return self.D_C3H8_air", "def sea_still_water_pressure(z, t1, rho=1.025, g=9.81):\r\n\r\n if z <= t1:\r\n return rho * g * (t1 - z)\r\n else:\r\n return 0", "def ventilation_rate(self):\n # TODO: calculate based on MERV ratings/efficiency/power/etc.\n return (\n sum(v.calculate_ach(self.volume) for v in self.air_quality_measures)\n + self.outdoor_air_ventilation\n )", "def vis(self):\n \treturn self._vis", "def aliveness(self, physics):\n return 0.", "def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density", "def get_vac_lines(self):\n\n b_sig = np.where(self.AirglowLines['obs_eint'] > 5)\n bVL = self.air_to_vac(self.AirglowLines['obs_wave'])\n bVL = bVL[b_sig] #nm to A\n self.BlueVacLines = bVL[bVL < 700]\n\n r_sig = np.where(self.AirglowLines['obs_eint'] > 5)\n rVL = self.air_to_vac(self.AirglowLines['obs_wave'])\n rVL = rVL[r_sig] #nm to A\n self.RedVacLines = rVL[rVL > 560]", "def __getitem__(self, i):\n T0, S0 = get_surface_ts(self.nc, i)\n \n # average the variables if we got multiple time elements\n if isinstance(i, slice):\n T0, S0, = T0.mean(axis=0), S0.mean(axis=0)\n if self.p == 0.:\n rho, drhodT, drhodS = jmd95.eos.state_surface(T0, S0)\n else:\n rho, drhodT, drhodS = jmd95.eos.state(self.p, T0, S0)\n return rho", "def thermal_state(self, beta: float = .1) -> numpy.ndarray:\n rho = numpy.exp(-beta * self.cost)\n return rho / numpy.sum(rho)", "def test_static_stability_adiabatic():\n pressures = [1000., 900., 800., 700., 600., 500.] * units.hPa\n temperature_start = 20 * units.degC\n temperatures = dry_lapse(pressures, temperature_start)\n sigma = static_stability(pressures, temperatures)\n truth = np.zeros_like(pressures) * units('J kg^-1 hPa^-2')\n # Should be zero with a dry adiabatic profile\n assert_almost_equal(sigma, truth, 6)", "def test_vertical_velocity_pressure_dry_air():\n w = 1 * units('cm/s')\n omega_truth = -1.25073619 * units('microbar/second')\n omega_test = vertical_velocity_pressure(w, 1000. 
* units.mbar, 273.15 * units.K)\n assert_almost_equal(omega_test, omega_truth, 6)", "def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y", "def dvdt(self, args: Dict) -> float:\n if self.channel_bool['leak']:\n i_leak: float = self.leak.i(args['v'])\n else:\n i_leak: float = 0.\n \n if self.channel_bool['nav']:\n i_nav: float = self.nav.i(args['v'], h=args['h_nav'])\n else:\n i_nav: float = 0.\n\n if self.channel_bool['kvhh']:\n i_kvhh: float = self.kvhh.i(args['v'], n=args['n_kvhh'])\n else:\n i_kvhh: float = 0.\n\n if self.channel_bool['kva']:\n i_kva: float = self.kva.i(args['v'], h=args['h_kva'])\n else:\n i_kva: float = 0.\n\n if self.channel_bool['kvsi']:\n i_kvsi: float = self.kvsi.i(args['v'], m=args['m_kvsi'])\n else:\n i_kvsi: float = 0.\n\n if self.channel_bool['cav']:\n i_cav: float = self.cav.i(args['v'])\n else:\n i_cav: float = 0.\n\n if self.channel_bool['kca']:\n i_kca: float = self.kca.i(args['v'], ca=args['ca'])\n else:\n i_kca: float = 0.\n \n if self.channel_bool['nap']:\n i_nap: float = self.nap.i(args['v'])\n else:\n i_nap: float = 0.\n\n if self.channel_bool['kir']:\n i_kir: float = self.kir.i(args['v'])\n else:\n i_kir: float = 0.\n\n if self.channel_bool['ampar']:\n i_ampar: float = self.ampar.i(args['v'], s=args['s_ampar'])\n else:\n i_ampar: float = 0.\n\n if self.channel_bool['nmdar']:\n i_nmdar: float = self.nmdar.i(args['v'], s=args['s_nmdar'])\n else:\n i_nmdar: float = 0.\n\n if self.channel_bool['gabar']:\n i_gabar: float = self.gabar.i(args['v'], s=args['s_gabar'])\n else:\n i_gabar: float = 0.\n\n return ((-10.0*self.params.area \n * (i_leak\n + i_nav \n + i_kvhh \n + i_kva \n + i_kvsi \n + i_cav \n + i_kca \n + i_nap \n + i_kir) \n - (i_ampar \n + i_nmdar \n + i_gabar))\n / (10.0*self.params.cm*self.params.area))", "def temphum_plot(self, kwargs=None):\n\n def valuechange():\n \"\"\"This is the function which is called, when a value is changed in the spin boxes\"\"\"\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n 
self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )\n\n def dry_air_action():\n if dry_air_btn.isChecked():\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"ON\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. on\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = True\n\n else:\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"OFF\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. off\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = False\n\n def light_action():\n \"\"\"This function is debricated\"\"\"\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False\n\n def check_light_state():\n if (\n self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights on\"\n ): # Checks if the lights are on and the button is off\n light_btn.setText(\"Lights on\")\n light_btn.setStyleSheet(\"background : rgb(0,255,0); border-radius: 5px\")\n elif (\n not self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights off\"\n ):\n light_btn.setText(\"Lights off\")\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n def config_plot(plot, plot2, pg):\n plot = plot.plotItem\n plot.setLabel(\"right\", \"humidity\", units=\"%\")\n plot.setLabel(\"bottom\", \"time\")\n plot.setLabel(\"left\", \"temperature\", units=\"Celsius\")\n plot.getAxis(\"left\").setPen(pg.mkPen(color=\"#c4380d\", width=3))\n plot.getAxis(\"right\").setPen(pg.mkPen(color=\"#025b94\", width=3))\n plot.showAxis(\"top\", show=True)\n plot.getAxis(\"top\").setTicks([])\n plot.getAxis(\"bottom\").setScale(1e-9)\n # plot.setRange(yRange=[15, 35])\n\n # For second plot\n plot.scene().addItem(\n plot2\n ) # inserts the second plot into the scene of the first\n plot2.setGeometry(plot.vb.sceneBoundingRect())\n plot.getAxis(\"right\").linkToView(\n plot2\n ) # links the second y axis to the second plot\n plot2.setXLink(plot) # sync the x axis of both plots\n # plot2.setRange(yRange=[0, 50])\n\n def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n \"\"\"This function cuts an array to a maximum time difference\n This function is supposed to be used only for temp and humidity shaped arrays\n 
\"\"\"\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass\n\n def update_temphum_plots(kwargs=None):\n # for rooms in self.rooms:\n if self.variables.default_values_dict[\"settings\"][\"new_data\"]:\n temphum_plot.clear() # clears the plot and prevents a memory leak\n hum_plot_obj.clear()\n p1 = temphum_plot.plotItem\n\n ax = p1.getAxis(\"bottom\") # This is the trick\n __cut_arrays(\n self.variables.meas_data,\n float(\n self.variables.default_values_dict[\"settings\"].get(\n \"temp_history\", 3600\n )\n ),\n [\"temperature\", \"humidity\"],\n )\n ax.setTicks(\n [\n get_thicks_for_timestamp_plot(\n self.variables.meas_data[\"temperature\"][0],\n 5,\n self.variables.default_values_dict[\"settings\"][\n \"time_format\"\n ],\n )\n ]\n )\n\n try:\n if len(self.variables.meas_data[\"temperature\"][0]) == len(\n self.variables.meas_data[\"humidity\"][1]\n ): # sometimes it happens that the values are not yet ready\n p1.plot(\n self.variables.meas_data[\"temperature\"][0],\n self.variables.meas_data[\"temperature\"][1],\n pen={\"color\": \"r\", \"width\": 2},\n clear=True,\n )\n plot_item = setpg.PlotCurveItem(\n self.variables.meas_data[\"humidity\"][0],\n self.variables.meas_data[\"humidity\"][1],\n pen={\"color\": \"b\", \"width\": 2},\n clear=True,\n )\n hum_plot_obj.addItem(plot_item)\n del plot_item # the plot class needs a plot item which can be rendered, to avoid a mem leak delete the created plot item or 20k ram will be used\n # hum_plot_obj.addItem(setpg.plot(self.variables.meas_data[\"humidity\"][0],self.variables.meas_data[\"humidity\"][1],pen={'color': \"b\", 'width': 2}, clear=True))\n hum_plot_obj.setGeometry(\n p1.vb.sceneBoundingRect()\n ) # resize the second plot!\n except:\n pass\n\n # Create sublayout\n temphum_layout = QGridLayout()\n\n # Frame over the objects\n frame = QLabel()\n frame.setFrameStyle(QFrame.Box | QFrame.Raised)\n frame.setLineWidth(0)\n frame.setMidLineWidth(2)\n\n self.layout.addWidget(\n frame, self.temp_ypos, self.temp_xpos, self.temp_ysize, self.temp_xsize\n )\n\n x = np.zeros(1)\n y = np.zeros(1)\n\n setpg = pq\n # date_axis = CAxisTime(orientation='bottom') # Correctly generates the time axis\n hum_plot_obj = setpg.ViewBox() # generate new plot item\n temphum_plot = pq.PlotWidget()\n config_plot(temphum_plot, hum_plot_obj, setpg) # config the plot items\n\n self.variables.add_update_function(update_temphum_plots)\n\n # Additional Variables will be generated for temp and hum\n # self.variables.default_values_dict[\"settings\"].update({\"lights\": False, \"humidity_control\": True, \"current_tempmin\": 20, \"current_tempmax\": 25, \"current_hummin\": 20,\"current_hummax\": 25})\n\n # Spin Boxes for temp and humidity\n\n tempmin = QSpinBox()\n tempmax = QSpinBox()\n hummin = QSpinBox()\n hummax = QSpinBox()\n\n # Spinbox label\n textbox_temp = QLabel()\n textbox_temp.setText(\"Min temp. Max temp.\")\n textbox_temp.setFont(self.font)\n textbox_hum = QLabel()\n textbox_hum.setText(\"Min hum. 
Max hum.\")\n textbox_hum.setFont(self.font)\n\n # Config\n\n tempmin.setRange(15, 35)\n tempmin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmin\", 0)\n )\n )\n tempmax.setRange(15, 35)\n tempmax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmax\", 0)\n )\n )\n tempmin.valueChanged.connect(valuechange)\n tempmax.valueChanged.connect(valuechange)\n\n hummin.setRange(0, 70)\n hummin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummin\", 0)\n )\n )\n hummax.setRange(0, 70)\n hummax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummax\", 0)\n )\n )\n hummin.valueChanged.connect(valuechange)\n hummax.valueChanged.connect(valuechange)\n\n # Push buttons on the right for humidity control and light control\n\n dry_air_btn = QPushButton(\"Humidity ctl. off\")\n self.variables.default_values_dict[\"settings\"][\"humidity_control\"] = False\n dry_air_btn.setCheckable(True)\n dry_air_btn.toggle()\n dry_air_btn.clicked.connect(dry_air_action)\n dry_air_btn.setChecked(False)\n\n light_btn = QLabel()\n light_btn.setText(\"State not defined\")\n light_btn.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n # light_btn.setCheckable(True)\n # light_btn.clicked.connect(light_action)\n\n # Humidity\n # temphum_plot.plot(x,y, pen=\"b\")\n\n # Widgets add\n temphum_layout.addWidget(textbox_temp, 0, 0, 1, 2)\n temphum_layout.addWidget(tempmin, 1, 0)\n temphum_layout.addWidget(tempmax, 1, 1)\n\n temphum_layout.addWidget(textbox_hum, 2, 0, 1, 2)\n temphum_layout.addWidget(hummin, 3, 0)\n temphum_layout.addWidget(hummax, 3, 1)\n\n temphum_layout.addWidget(dry_air_btn, 4, 0, 1, 2)\n temphum_layout.addWidget(light_btn, 5, 0, 3, 2)\n\n temphum_layout.addWidget(temphum_plot, 0, 3, 10, 2)\n\n temphum_layout.setContentsMargins(8, 8, 0, 8) # Makes a margin to the layout\n\n # Add the layout to the main layout\n self.layout.addLayout(\n temphum_layout,\n self.temp_ypos,\n self.temp_xpos,\n self.temp_ysize,\n self.temp_xsize,\n )\n\n def update():\n pass\n\n self.variables.add_update_function(update)\n self.variables.add_update_function(check_light_state)", "def test_demand_variability(self):\n demand_variability = self._uncertain_demand.demand_variability\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_variability = lambda x, y: x / y\n test_variability = cal_variability(stdev, avg_order)\n self.assertEqual(demand_variability, test_variability)", "def get_dynamic_pressure(self, velocity):\n\t\tdynamic_press = 0.5 * self.Density * velocity**2\n\t\treturn dynamic_press", "def compute_visuals(self):\n pass", "def _derive_variance_(self):\n # Pure Photon Noise\n self._properties[\"var\"] = np.sqrt(self.rawdata*self.exposuretime) / self.exposuretime", "def is_visible(self):\n return self.real > 0", "def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return 
(Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def get_acceleration(self,v,el,T_s,T_i):\n\n rad = radiation.Radiation()\n T_atm = rad.getTemp(el)\n p_atm = rad.getPressure(el)\n rho_atm = rad.getDensity(el)\n g = rad.getGravity(el)\n\n\n rho_int = p_atm/(self.Rsp_air*T_i) # Internal air density\n\n Cd = .5 # Drag Coefficient\n F_b = (rho_atm - rho_int)*self.vol*g # Force due to buyoancy\n F_d = Cd*(0.5*rho_atm*math.fabs(v)*v)*self.cs_area# Force due to Drag\n\n if F_d > 0:\n F_d = F_d * self.Upsilon\n vm = (self.massEnv + self.mp) + rho_atm*self.vol + self.vm_coeff*rho_atm*self.vol #Virtual Mass\n accel = ((F_b - F_d - (self.massEnv + self.mp)*g)/vm)\n\n return accel", "def DynamicsCheat(x, t, T0, alpha, cost, K, n, r):\n D=dmax*x[0]**n/(x[0]**n+K**n)\n #defune the degradation effect\n deg=0\n #define ODEs\n y=np.zeros([np.size(x)])\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r*(1-cost)*(1-x[1])-D-alpha)#d Co/dt\n \n return y", "def __init__(self, temperatures, daytypes, consumptions, nb_days, nb_particles, sigma2, kappa, u_heat):\n self.temperatures = temperatures\n self.daytypes = daytypes\n self.consumptions = consumptions\n self.nb_days = nb_days\n self.nb_particles = nb_particles\n self.sigma2 = sigma2\n self.kappa = kappa\n self.u_heat = u_heat\n #Var init\n self.s = np.zeros((nb_days, nb_particles)) \n self.g_heat = np.zeros((nb_days, nb_particles))\n #sigma_s and sigma_g are fixed\n self.sigma_s_star_2 = np.zeros((1, nb_particles)) \n self.sigma_g_star_2 = np.zeros((1, nb_particles))\n self.x_season = np.zeros((1, nb_particles))\n self.x_heat = np.zeros((1, nb_particles))\n self.x = np.zeros((1, nb_particles))\n self.w = np.zeros((1, nb_particles))", "def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)", "def airfoilEffT(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)", "def dispersion(self, k):\n return np.sqrt(\n 1 + (\n (k*self.material.hbar_m)**2 / (2*self.material.m_star_m) -\n self.material.z\n )**2\n )", "def get_vcond(lambdam, taum):\n return 2 * lambdam / taum", "def RHO(p,tv): \n _rd=287.053 # Gas constant for dry air\n _tv=tv*1.\n if np.nanmax(_tv)<100: _tv +=273.15# NB: C-->K\n if np.nanmax(p)<2000: p*=100 # hPa to Pa\n rho=np.divide(p,np.multiply(_rd,_tv))\n\n return rho", "def IC_FC_visualization(self):\n legend = ['1st CWT','2nd CWT','IC','FC']\n title = 'Optimized ICs and FCs detection'\n IC_values = [self.IC,normalize(self.cwt1)[self.IC]]\n FC_values = [self.FC,normalize(self.cwt2)[self.FC]]\n visualize_signal(legend, title, normalize(self.cwt1), normalize(self.cwt2), IC = IC_values, FC = FC_values)", "def plc_temp(coil_df):", "def feller(self):\n return 2 * self.kappa_y * self.mean_v - self.eta_y**2 > 0", "def water_vapour(t):\n T_0 = 273.15\n T_rw = 35.86 # over water\n a = 17.269\n # cdo 
-mulc,610.78 -exp -div -mulc,17.5 -subc,273.15 a\n return 610.78 * np.exp(a * (t - T_0) / (t - T_rw))", "def visualize(self):\n import matplotlib.pyplot as plt\n import numpy as np\n\n plt.figure()\n sw_ = np.linspace(0.0, 1.0, 50)\n plt.plot(sw_, self.krw(sw_), label=\"Water\")\n plt.plot(sw_, self.kro(sw_), label=\"Oil\")\n plt.xlabel(\"Water saturation\")\n plt.ylabel(\"Relative permeability\")\n plt.legend()", "def artificial_data():\n \n num_voxels = 10\n c = (5.0, 5.0, 5.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.abs(numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 5) < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def func_kc_318(n, series):\n if series == \"3D3\":\n try:\n return 2*np.pi/(wl_3D3[str(n)]*1e-9)\n except:\n return 0", "def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def rad_field_initial_condition(self):\n\n # revert in viewing direct\n angle, _ = f.convert_direction(self.receiver_elevation, self.receiver_azimuth)\n # Looking at the sky\n if angle < 90:\n I_init = (\n self.sun_intensity\n * f.delta_func(self.sun_elevation - self.receiver_elevation)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n # Looking at the ground\n elif angle > 90:\n I_ground = RT_model_1D.calc_direct_beam_intensity(self, 0)\n\n I_lambert = (\n I_ground\n * self.ground_albedo\n * np.cos(np.deg2rad((self.sun_elevation + 180) % 360))\n )\n\n I_specular = (\n I_ground\n * self.ground_albedo\n * f.delta_func(self.sun_elevation + self.receiver_elevation - 180)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n I_init = (\n 1 - self.reflection_type\n ) * I_lambert + self.reflection_type * I_specular\n\n else:\n I_init = np.empty(self.stokes_dim)\n I_init.fill(np.nan)\n\n return I_init", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def __Vs__(self, z):\r\n return -np.array(I(z > self.psi), dtype=int)", "def is_stationary(self) -> bool:\n ad_fuller_result = adfuller(self.y.dropna(), autolag='AIC')\n p_value = ad_fuller_result[1]\n return p_value <= 0.5", "def energy(self, visible):\n bias_term = tf.matmul(visible, self._bias_visible)\n linear_transform = tf.matmul(visible, self._weights) + tf.squeeze(self._bias_hidden)\n hidden_term = tf.reduce_sum(tf.math.log(1 + tf.exp(linear_transform)), axis=1)\n return tf.reduce_mean(-hidden_term - bias_term)" ]
[ "0.7093741", "0.6370206", "0.617513", "0.6122823", "0.61151516", "0.6091893", "0.6021482", "0.59835947", "0.592721", "0.5878499", "0.58544457", "0.5826623", "0.58215696", "0.5811718", "0.58073586", "0.56892794", "0.56720847", "0.56465936", "0.5620597", "0.5558093", "0.5547346", "0.5543419", "0.5529484", "0.54964113", "0.54892886", "0.5484821", "0.5484269", "0.5476041", "0.5470144", "0.5468478", "0.5437187", "0.5403287", "0.5399196", "0.53906727", "0.53889227", "0.53840417", "0.5382609", "0.53760934", "0.53361154", "0.5322398", "0.5316629", "0.53162044", "0.531272", "0.5306365", "0.5289402", "0.5279789", "0.5277643", "0.52725595", "0.5268325", "0.52562225", "0.5250284", "0.52501774", "0.52403426", "0.5238791", "0.52375376", "0.5232843", "0.52218294", "0.52187574", "0.5216304", "0.5211536", "0.5206846", "0.520571", "0.51963633", "0.51919127", "0.51790255", "0.5170617", "0.5170138", "0.51697206", "0.51673007", "0.51672345", "0.51645356", "0.5156917", "0.5156777", "0.5155528", "0.51535", "0.51502687", "0.51492685", "0.5141668", "0.5134098", "0.5124783", "0.51227325", "0.51195073", "0.5118763", "0.51172256", "0.5115223", "0.5103505", "0.5101708", "0.5100424", "0.50985765", "0.5091643", "0.50906533", "0.5086744", "0.5083628", "0.50824165", "0.5079744", "0.5079037", "0.5072997", "0.5068044", "0.50680053", "0.50629264" ]
0.82398015
0
Return the heat capacity of air in Joules per kilogram Kelvin. The heat capacity of air varies as a function of temperature and is given by an empirically derived formula.
def heat_capacity_of_air(self) -> float:

    return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2
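A quick sanity check of the empirical fit above (an editorial illustration, not part of the dataset row): rewritten as a standalone function and evaluated at an assumed ambient temperature of 300 K, it gives 1002.5 + 275e-6 * (300 - 200)^2 = 1005.25 J/(kg K), close to the commonly quoted specific heat of dry air near room temperature.

def heat_capacity_of_air(ambient_temperature: float) -> float:
    # Empirical fit: specific heat of air in J/(kg K), temperature in kelvin.
    return 1002.5 + 275 * (10 ** (-6)) * (ambient_temperature - 200) ** 2


# Example: at 300 K the fit gives 1002.5 + 2.75 = 1005.25 J/(kg K).
print(heat_capacity_of_air(300.0))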
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heatCapacity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"heat capacity\", Tk)\n return (\n sum(\n [\n +1.38642e-13 * Tk**4,\n -6.47481e-10 * Tk**3,\n +1.02345e-06 * Tk**2,\n -4.32829e-04 * Tk,\n +1.06133e00,\n ]\n )\n * 1000.0\n ) # kJ / kg K to J / kg K", "def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density", "def heat_capacity(r, phi, q, kT):\n pot = q*(phi - phi[0])\n a = np.trapz(pot**2 * np.exp(-pot/kT) * r, r)\n b = np.trapz(pot * np.exp(-pot/kT) * r, r)\n c = np.trapz(np.exp(-pot/kT) * r, r)\n return 3/2 + 1/kT**2 * (a/c - b**2/c**2)", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def get_heat_capacity_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n # There is a precision problem with \"-\" (minus) operator\n # We'll use asymptotic formula for high temperatures to avoid that problem\n y_low = y[y < THRESHOLD]\n vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD]\n tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD]\n # high temperatures - low numbers\n C_P_low = 5 * gbar * np.sqrt(2) / (36 * np.pi ** 2) * tt_low ** (3 / 2) * vv_low\n C_P_low *= (\n 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2)\n - 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2\n )\n C_P_low *= _1d_call(_fdk, y_low, k=3 / 2) / _1d_call(_fdk, y_low, k=1 / 2) ** 2\n # low temperatures - high numbers\n C_P_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3)\n return np.concatenate((C_P_low, C_P_high)).reshape(y.shape)", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def get_specific_heat() -> float:\n return 1006.0", "def getHeatCapacity(self, Tlist):\n\t\treturn _modes.harmonicoscillator_heatcapacity(Tlist, self.frequency) * self.degeneracy", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def get_heat_capacity_volume(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n # There is a precision problem with \"-\" (minus) operator\n # We'll use asymptotic formula for high temperatures to avoid that problem\n y_low = y[y < THRESHOLD]\n vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD]\n tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD]\n # high temperatures - low numbers\n C_V_low = 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2)\n C_V_low -= 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2\n C_V_low *= gbar * np.sqrt(2) / (4 * 
np.pi ** 2) * tt_low ** (3 / 2) * vv_low\n C_V_low /= _1d_call(_fdk, y_low, k=-1 / 2)\n # low temperatures - high numbers\n C_V_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3)\n return np.concatenate((C_V_low, C_V_high)).reshape(y.shape)", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def getHeatCapacity(self, Tlist, V=1.0):\n\t\treturn _modes.translation_heatcapacity(Tlist, self.mass, self.dimension, V)", "def specificHeatCapacity(d, d_iso, density, cp):\n d_t = min(0.5 * np.sum(d), d_iso , 0.1)\n sum_d_i = d[0]\n i = 0 \n kappa = 0 \n while sum_d_i <= d_t:\n kappa += d[i] * density[i] * cp[i]\n i += 1\n sum_d_i += d[i]\n else:\n sum_d_i -= d[i]\n d_part = d_t - sum_d_i \n kappa += d_part * density[i] * cp[i]\n\n return kappa", "def harmonicOscillator_heatCapacity(T, freq):\n x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def getEnthalpyOfVaporization(self,Temperature):\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\n\t\t# Eqn 7 from Epstein et al 2009\n\t\tHvap = 2.303*8.3145*Temperature*Temperature*B/((C + Temperature - 273.15)*(C + Temperature - 273.15))\n\t\treturn Hvap # units are J/molK", "def getHeatCapacity(self, Tlist):\n\t\treturn _modes.hinderedrotor_heatcapacity(Tlist, self.frequency, self.barrier) * self.degeneracy", "def alpha_B_HII(temperature):\n # HII recombination rate\n # input : T in K\n # output : HII recombination rate (in cm3 / s)\n l = 315614./temperature\n a = 2.753e-14 * l**1.5 / (1. + (l/2.74)**0.407)**2.242\n return a", "def canopy_heat_capacity(states: ClimateStates) -> float:\n return CAP_LEAF * states.leaf_area_index", "def getHeatCapacity(self, Tlist):\n\t\treturn _modes.freerotor_heatcapacity(Tlist, self.frequencies, 1 if self.linear else 0)", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def C_P(self):\n return self.generic_getter(\n get_heat_capacity_pressure, \"C_P\", \"convert_heat_capacity\"\n )", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def molar_mass_dry_air():\n return 28.9647", "def gueymard94_pw(temp_air, relative_humidity):\n\n T = temp_air + 273.15 # Convert to Kelvin # noqa: N806\n RH = relative_humidity # noqa: N806\n\n theta = T / 273.15\n\n # Eq. 
1 from Keogh and Blakers\n pw = (\n 0.1 *\n (0.4976 + 1.5265*theta + np.exp(13.6897*theta - 14.9188*(theta)**3)) *\n (216.7*RH/(100*T)*np.exp(22.330 - 49.140*(100/T) -\n 10.922*(100/T)**2 - 0.39015*T/100)))\n\n pw = np.maximum(pw, 0.1)\n\n return pw", "def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def temperature_energy():\n e = _si.e.value\n k_B 
= _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )", "def fahr_to_celcius(temp_fahr):\n temp_celcius = (temp_fahr - 32) * 5/9\n return temp_celcius", "def conduct_heat(self, delta_time, external_power):\n\t\tself.temperature_container = self.temperature_container+self.area*external_power*delta_time/(self.heat_capacity_container*self.mass_container)#https://en.wikipedia.org/wiki/Heat_capacity\n\t\t\n\t\tinternal_power = 0.591*(self.temperature_container-self.temperature)/0.01#No idea of this is right. Mainly the devides by its length bit. https://en.wikipedia.org/wiki/Thermal_conduction#Fourier's_law\n\t\t\n\t\tif (self.heat_capacity*self.mass())!=0:\n\t\t\tself.temperature = self.temperature+internal_power*delta_time/(self.heat_capacity*self.mass())\n\t\t\t#self.temperature_container=self.temperature_container-internal_power*delta_time/(self.heat_capacity_container*self.mass_container)#Als je dit toevoegd lijkt de simulatie niet goed meer te werken dus nog even uitzoeken heo dat zit.", "def temperature(self):\n value = float(self._parent.query('R{}'.format(self._idx))[1:])\n return pq.Quantity(value, pq.Kelvin)", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def hinderedRotor_heatCapacity(T, freq, barr):\n x = constants.h * constants.c * 100. * freq / constants.kB / T\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n z = 0.5 * constants.h * constants.c * 100. * barr / constants.kB / T\n BB = scipy.special.i1(z) / scipy.special.i0(z)\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x - 0.5 + z * (z - BB - z * BB * BB)", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def celsius(fahrenheit):\n return 5 / 9 * (fahrenheit - 32)", "def bjerrum_length_water(temperature=298.15):\n bjerrum = np.power(ELECTRON_CHARGE, 2.0) / \\\n (4.0 * np.pi *\n ELECTRIC_CONSTANT *\n dielectric_constant_water(temperature) *\n BOLTZMANN_CONSTANT *\n temperature\n )\n return bjerrum", "def get_fuel_requirements(mass: int) -> int:\n return int(mass / 3) - 2", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def solute_holding_capacity(depth, 
surface_area, koc):\n\n from .parameters import benthic, water_column\n\n # Aqueous volumes in each region\n vol1 = depth * surface_area # total volume in water column, approximately equal to water volume alone\n vol2a = benthic.depth * surface_area # total benthic volume\n vol2 = vol2a * benthic.porosity # total benthic pore water volume\n\n # Default EXAMS conditions for partitioning\n kow = koc / .35 # DEFAULT EXAMS CONDITION ON Kow p.35\n kpdoc1 = kow * .074 # DEFAULT RELATION IN EXAMS (LITTORAL)\n kpdoc2 = koc # DEFAULT RELATION IN EXAMS (BENTHIC) p.16 of EXAMS 2.98 (or is it Kow*.46 ?)\n xkpb = 0.436 * kow ** .907 # DEFAULT RELATION IN EXAMS\n\n # mass in littoral region\n vol1a = depth[0] * surface_area # initial volume corresponding with suspended matter reference\n m_sed_1 = water_column.sused * vol1a * .001 # SEDIMENT MASS LITTORAL\n m_bio_1 = water_column.plmas * vol1a * .001 # BIOLOGICAL MASS LITTORAL\n m_doc_1 = water_column.doc * vol1a * .001 # DOC MASS LITTORAL\n\n # partitioning coefficients of individual media\n kd_sed_1 = koc * water_column.froc * .001 # Kd of sediment in littoral [m3/kg]\n kd_sed_2 = koc * benthic.froc * .001 # Kd of sediment in benthic\n kd_bio = xkpb / 1000. # Kd of biological organisms\n kd_doc_1 = kpdoc1 / 1000. # Kd of DOC in littoral region\n kd_doc_2 = kpdoc2 / 1000. # Kd of DOC in benthic region\n\n # mass in benthic region\n m_sed_2 = benthic.bulk_density * vol2a * 1000. # as defined by EXAMS parameters m_sed_2 = BULKD/PCTWA*VOL2*100000.\n m_bio_2 = benthic.bnmas * surface_area * .001\n m_doc_2 = benthic.doc * vol2 * .001\n\n # solute holding capacity in regions 1 and 2\n capacity_1 = kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 * m_doc_1 + vol1\n capacity_2 = kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 * m_doc_2 + vol2\n\n # Fraction going to water column and benthic\n fw1 = vol1 / capacity_1 # fw1 is daily, vol1 is daily\n fw2 = vol2 / capacity_2\n\n theta = capacity_2 / capacity_1\n\n sed_conv_factor = vol2 / fw2 / m_sed_2 # converts pore water to [Total Conc normalized to sed mass]\n\n # Omega mass transfer - Calculates littoral to benthic mass transfer coefficient\n omega = benthic.d_over_dx / benthic.depth # (m3/hr)/(3600 s/hr)\n\n return fw1, fw2, theta, sed_conv_factor, omega", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def get_production_factor(self, temp_atmosphere):\n return 1.", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def get_production_factor(self, temp_atmosphere):\n a1 = self.damages_terms[0]\n a2 = self.damages_terms[1]\n a3 = self.damages_terms[2]\n pf = self.params.prod_frac\n return ne.evaluate('1 - pf * (1 - 1 / (1 + a1 * temp_atmosphere + a2 * temp_atmosphere ** a3))')", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if 
np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def vib_energy(frequency, temp):\n\n # vibrational temperature\n\n vtemp = plank_const * speed_ol / boltz_const * frequency\n\n vtemp = np.matrix(vtemp)\n\n temp = 1 / np.matrix(temp)\n\n # need to convert to array, because the element wise exponential\n # cannot work with matrix\n intermediate = np.array(vtemp.T * np.transpose(temp).T)\n\n # energy is a np.matrix, need to convert to array\n energy = R * vtemp * np.matrix((0.5 + (np.exp(intermediate) - 1) ** -1))\n energy = np.array(energy)[0]\n\n return energy", "def calc_h_lat(dry_bulb_C, humidity_ratio_out_kgperkg):\n\n h_kJ_kg = humidity_ratio_out_kgperkg * (dry_bulb_C * CPW_kJ_kgC + h_we_kJ_kg)\n\n return h_kJ_kg", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def capenergy(C, V):\n energy = 1 / 2 * C * V ** 2\n return energy", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def temperature() -> float:", "def C_V(self):\n return self.generic_getter(\n get_heat_capacity_volume, \"C_V\", \"convert_heat_capacity\"\n )", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def fuel_calc(mass):\n return max((mass / 3) - 2, 0)", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def calculate_ttw_energy(self) -> None:\n\n self.energy = self.ecm.motive_energy_per_km(\n driving_mass=self[\"driving mass\"],\n rr_coef=self[\"rolling resistance coefficient\"],\n drag_coef=self[\"aerodynamic drag coefficient\"],\n frontal_area=self[\"frontal area\"],\n electric_motor_power=self[\"electric power\"],\n engine_power=self[\"power\"],\n recuperation_efficiency=self[\"recuperation efficiency\"],\n aux_power=self[\"auxiliary power demand\"],\n battery_charge_eff=self[\"battery charge efficiency\"],\n battery_discharge_eff=self[\"battery discharge efficiency\"],\n fuel_cell_system_efficiency=self[\"fuel cell system efficiency\"],\n )\n\n self.energy = self.energy.assign_coords(\n {\n \"powertrain\": self.array.powertrain,\n \"year\": self.array.year,\n \"size\": self.array.coords[\"size\"],\n \"value\": self.array.coords[\"value\"],\n }\n )\n\n if 
self.energy_consumption:\n self.override_ttw_energy()\n\n distance = self.energy.sel(parameter=\"velocity\").sum(dim=\"second\") / 1000\n\n self[\"engine efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"engine efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n _o = lambda x: np.where((x == 0) | (x == np.nan), 1, x)\n\n if self.engine_efficiency is not None:\n print(\"Engine efficiency is being overridden.\")\n for key, val in self.engine_efficiency.items():\n pwt, size, year = key\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"transmission efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"transmission efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n if self.transmission_efficiency is not None:\n print(\"Transmission efficiency is being overridden.\")\n for key, val in self.transmission_efficiency.items():\n pwt, size, year = key\n\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"TtW energy\"] = (\n self.energy.sel(\n parameter=[\"motive energy\", \"auxiliary energy\", \"recuperated energy\"]\n ).sum(dim=[\"second\", \"parameter\"])\n / distance\n ).T\n\n self[\"TtW energy, combustion mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] > 0\n )\n self[\"TtW energy, electric mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] == 0\n )\n\n self[\"auxiliary energy\"] = (\n self.energy.sel(parameter=\"auxiliary 
energy\").sum(dim=\"second\") / distance\n ).T", "def getHeatCapacity(self, Tlist):\n\t\tCp = np.ones((len(Tlist)), np.float64)\n\t\tfor mode in self.modes:\n\t\t\tCp += mode.getHeatCapacity(Tlist)\n\t\treturn Cp", "def price_heston_mc(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_):\r\n esp_ = monte_carlo(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_)\r\n return exp(-r_*T_)*esp_", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def terrain_multiplier(self):\n #Hardcode table of terrain multipliers\n self.terrain_table = pd.DataFrame({\n 'height': [0.00, 3.00, 5.00, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, 75.0, 100., 150., 200.],\n '1': [0.99, 0.99, 1.05, 1.12, 1.16, 1.19, 1.22, 1.24, 1.25, 1.27, 1.29, 1.31, 1.32],\n '2': [0.91, 0.91, 0.91, 1.00, 1.05, 1.08, 1.12, 1.16, 1.18, 1.22, 1.24, 1.27, 1.29],\n '3': [0.83, 0.83, 0.83, 0.83, 0.89, 0.94, 1.00, 1.04, 1.07, 1.12, 1.16, 1.21, 1.24],\n '4': [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.80, 0.85, 0.90, 0.98, 1.03, 1.11, 1.16]}) #T4.1 AS1170.2\n self.terrain_table.set_index('height',inplace=True)\n\n terrain_stacked = self.terrain_table.stack().reset_index().values\n\n #2d interpolation of Table 4.1 AS1170.2.\n #Terrain Categories may be halves (e.g Category 1.5)\n #Heights may be any value\n #https://stackoverflow.com/questions/56291133/interpolation-of-a-pandas-dataframe\n self.M_z_cat = griddata(terrain_stacked[:,0:2],\n terrain_stacked[:,2],\n [(self.height, self.terrain_category)],\n method='linear')[0]", "def get_cooling_output_for_supply_air_estimation(\n a_a: float, q: float, mu_c: float, v_vent: np.ndarray,\n theta_ex: np.ndarray, x_ex: np.ndarray, j: np.ndarray,\n hc_period: np.ndarray, n_p: np.ndarray, q_gen: np.ndarray, w_gen: np.ndarray, v_local: np.ndarray,\n theta_set_c: float, x_set_c: float):\n\n # specific heat of air, J/kgK\n c = get_specific_heat()\n\n # air density, kg/m3\n rho = get_air_density()\n\n # latent heat of evaporation, kJ/kg\n l_wtr = get_evaporation_latent_heat()\n\n q_d_hs_cs = np.maximum(\n (((q - 0.35 * 0.5 * 2.4) * a_a + c * rho * (v_local + sum(v_vent)) / 3600) * (\n theta_ex - theta_set_c)\n + mu_c * a_a * j + q_gen + n_p * 51.0) * 3600 * 10 ** (-6),\n 0.0) * (hc_period == 'c')\n\n q_d_hs_cl = np.maximum(\n (((v_local + sum(v_vent)) * rho * (x_ex - x_set_c) * 10 ** 3 + w_gen) * l_wtr\n + n_p * 40.0 * 3600) * 10 ** (-6), 0.0) * (hc_period == 'c')\n\n return q_d_hs_cs + q_d_hs_cl", "def kin_energy (self):\n\n for planet in self.planets:\n planet.kenergy = 0.5*planet.mass*((np.linalg.norm(planet.velocity))**2) # every 'kenergy' depends by the body's mass and velocity", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def kelvin_to_fahrenheit(kelvin_temp):\n\n\treturn math.floor(9/5 * (kelvin_temp - 273) + 32)", "def fibre_strain_energy(self, l_stretch):\n if l_stretch <= 1.0:\n # compressed region - no energy\n return 0.0\n\n # Note: this range should be '< lm' according to FEBio but we use '<=' to\n # make setting c6 easier -> there's no difference because it's cts.\n if l_stretch <= self.lm:\n # exponential energy\n return self.c3 * (exp(self.c4 * (l_stretch - 1.0)) - 1.0)\n\n # linear energy\n return self.c5 * l_stretch + self.c6", "def 
do_economic_production(self, unused_t):\n # list of cells in some fixed order, so that we can use arrays below:\n C = list(self.cells)\n # collect cellwise input for energy subsectors:\n intensity = np.array([c.total_energy_intensity for c in C])\n # use the copan:GLOBAL Leontieff/Cobb-Douglas nested production\n # function:\n relative_productivity = np.array([c.total_relative_productivity\n for c in C])\n \"\"\"an aggregate, production-function specific indicator\"\"\"\n # distribute population and capital to cells so that wages and rents\n # are equal across cells (efficient allocation):\n if np.any(relative_productivity == np.inf):\n # give equal prod. to those with inf relative prod.:\n wh = np.where(relative_productivity < np.inf)[0]\n relative_productivity[:] = 1\n relative_productivity[wh] = 0\n relative_weight = relative_productivity\n total_relative_weight = sum(relative_weight)\n else:\n relative_weight = relative_productivity\n total_relative_weight = sum(relative_weight)\n if total_relative_weight == 0:\n # unimportant since relative_weight == 0, just to avoid division error:\n total_relative_weight = 1\n weight = relative_weight / total_relative_weight\n P = weight * self.population\n K = weight * self.physical_capital\n # resulting cell-wise harvest, extraction and production:\n denom = relative_productivity**0.8\n # unimportant since numerator is then 0, just to avoid division error:\n denom[np.where(denom == 0)] = 1\n fac = (P * K)**0.4 / denom\n if any(np.isnan(fac)):\n w = np.where(np.isnan(fac))[0] \n# print(\"fac\",self.physical_capital,P[w],K[w],weight[w],relative_productivity[w],intensity[w])\n exit()\n eB = self.metabolism.biomass_energy_density\n eF = self.metabolism.fossil_energy_density\n # TODO: FIX occurrence of intensity:\n B = np.array([max(0, c.biomass_relative_productivity) for c in C]) * fac / eB\n F = np.array([max(0, c.fossil_relative_productivity) for c in C]) * fac / eF\n R = np.array([max(0, c.renewable_relative_productivity) for c in C]) * fac\n E = eB * B + eF * F + R\n Y = E / intensity\n if any(Y < 0):\n w = np.where(np.isnan(fac))[0] \n# print(\"Y\",P[w],K[w],weight[w],relative_productivity[w],intensity[w],E[w])\n exit()\n \n # tell cells what their harvest and extraction is:\n for i in range(len(C)):\n C[i].biomass_harvest_flow = B[i]\n C[i].fossil_extraction_flow = F[i]\n # store social_systems' total harvest and extraction, emissions,\n # and production:\n self.biomass_input_flow = sum(B)\n self.fossil_fuel_input_flow = sum(F)\n self.renewable_energy_input_flow = sum(R)\n self.secondary_energy_flow = sum(E)\n self.economic_output_flow = sum(Y)\n self.carbon_emission_flow = \\\n self.biomass_input_flow + self.fossil_fuel_input_flow", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def ionization_constant_water(temperature=298.15, density=None):\n import numpy as np\n\n # using Model II from Bandura etal\n # model parameters\n n = 6\n alpha_0 = -0.864671\n alpha_1 = 8659.19\n alpha_2 = -22786.2\n beta_0 = 0.642044\n beta_1 = -56.8534\n beta_2 = -0.375754\n\n # Water parameters\n Mw = 18.01528\n\n # temperature\n T = temperature\n\n # density\n if density:\n D = density\n else:\n D = density_water(T)\n\n pKWG = 0.61415 \\\n + 48251.33 / T \\\n - 67707.93 / T**2.0 \\\n + 10102100.0 / T**3.0\n\n Z = D * np.exp(alpha_0 \\\n + alpha_1/T \\\n + alpha_2/T**2 *np.power(D,2.0/3.0)\n )\n\n pKw = -2*n*(\n np.log10(1 + Z) - (Z/(Z + 1)) * D 
* (\n beta_0 + beta_1/T + beta_2*D\n )\n ) + pKWG + 2 * np.log10(Mw/1000.0)\n\n return np.power(10, -pKw)", "def vaporPressure(temp: float) -> float:\n exponent = (17.27*temp)/(temp + 237.3)\n vp = 611*np.exp(exponent)\n\n return vp", "def CalculateTimeFrameElectricEneregyCost(self, kwh:float, dollarsPerKiloWattHour = 0.1149):\n\t\t\n\t\treturn kwh * dollarsPerKiloWattHour", "def get_requested_supply_air_temperature_for_cooling(\n theta_sur_c: np.ndarray, theta_ac: np.ndarray, l_d_cs: np.ndarray, v_d_supply: np.ndarray,\n psi: float, l_duct: np.ndarray) -> np.ndarray:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n l_duct = np.array(l_duct).reshape(1,5).T\n\n theta_req_c = theta_sur_c - (theta_sur_c - theta_ac + l_d_cs * 10 ** 6 / (v_d_supply * c * rho)) \\\n * np.exp(psi * l_duct * 3600 / (v_d_supply * c * rho))\n\n return np.minimum(theta_req_c, theta_ac)", "def air_humidity_method_qsat26air(air_temperature,surface_air_pressure,relative_humdity):\n es = vapor_pressure(air_temperature,surface_air_pressure)\n em = 0.01*relative_humdity*es\n air_humidity = 622.*em/(surface_air_pressure-0.378*em)\n return air_humidity", "def fuel_for_mass(mass):\n return int(mass / 3) - 2", "def fahrenheit_to_celsius(temp):\n return (temp - 32) * 5/9", "def energy_capacity_rule(mod, g, p):\n return mod.stor_spec_energy_capacity_mwh[g, p]", "def get_hot_junction_temperature(self):\n return self._mcp9600.get('HOT_JUNCTION').temperature", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def get_D_C3H8_air(self, T):\n\n self.set_TempPres_dependents(T)\n\n self.D_C3H8_air = (\n 2. / 3. * np.sqrt(const.k_B * T / np.pi * 0.5 * (1. /\n self.air.m + 1. / self.fuel.m)) / (np.pi * (0.5 *\n (self.air.d + self.fuel.d)) ** 2.) / self.air.n\n )\n\n return self.D_C3H8_air", "def get_requested_supply_air_temperature_for_heating(\n theta_sur_h: np.ndarray, theta_ac: np.ndarray, l_d_h: np.ndarray, v_d_supply: np.ndarray,\n psi: float, l_duct: np.ndarray) -> np.ndarray:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n l_duct = np.array(l_duct).reshape(1, 5).T\n\n theta_req_h = theta_sur_h + (theta_ac + l_d_h * 10 ** 6 / (v_d_supply * c * rho) - theta_sur_h) \\\n * np.exp(psi * l_duct * 3600 / (v_d_supply * c * rho))\n\n return np.maximum(theta_req_h, theta_ac)", "def get_heat_loss_coefficient_of_partition() -> float:\n return 1 / 0.46", "def Analytical_2x2(J, L, temp):\n\n const = 8*J/temp\n\n # the partition function\n Z = 12 + 2*np.exp(-const) + 2*np.exp(const)\n\n # expectation values for E\n E_avg = 16*J* (np.exp(-const) - np.exp(const)) / Z\n E2_avg = 128*J**2*(np.exp(-const) + np.exp(const)) / Z\n E_var = E2_avg - E_avg**2 # 512*J**2 * (Z-6) / Z**2 ??\n\n # expectation values for M\n M_avg = 0\n M2_avg = 32*(1 + np.exp(const)) / Z\n M_abs_avg = 8*(2 + np.exp(const)) / Z\n M_var = M2_avg - M_avg**2\n\n # scaling by L? 
why is this correct?\n A_Energy = E_avg / L**2\n A_SpecificHeat = E_var / (temp**2 * L**2) # Cv\n A_Magnetization = M_avg / L**2\n A_MagnetizationAbs = M_abs_avg / L**2\n A_Susceptibility = M_var / (temp * L**2) # X, (32/(4*Z))*(1+np.exp(ang))\n\n return A_Energy, A_SpecificHeat, A_Magnetization, A_Susceptibility, A_MagnetizationAbs", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def k_Ni00(wind_ms, temp_C):\n\n U = wind_ms\n\n Sc = schmidt_number(temp_C)\n k = (0.333 * U + 0.222 * U ** 2) * (600 / Sc) ** 0.5\n\n return k", "def test_getThermalExpansionFactorConservedMassByLinearExpansionPercent(self):\n hotTemp = 700.0\n dLL = self.component.material.linearExpansionFactor(\n Tc=hotTemp, T0=self._coldTemp\n )\n ref = 1.0 + dLL\n cur = self.component.getThermalExpansionFactor(Tc=hotTemp)\n self.assertAlmostEqual(cur, ref)", "def compute_capacity(self, totalflow, i):\r\n self.capacity[i] = self.phi * self.flow[i] / (self.b[i] \\\r\n - self.beta*(self.flow[i]**self.theta) + np.log(1-totalflow) - np.log(self.flow[i]))", "def get_D_C3H8_air_Kn(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air = self.get_D_C3H8_air(T)\n\n self.D_C3H8_air_Kn = D_C3H8_air / Kn\n\n return self.D_C3H8_air_Kn", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def inventoryCapacity(self):\n # TODO: Worry about how +Strength and +Capacity gear could allow you to carry more than your capacity.\n if self.totalStrength <= 15:\n return int(6 * self.totalStrength + self._baseInventoryCapacity + self._equipmentCarryingCapacity)\n else:\n return int(90 + (self.totalStrength - 15) * 9 + self._baseInventoryCapacity + self._equipmentCarryingCapacity)", "def k_Li86(wind_ms, temp_C):\n from numpy import zeros_like\n\n U = wind_ms\n T = temp_C\n\n Sc = schmidt_number(T)\n k = zeros_like(temp_C)\n\n i1 = U <= 3.6\n i2 = (U > 3.6) & (U < 13.0)\n i3 = U >= 13.0\n\n k[i1] = (0.17 * U[i1]) * (Sc[i1] / 600) ** (-2.0 / 3.0)\n k[i2] = ((U[i2] - 3.4) * 2.8) * (600 / Sc[i2]) ** 0.5\n k[i3] = ((U[i3] - 8.4) * 5.9) * (600 / Sc[i3]) ** 0.5\n\n return k", "def boltzmann_weight(E, temp, unit_conversion=physconst.psi_hartree2J):\n return np.exp(-E*unit_conversion / kb / temp)", "def Erecoil( wavelen, mass):\n inJ = C.h**2 / ( 2* \\\n mass*C.physical_constants['unified atomic mass unit'][0] * \\\n (wavelen*1e-6)**2 ) \n inuK = inJ / C.k *1e6\n return inuK", "def get_classical_harmonic_free_energy(self, temperatures=None):\n if temperatures is None:\n temperatures = self.input.temperature\n temperatures = np.clip(temperatures, 1e-6, np.inf)\n beta = 1.0 / (KB * temperatures)\n\n return (\n -3\n * len(self.input.structure)\n * np.log(np.pi / (self.input.spring_constant * beta))\n / (2 * beta)\n )", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 
0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def kilometres_available(self):\n return self.fuel / self.litres_per_kilometre", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6" ]
[ "0.72807765", "0.70755357", "0.67571175", "0.6657788", "0.6539134", "0.63971967", "0.63434315", "0.6203374", "0.62022215", "0.61694735", "0.61694306", "0.6157651", "0.61571056", "0.61204106", "0.6098233", "0.60910946", "0.6065617", "0.6051415", "0.60473984", "0.6033305", "0.6029253", "0.5986691", "0.5982994", "0.5909707", "0.58936924", "0.58759123", "0.5864374", "0.58613294", "0.5851433", "0.581145", "0.5801449", "0.57922006", "0.57806593", "0.5777955", "0.57766664", "0.57752985", "0.5770449", "0.57515717", "0.57479614", "0.57448494", "0.5742503", "0.5723997", "0.571221", "0.5700844", "0.5683911", "0.56407493", "0.5640405", "0.5634611", "0.563313", "0.5632822", "0.56283045", "0.56157035", "0.5607263", "0.5589046", "0.5586153", "0.55852145", "0.55835146", "0.5581994", "0.5580971", "0.5577584", "0.55771977", "0.5574415", "0.5568073", "0.55652577", "0.5556556", "0.5545552", "0.5545258", "0.5542702", "0.55373806", "0.5526436", "0.5516734", "0.55124044", "0.5509346", "0.5493219", "0.5492143", "0.5480535", "0.54753727", "0.5474436", "0.5474238", "0.5471437", "0.5465536", "0.54618037", "0.54553837", "0.5451585", "0.54482067", "0.543647", "0.5430556", "0.54304755", "0.54279965", "0.5426904", "0.5425047", "0.54242885", "0.5422265", "0.54219353", "0.54144186", "0.5399957", "0.53994644", "0.539901", "0.5396284", "0.53844285" ]
0.77720433
0
The irradiance should only be defined if the sun is above the horizon.
def irradiance(self) -> float:
    if self.declination > 0:
        return self._irradiance
    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rad_field_initial_condition(self):\n\n # revert in viewing direct\n angle, _ = f.convert_direction(self.receiver_elevation, self.receiver_azimuth)\n # Looking at the sky\n if angle < 90:\n I_init = (\n self.sun_intensity\n * f.delta_func(self.sun_elevation - self.receiver_elevation)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n # Looking at the ground\n elif angle > 90:\n I_ground = RT_model_1D.calc_direct_beam_intensity(self, 0)\n\n I_lambert = (\n I_ground\n * self.ground_albedo\n * np.cos(np.deg2rad((self.sun_elevation + 180) % 360))\n )\n\n I_specular = (\n I_ground\n * self.ground_albedo\n * f.delta_func(self.sun_elevation + self.receiver_elevation - 180)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n I_init = (\n 1 - self.reflection_type\n ) * I_lambert + self.reflection_type * I_specular\n\n else:\n I_init = np.empty(self.stokes_dim)\n I_init.fill(np.nan)\n\n return I_init", "def civil_twilight(topos, earth, sun, time):\n\n location = earth + topos\n astrocentric = location.at(time).observe(sun).apparent()\n alt, _, _ = astrocentric.altaz('standard')\n return alt.degrees <= -6.0 # definition of civil twilight", "def Horizon(time, observer, ra, dec, refraction):\n if not (Refraction.Airless.value <= refraction.value <= Refraction.JplHorizons.value):\n raise Error('Invalid refraction type')\n\n latrad = math.radians(observer.latitude)\n lonrad = math.radians(observer.longitude)\n decrad = math.radians(dec)\n rarad = ra * _HOUR2RAD\n\n sinlat = math.sin(latrad)\n coslat = math.cos(latrad)\n sinlon = math.sin(lonrad)\n coslon = math.cos(lonrad)\n sindc = math.sin(decrad)\n cosdc = math.cos(decrad)\n sinra = math.sin(rarad)\n cosra = math.cos(rarad)\n\n # Calculate three mutually perpendicular unit vectors\n # in equatorial coordinates: uze, une, uwe.\n #\n # uze = The direction of the observer's local zenith (straight up).\n # une = The direction toward due north on the observer's horizon.\n # uwe = The direction toward due west on the observer's horizon.\n #\n # HOWEVER, these are uncorrected for the Earth's rotation due to the time of day.\n #\n # The components of these 3 vectors are as follows:\n # [0] = x = direction from center of Earth toward 0 degrees longitude (the prime meridian) on equator.\n # [1] = y = direction from center of Earth toward 90 degrees west longitude on equator.\n # [2] = z = direction from center of Earth toward the north pole.\n\n uze = [coslat*coslon, coslat*sinlon, sinlat]\n une = [-sinlat*coslon, -sinlat*sinlon, coslat]\n uwe = [sinlon, -coslon, 0.0]\n\n # Correct the vectors uze, une, uwe for the Earth's rotation by calculating\n # sideral time. 
Call spin() for each uncorrected vector to rotate about\n # the Earth's axis to yield corrected unit vectors uz, un, uw.\n # Multiply sidereal hours by -15 to convert to degrees and flip eastward\n # rotation of the Earth to westward apparent movement of objects with time.\n\n angle = -15.0 * _sidereal_time(time)\n uz = _spin(angle, uze)\n un = _spin(angle, une)\n uw = _spin(angle, uwe)\n\n # Convert angular equatorial coordinates (RA, DEC) to\n # cartesian equatorial coordinates in 'p', using the\n # same orientation system as uze, une, uwe.\n\n p = [cosdc*cosra, cosdc*sinra, sindc]\n\n # Use dot products of p with the zenith, north, and west\n # vectors to obtain the cartesian coordinates of the body in\n # the observer's horizontal orientation system.\n #\n # pz = zenith component [-1, +1]\n # pn = north component [-1, +1]\n # pw = west component [-1, +1]\n\n pz = p[0]*uz[0] + p[1]*uz[1] + p[2]*uz[2]\n pn = p[0]*un[0] + p[1]*un[1] + p[2]*un[2]\n pw = p[0]*uw[0] + p[1]*uw[1] + p[2]*uw[2]\n\n # proj is the \"shadow\" of the body vector along the observer's flat ground.\n proj = math.sqrt(pn*pn + pw*pw)\n\n # Calculate az = azimuth (compass direction clockwise from East.)\n if proj > 0.0:\n # If the body is not exactly straight up/down, it has an azimuth.\n # Invert the angle to produce degrees eastward from north.\n az = math.degrees(-math.atan2(pw, pn))\n if az < 0:\n az += 360\n else:\n # The body is straight up/down, so it does not have an azimuth.\n # Report an arbitrary but reasonable value.\n az = 0.0\n\n # zd = the angle of the body away from the observer's zenith.\n zd = math.degrees(math.atan2(proj, pz))\n hor_ra = ra\n hor_dec = dec\n\n if refraction != Refraction.Airless:\n zd0 = zd\n refr = RefractionAngle(refraction, 90.0 - zd)\n zd -= refr\n if refr > 0.0 and zd > 3.0e-4:\n zdrad = math.radians(zd)\n sinzd = math.sin(zdrad)\n coszd = math.cos(zdrad)\n zd0rad = math.radians(zd0)\n sinzd0 = math.sin(zd0rad)\n coszd0 = math.cos(zd0rad)\n\n pr = [(((p[j] - coszd0 * uz[j]) / sinzd0)*sinzd + uz[j]*coszd) for j in range(3)]\n proj = math.sqrt(pr[0]*pr[0] + pr[1]*pr[1])\n if proj > 0:\n hor_ra = _RAD2HOUR * math.atan2(pr[1], pr[0])\n if hor_ra < 0:\n hor_ra += 24\n else:\n hor_ra = 0\n hor_dec = math.degrees(math.atan2(pr[2], proj))\n\n return HorizontalCoordinates(az, 90.0 - zd, hor_ra, hor_dec)", "def test_check_sun_above_horizon():\n pass", "def wind_shear(self):\n return self.flow_field.wind_shear", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def lower_arm(self):\r\n # ---------------------------------------------------------------------\r\n # Done: 8. Implement this method; it is a ONE-LINER!\r\n # ---------------------------------------------------------------------\r\n if self.is_calibrated == False:\r\n self.calibrate_arm()\r\n self.move_arm_to_position(0)#America\r", "def checkSun(ontology_sun):\n elevation = ontology_sun.has_elevation[0] #gets the elevation value of the Sun in the ontology. \n azimuth = ontology_sun.has_azimuth[0] #gets the azimuth value of the Sun in the ontology. 
\n intensity = ontology_sun.has_intensity[0] #gets the intensity value of the Sun in the ontology.\n return xosc.Sun(intensity,azimuth,elevation)", "def power(self):\n return irradiance_on_plane(self.vnorm, self.h,\n self.date, self.lat) * self.s * self.eff", "def get_rain():\n global rain\n\n # Report rain only if the condition is 'rainy' (and not always).\n if weather_condition == CONDITION_RAINY and random.random() > 0.7:\n rain += round(random.random(), 2)\n return rain", "def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False", "def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta", "def horizontal_arcs_iglu():\n arc(screen, BLACK, (50, 560, 300, 20), 3.14, 0)\n arc(screen, BLACK, (60, 510, 280, 20), 3.14, 0)\n arc(screen, BLACK, (80, 460, 240, 20), 3.14, 0)\n arc(screen, BLACK, (120, 420, 160, 20), 3.14, 0)", "def signal_hammer(icu, icu_slope, hammer_icu, hammer_slope):\n\n return (icu > hammer_icu and icu_slope > 0) or (icu_slope > hammer_slope)", "def sun(xs, ys, s, n):\n yellow = (255, 255, 0) # sun color\n\n circle(screen, yellow, (xs, ys), 30 * s) # sun body\n for k in range(n + 1): # sun rays on the upper side of the sun\n polygon(screen, yellow,\n [(xs + 45 * s * np.cos(np.pi / n * (k - 1 / 2)), ys - 45 * s * np.sin(np.pi / n * (k - 1 / 2))),\n (xs + 30 * s * np.cos(np.pi * (k - 1) / n), ys - 30 * s * np.sin(np.pi * (k - 1) / n)),\n (xs + 30 * s * np.cos(np.pi * k / n), ys - 30 * s * np.sin(np.pi * k / n))], 0)\n for k in range(n + 1): # sun rays on the lower side of the sun\n polygon(screen, yellow,\n [(xs + 45 * s * np.cos(np.pi / n * (k - 1 / 2)), ys + 45 * s * np.sin(np.pi / n * (k - 1 / 2))),\n (xs + 30 * s * np.cos(np.pi * (k - 1) / n), ys + 30 * s * np.sin(np.pi * (k - 1) / n)),\n (xs + 30 * s * np.cos(np.pi * k / n), ys + 30 * s * np.sin(np.pi * k / n))], 0)", "def testRadial(self):\n self.doTest(afwGeom.makeRadialTransform([0, 1.01, 1e-7]))", "def testRadial(self):\n self.doTest(afwGeom.makeRadialTransform([0, 1.01, 1e-7]))", "def rhe(m):\n \n m = m*u.kg.to(u.M_sun)\n \n logr = np.full(m.shape,0)\n \n iless = np.where(m<=2.5)\n igreater = np.where(m>2.5)\n \n logr[iless] = 3.0965 - 2.013*np.log10(m[iless])\n logr[igreater] = 0.0557*(np.log10(m[igreater])-0.172)**-2.5\n return (10**logr)*u.Rsun.to(u.m)", "def event2511():\n header(2511)\n\n wait(1) # Longer delay because Soothing Sunlight will keep triggering it.\n\n if_event_flag_on(1, EVENT.LustrousRuneActive)\n\n if_player_has_special_effect(-1, SPEFFECT.RunicHit0)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit1)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit2)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit3)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit4)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit5)\n if_condition_true(2, -1)\n if_event_flag_off(3, EVENT.DarkAnorLondo)\n if_in_world_area(-2, 10, 1)\n if_in_world_area(-2, 10, 2)\n if_in_world_area(-2, 14, 0)\n if_in_world_area(-2, 15, 0)\n if_in_world_area(-2, 15, 1)\n if_in_world_area(-2, 17, 0)\n if_in_world_area(-2, 18, 1)\n if_condition_true(3, -2)\n if_condition_true(-3, 3)\n 
if_event_flag_off(4, EVENT.EarlyOolacile)\n if_in_world_area(4, 12, 1)\n if_condition_true(-3, 4)\n if_condition_true(2, -3)\n\n if_condition_true(-4, 2)\n\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive0)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive1)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive2)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive3)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive4)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive5)\n if_condition_true(5, -5)\n if_player_has_special_effect(-6, SPEFFECT.SoothingSunlight)\n if_player_has_special_effect(-6, SPEFFECT.SoothingSunlight.value + 1)\n if_player_has_special_effect(-6, SPEFFECT.SoothingSunlight.value + 2)\n if_player_has_special_effect(-6, SPEFFECT.SoothingSunlight.value + 3)\n if_player_has_special_effect(-6, SPEFFECT.SoothingSunlight.value + 4)\n if_player_has_special_effect(-6, SPEFFECT.BountifulSunlight)\n if_player_has_special_effect(-6, SPEFFECT.BountifulSunlight.value + 1)\n if_player_has_special_effect(-6, SPEFFECT.BountifulSunlight.value + 2)\n if_player_has_special_effect(-6, SPEFFECT.BountifulSunlight.value + 3)\n if_player_has_special_effect(-6, SPEFFECT.BountifulSunlight.value + 4)\n if_condition_true(5, -6)\n\n if_condition_true(-4, 5)\n\n if_condition_true(1, -4)\n\n if_condition_true(0, 1)\n\n # Cancel any previous Lustrous buffs.\n chr.cancel_special_effect(CHR.Player, SPEFFECT.LustrousRune0)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.LustrousRune1)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.LustrousRune2)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.LustrousRune3)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.LustrousRune4)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.LustrousRune5)\n\n # Apply appropriate level of Lustrous Rune effect, and register appropriate no-weapon condition.\n if_player_has_special_effect(1, SPEFFECT.RunicHit0)\n skip_if_condition_false(2, 1)\n chr.set_special_effect(CHR.Player, SPEFFECT.LustrousRune0)\n restart()\n\n if_player_has_special_effect(2, SPEFFECT.RunicHit1)\n skip_if_condition_false(2, 2)\n chr.set_special_effect(CHR.Player, SPEFFECT.LustrousRune1)\n restart()\n\n if_player_has_special_effect(3, SPEFFECT.RunicHit2)\n skip_if_condition_false(2, 3)\n chr.set_special_effect(CHR.Player, SPEFFECT.LustrousRune2)\n restart()\n\n if_player_has_special_effect(4, SPEFFECT.RunicHit3)\n skip_if_condition_false(2, 4)\n chr.set_special_effect(CHR.Player, SPEFFECT.LustrousRune3)\n restart()\n\n if_player_has_special_effect(5, SPEFFECT.RunicHit4)\n skip_if_condition_false(2, 5)\n chr.set_special_effect(CHR.Player, SPEFFECT.LustrousRune4)\n restart()\n\n if_player_has_special_effect(6, SPEFFECT.RunicHit5)\n skip_if_condition_false(1, 6)\n chr.set_special_effect(CHR.Player, SPEFFECT.LustrousRune5)\n\n restart()", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n 
description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print 
evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def global_irradiance_overcast(latitude_deg, longitude_deg, utc_datetime, \n elevation = elevation_default, temperature_celsius = 25, \n pressure_millibars = 1013.25): \n ghioc = (572 * (solar.GetAltitude(latitude_deg, longitude_deg, utc_datetime, \n elevation , temperature_celsius , pressure_millibars )))\n \n return ghioc", "def is_winter(et_cell, foo_day):\n if et_cell.latitude > 0 and (foo_day.month < 4 or foo_day.month > 10):\n # if et_cell.cell_lat > 0 and (foo_day.month < 4 or foo_day.month > 10):\n # Northern 
hemisphere\n return True\n else:\n # Southern hemisphere\n return False", "def get_allsky(self):\n band = self.get_band()\n septon = self.is_septon()\n if band == '10_90' or band == '30_90' or septon:\n allsky = True\n else:\n allsky = False\n return allsky", "def remove_rain_norain_discontinuity(R):\n R = R.copy()\n zerovalue = np.nanmin(R)\n threshold = np.nanmin(R[R > zerovalue])\n R[R > zerovalue] -= threshold - zerovalue\n R -= np.nanmin(R)\n\n return R", "def bare_soil(sil):\n sil.get_color_params()\n if ((sil.l_mean < 160) and (sil.a_std > 3)):\n return False\n else:\n return True", "def heaviside(rain_value):\n x = N.arange(0,70)\n P = N.where(rain_value <= x, 1., 0)\n return P", "def IRound(d):\n # http://en.wikipedia.org/wiki/List_of_area_moments_of_inertia\n return np.pi / 4 * (d/2)**4", "def horiz_angle(time, data):\n\n # TODO What should 0deg be? Set it to inline w/ target? facing target?\n\n # direction of the sun. measured in degrees counted clockwise from north.\n azimuth = data[time]['azimuth']\n\n h_angle = (azimuth / 2 - 90)\n\n # returns answer between -180 and 180 degrees\n return round(((h_angle + 180) % 360) - 180, 4)", "def sunrise(cls, date):\n return (date + Clock.days_from_hours(6) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n ((1577917828/1582237828 / 360) *\n (cls.ascensional_difference(date, cls.LOCATION) +\n (1/4 * cls.solar_sidereal_difference(date)))))", "def _is_solar_channel(self, chn):\n return self[chn].wavelength_range[2] < 3.9 or chn in[\n 'HRV', 'VIS006', 'VIS008', 'IR_016']", "def power_output_existing_wind_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_4[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * m.C_MC[g, y])\r\n == 0)\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_4[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * (1 + (1 / m.INTEREST_RATE)) * m.C_MC[g, y])\r\n == 0)", "def annular_eclipse(sat, earth, sun, time):\n\n theta, theta_e, theta_s = eclipse_parameters(sat, earth, sun, time)\n return np.logical_and(theta_s > theta_e,\n theta < (theta_s - theta_e))", "def oceansim(sun_az,sun_zen,cam_head,cam_elev=0,m2=1.33,npart=1.08,mu=3.483, debug=True):\n\n #Water surface norm\n n = np.array([0,0,1])\n m1 = 1.0\n #vector from sun:\n ki = -np.asarray([np.sin(sun_az)*np.sin(sun_zen),\n np.cos(sun_az)*np.sin(sun_zen),\n np.cos(sun_zen)])\n xi = norm_cross(n,ki)\n #transmitted sunlight\n #tx, ty are the transmission amplitude coefficients in the xt, yt directions\n kt,tx,ty = Fresnel.transmission(ki,n,m1,m2)\n xt = xi\n #vector to camera\n kc = -np.asarray([np.sin(cam_head)*np.cos(cam_elev),\n np.cos(cam_head)*np.cos(cam_elev),\n np.sin(cam_elev)])*np.linalg.norm(kt)\n xc = norm_cross(n, kc) #right\n yc = norm_cross(kc, xc) #up\n #vectors for scattering\n ys = norm_cross(kt, kc) # y-axis of scattering event\n xst = norm_cross(ys, kt) # x-axis of scattering event relative to transmitted sunlight\n xsc = norm_cross(ys, kc) # x-axis of scattering event relative to camera\n #Mueller matrices\n # transmission through water surface:\n mm1 = Mueller.polarizer(tx,ty)\n # rotate to scattering plane\n mm2 = Mrotv(kt,xt,xst)\n # scatter\n th_s = vector_angle(kt,kc)\n #mm3 = Mocean(rad2deg(th_s)) #using Empirical ocean scattering\n mm3 = Mueller.rayleigh_norm(th_s) #normalized Rayleigh scattering matrix\n #b = Scattering.bsf_fournier(npart,mu)\n b = Scattering.vspf_fournier(th_s,npart,mu)\n 
# transform to camera's horizontal and up vectors\n mm4 = Mxform(xsc,ys, xc,yc)\n #Combined: mm4 . (b*mm3) . mm2 . mm1\n m = mm4.dot(b*mm3.dot(mm2.dot(mm1)))\n #stokes vector\n s = m.dot([1,0,0,0])\n if debug:\n return s,m,(ki,xi),(kt,xt,xst),(kc,xc,xsc),(mm1,mm2,mm3,b,mm4)\n else:\n return s,m", "def vac2air(w):\n return w / (1.0 + 2.735182E-4 + 131.4182 / w ** 2 + 2.76249E8 / w ** 4)", "def umbral_eclipse(sat, earth, sun, time):\n\n theta, theta_e, theta_s = eclipse_parameters(sat, earth, sun, time)\n return np.logical_and(theta_e > theta_s,\n theta < (theta_e - theta_s))", "def draw_sun():\n lisandro.penup()\n lisandro.goto(40, 90)\n lisandro.begin_fill()\n lisandro.circle(150) # draws out a circle with a radius of 150 for the sun.\n lisandro.end_fill()\n lisandro.hideturtle()", "def is_facing_north(): #py:is_facing_north\n return RUR._is_facing_north_()", "def sun_is_down(check_time, observatory):\n sun = get_sun(check_time).transform_to(AltAz(obstime=check_time, location=observatory))\n return sun.alt.value < -14", "def alpha_crit_fromEarth(a_p): #OK\n a_earth = 1. #in AU\n if a_p > a_earth:\n alpha_max = np.arcsin(a_earth/a_p) #occurs at quadrature\n else: #if a_p < a_earth:\n alpha_max = np.pi #0 deg when opposite side of sta180 deg on same side of star\n return alpha_max", "def start_region(self, x, y):\n if x>0 and x<100 and y>0 and y<100:\n return 1\n elif x>700 and x<800 and y>0 and y<100:\n return 2\n elif x>0 and x<100 and y>400 and y<500:\n return 3\n elif x>700 and x<800 and y>400 and y<500:\n return 4\n return 0", "def _calc_qair_ilwr(rpn_hr):\n # saturation water vapour at the dew point in the pure phase\n # which within 0.5% is that of moist air\n ew = 6.112 * numpy.exp(17.62 * rpn_hr.TD / (243.12 + rpn_hr.TD))\n xvw = ew / (0.01 * rpn_hr.PN) # converting P to hectopascals\n r = 0.62198 * xvw / (1 - xvw) # as at Td r = rw\n qair = xarray.DataArray(r / (1 + r))\n # saturation water vapour at the current temperature in the pure phase\n TT = rpn_hr.TT - 273.15 # change temperature back to Celcius\n eT = 6.112 * numpy.exp(17.62 * TT / (243.12 + TT))\n rh = 100 * (ew / eT)\n\n ew = ew / 10.0 # Change vapour pressure to from hecto pascal to kPa\n w = 465 * ew / rpn_hr.TT\n Lclr = (\n 59.38 + 113.7 * (rpn_hr.TT / 273.16) ** 6 + 96.96 * numpy.sqrt(w / 2.5)\n ) # Dilley\n # Unsworth\n sigma = 5.6697e-8\n eclr = Lclr / (sigma * rpn_hr.TT ** 4)\n ewc = (1 - 0.84 * rpn_hr.NT) * eclr + 0.84 * rpn_hr.NT\n ilwr = xarray.DataArray(ewc * sigma * rpn_hr.TT ** 4)\n\n return qair, ilwr, rh", "def drawRadii(self):\n #horizontal line (which i will extend later)\n global radius_horiz_end_val\n radius_horiz_end_val = ValueTracker(self.x_max)\n global radius_horiz\n radius_horiz = always_redraw(\n lambda : Line(start=dot_center.get_center(), end=self.coords_to_point(radius_horiz_end_val.get_value(),0))\n )\n global radius_horiz_end_dot\n radius_horiz_end_dot = always_redraw(\n lambda : Dot(radius_horiz.get_end())\n )\n\n #angled radius - can alter based on ValueTracker\n global theta\n theta = ValueTracker(0)\n global radius_ang_end_dot\n radius_ang_end_dot = always_redraw(\n lambda : Dot(self.coords_to_point(self.x_max * np.cos(theta.get_value()*DEGREES),\n equation_upper(self.x_max * np.cos(theta.get_value()*DEGREES))))\n )\n\n global radius_ang\n radius_ang = always_redraw(\n lambda : Line(start=dot_center.get_center() ,end=radius_ang_end_dot.get_center())\n )\n\n self.play(Write(radius_horiz), Write(radius_horiz_end_dot),\n Write(radius_ang_end_dot), Write(radius_ang))\n 
self.play(theta.animate.set_value(60))", "def triangulate_analytic_sun_at_center(self,r1,x2,y2,r2,x3,y3,r3):\n gamma=(r1**2+x2**2+y2**2-r2**2)/(2.0*x2)\n\ta=(y2**2)/(float(x2**2))\n\tb=-2.0*gamma*y2/x2\n\tc=gamma**2-r1**2\n\ty_plus=(-b+np.sqrt((b**2)-4*a*c))/(2.0*a)\n\ty_minus=(-b-np.sqrt((b**2)-4*a*c))/(2.0*a)\n x_plus=gamma-y_plus*y2/float(x2)\n x_minus=gamma-y_minus*y2/float(x2)\n difference_plus=(x_plus-x3)**2+(y_plus-y3)**2-r3**2\n difference_minus=(x_minus-x3)**2+(y_minus-y3)**2-r3**2\n if abs(difference_minus) < abs(difference_plus):\n print \"Difference minus\", difference_minus\n print x_minus, y_minus\n return x_minus, x_plus, difference_minus\n else:\n print \"Difference plus\", difference_plus\n print x_plus, y_plus\n return x_plus, y_plus, difference_plus", "def addHorizon(horizon_altitude=np.radians(30.), lat_telescope=np.radians(33.35731944), raCen=0.):\n step = .02\n az = np.arange(0, np.pi * 2.0 + step, step)\n alt = np.ones(len(az), float) * horizon_altitude\n obs = ephem.Observer()\n obs.lat = lat_telescope\n # Set obs lon to zero, just to fix the location.\n # Note that this is not the true observatory longitude, but as long as\n # we calculate the RA at zenith for this longitude, we can still calculate\n # HA appropriately.\n obs.lon = 0\n obs.pressure = 0\n # Given obs lon at zero, find the equivalent ra overhead.\n zenithra, zenithlat = obs.radec_of(0, 90)\n lon = np.zeros(len(az), float)\n lat = np.zeros(len(az), float)\n for i, (alti, azi) in enumerate(zip(alt, az)):\n # Find the equivalent ra/dec values for an alt/az circle.\n r, lat[i] = obs.radec_of(azi, alti)\n # Correct the ra value by the zenith ra value, to get the HA.\n lon[i] = r - zenithra\n lon = -(lon - np.pi) % (np.pi * 2) - np.pi\n return lon, lat", "def shooting_star(self):\n self.data['shooting_star'] = (((self.data['High'] - self.data['Low']) > ((self.data['Open'] - self.data['Close'])*4)) & \\\n ((self.data['High'] - self.data['Close']) / ((.001 + self.data['High'] - self.data['Low']) >= 0.75)) & \\\n ((self.data['High'] - self.data['Open']) / ((.001 + self.data['High'] - self.data['Low']) >= 0.75)))", "def mask_nighttime(lon, lat, date=date, mask_daytime=mask_daytime,\n ref_date=datetime.datetime(1899, 12, 31, 12),\n buffer_hours=buffer_hours, debug=False):\n # --- get lat and lon values from columns\n if debug:\n print((\"--- (s4-1) %s seconds ---\" % (time.time() - start_time)))\n # --- get sunrise and sunset for location\n o = ephem.Observer()\n # set lat (decimal?), lon (decimal?), and date (UTC)\n o.lat = str(lat)\n o.long = str(lon)\n o.date = date\n # planetary body\n s = ephem.Sun()\n if debug:\n print((\"--- (s4-2) %s seconds ---\" % (time.time() - start_time)))\n\n # Compute sun vs observer\n s.compute()\n if debug:\n print((\"--- (s4-3) %s seconds ---\" % (time.time() - start_time)))\n\n # Work out if day or night based on sunrises and sunsets\n mask_value = 0\n try:\n\n # get sunrise time and date\n next_rising = o.next_rising(s)\n next_setting = o.next_setting(s)\n\n # convert to datetime.datetime\n next_rising = add_days(ref_date, next_rising)\n next_setting = add_days(ref_date, next_setting)\n\n # did the sun last rise or set? (inc. 
any adjustments)\n sun_last_rose = False\n if next_setting < next_rising:\n sun_last_rose = True\n\n # Add buffer to rising/setting if provided with buffer_hours\n if buffer_hours != 0:\n\n # Calculate last rise\n previous_rising = o.previous_rising(s)\n # convert to datetime.datetime\n previous_rising = add_days(ref_date, previous_rising)\n # Calculate last setting\n previous_setting = o.previous_setting(s)\n # convert to datetime.datetime\n previous_setting = add_days(ref_date, previous_setting)\n\n # Calculate absolute difference\n time_from_rise = (date-previous_rising).total_seconds()\n time_till_set = (date-next_setting).total_seconds()\n time_from_set = (date-previous_setting).total_seconds()\n time_till_rise = (date-next_rising).total_seconds()\n\n # If absolutely difference less than buffer\n if abs(time_from_rise)/60./60. <= buffer_hours:\n mask_value = 1\n elif abs(time_till_set)/60./60. <= buffer_hours:\n mask_value = 1\n elif abs(time_from_set)/60./60. < buffer_hours:\n mask_value = 1\n elif abs(time_till_rise)/60./60. < buffer_hours:\n mask_value = 1\n\n # --- Check if daytime or nighttime and mask if condition met.\n if sun_last_rose:\n if mask_daytime:\n # ... and has not set yet, it must be daytime\n if (date < next_setting):\n mask_value = 1\n\n # if the sun last set... (mask nighttime is default)\n else:\n # if mask nighttime (aka not mask_daytime)\n if not mask_daytime:\n # ... and has not risen yet, it must be nighttime\n if (date < next_rising):\n mask_value = 1\n\n # Add gotcha for locations where sun is always up.\n except AlwaysUpError:\n if mask_daytime:\n mask_value = 1\n\n # Add gotcha for locations where sun is always down.\n except NeverUpError:\n if not mask_daytime:\n mask_value = 1\n\n except:\n print('FAIL')\n sys.exit()\n\n # Mask value in array\n return mask_value", "def constrain_sun(self, position: str) -> bool:\n if (position == \"up\" and self.sun_up()) or (\n position == \"down\" and self.sun_down()\n ):\n return True\n return False", "def hydro_solver(self):\n u_dx = self.central_x(self.u)\n w_dy = self.central_y(self.w)\n P_dx = self.central_x(self.P)\n P_dy = self.central_y(self.P)\n\n rho_dx_upwind = self.upwind_x(self.rho, self.u)\n rho_dy_upwwind = self.upwind_y(self.rho, self.w)\n rho_udx_upwind = self.upwind_x(self.rho * self.u, self.u)\n rho_udy_upwind = self.upwind_y(self.rho * self.u, self.w)\n rho_wdx_upwind = self.upwind_x(self.rho * self.w, self.u)\n rho_wdy_upwind = self.upwind_y(self.rho * self.w, self.w)\n u_dx_uu = self.upwind_x(self.u, self.u)\n u_dx_uw = self.upwind_x(self.u, self.w)\n w_dy_uu = self.upwind_y(self.w, self.u)\n w_dy_uw = self.upwind_y(self.w, self.w)\n e_dx = self.upwind_x(self.e, self.u)\n e_dy = self.upwind_y(self.e, self.w)\n\n self.rho_dt = (\n -self.rho * (u_dx + w_dy)\n - self.u * rho_dx_upwind\n - self.w * rho_dy_upwwind\n )\n self.e_dt = (\n -(self.e + self.P) * (u_dx + w_dy) - self.u * e_dx - self.w * e_dy\n )\n self.rho_udt = (\n -self.rho * self.u * (u_dx_uu + w_dy_uu)\n - self.u * rho_udx_upwind\n - self.w * rho_udy_upwind\n - P_dx\n )\n self.rho_wdt = (\n -self.rho * self.w * (u_dx_uw + w_dy_uw)\n - self.u * rho_wdx_upwind\n - self.w * rho_wdy_upwind\n - P_dy\n + self.rho * self.g\n )\n\n self.time_step()\n rho_previous = np.zeros_like(self.rho)\n rho_previous[:, :] = self.rho\n self.rho[:, :] = self.rho + self.rho_dt * self.dt\n self.e[:, :] = self.e + self.e_dt * self.dt\n self.u[:, :] = (\n rho_previous * self.u + self.rho_udt * self.dt\n ) / self.rho\n self.w[:, :] = (\n rho_previous * self.w + 
self.rho_wdt * self.dt\n ) / self.rho\n\n self.boundary_conditions()\n self.T[:, :] = (\n (self.Y - 1) * self.e * self.mu * self.m_u / (self.kb * self.rho)\n )\n self.P[:, :] = (self.Y - 1) * self.e\n uw = (self.u, self.w)\n v = np.linalg.norm(uw)\n dt = self.dt\n\n return dt", "def r2_law(array, i):\n npix = np.shape(array)[1]\n M = np.array([int(npix/2), int(npix/2)])\n yy, xx = np.ogrid[-M[0]:npix-M[0], -M[1]:npix-M[1]]\n\n np.seterr(divide='ignore')\n light = npix / (xx ** 2 + (yy / (np.cos(np.deg2rad(i)))) ** 2)\n\n light[light == np.inf] = 0\n return light * array", "def looks_azimuth(self) -> Optional[int]:\n return self._get_property(LOOKS_AZIMUTH_PROP, int)", "def alt_sunrise(cls, date):\n rise = cls.UJJAIN.dawn(date, angle(0, 47, 0))\n return 1/24 * 1/60 * iround(rise * 24 * 60)", "def runoff_land(water_raster, lambda_param, wp_raster, p_raster, s_pre_raster, fk_raster):\n r_land = Con(water_raster == 0,\n lambda_param * ((s_pre - wp_raster) ** 2) + overflow(p_raster, s_pre_raster, fk_raster), 0)\n\n print(time.strftime(\"%H:%M:%S: \") + \"Landabfluss ausgeführt.\")\n return r_land", "def check_if_in_shadow(psi, a_sat_vector, sun_pos):\n dot_prod = np.dot(a * n_vector.T, unit_sun_r(sun_pos))\n\n check = np.zeros((len(a_sat_vector)))\n if np.cos(psi) < 0 and a_sat_vector < r_earth and dot_prod <= r_earth:\n check = True\n\n return check", "def chance_of_rain(self):\r\n # Amount of yesterday's rain indicating chance of it occurring.\r\n NO_RAIN = 0.1\r\n LITTLE_RAIN = 3\r\n SOME_RAIN = 8\r\n # Chance of rain occurring.\r\n NONE = 0\r\n MILD = 40\r\n PROBABLE = 75\r\n LIKELY = 90\r\n\r\n if self._yesterdays_weather.get_rainfall() < NO_RAIN:\r\n chance_of_rain = NONE\r\n elif self._yesterdays_weather.get_rainfall() < LITTLE_RAIN:\r\n chance_of_rain = MILD\r\n elif self._yesterdays_weather.get_rainfall() < SOME_RAIN:\r\n chance_of_rain = PROBABLE\r\n else:\r\n chance_of_rain = LIKELY\r\n\r\n return chance_of_rain", "def set_rowland_radius(self, Rm, isJohansson=False):\n self.oe1.F_EXT = 1\n self.oe1.FMIRR = 1\n if isJohansson:\n self.oe1.set_johansson(Rm*2.)\n else:\n self.oe1.F_JOHANSSON = 0\n self.oe1.set_radius(Rm*2.)", "def _create_rain(self):\n r_calc = self._calculate_spacing()\n # Create the full screen of raindrops.\n for raindrop_y in range(r_calc[3]):\n self._create_raindrops_y(raindrop_y)", "def fireWest(self):\n self.rotate('w')\n gun = Laser(self)\n gun.shoot('w')\n self.agent.actionCompleted()", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def get_horizontal_solar(region: int) -> np.ndarray:\n\n return read_conditions.get_horizontal_solar(region)", "def penumbral_eclipse(sat, earth, sun, time):\n\n theta, theta_e, theta_s = eclipse_parameters(sat, earth, sun, time)\n return np.logical_and(np.abs(theta_e - theta_s) < theta,\n theta < (theta_e + theta_s))", "def 
thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def event1924():\n header(1924)\n if_not_in_world_area(1, 13, 0)\n if_not_in_world_area(1, 13, 1)\n if_condition_true(0, 1)\n flag.disable(11310201)", "def test_1b_is_stormy(self):\n random.seed( 9 )\n self.assertFalse(self.weather.is_stormy())", "def wears_jacket(temp, raining):\n\treturn raining or temp<60", "def test_handle_weather_tomorrow_calls_darksky_correctly(self):\n pass", "def aire(self, r):\r\n self.r_num(r)\r\n return self.r**2 * pi", "def fireEast(self):\n self.rotate('e')\n gun = Laser(self)\n gun.shoot('e')\n self.agent.actionCompleted()", "def func(self):\n account = self.account\n city_name = 'Phoenix' if not self.args else self.args\n a = Astral()\n a.solar_depression = 'civil'\n city = a[city_name]\n if not city:\n return\n timezone = city.timezone\n sun = city.sun(date=datetime.date.today(), local=True)\n\n account.msg('Information for %s/%s\\n' % (city_name, city.region))\n account.msg('Timezone: %s' % timezone)\n account.msg('Latitude: %.02f; Longitude: %.02f' % (city.latitude, city.longitude))\n account.msg('Dawn: %s' % str(sun['dawn']))\n account.msg('Sunrise: %s' % str(sun['sunrise']))\n account.msg('Noon: %s' % str(sun['noon']))\n account.msg('Sunset: %s' % str(sun['sunset']))\n account.msg('Dusk: %s' % str(sun['dusk']))", "def is_slope(self):\n\t\tif self.high_elevation != self.low_elevation:\n\t\t\treturn True\n\t\treturn False", "def apparent(self):\n jd = self.jd\n position_au = self.position.au.copy()\n observer = self.observer\n\n if observer.geocentric:\n include_earth_deflection = array((False,))\n else:\n limb_angle, nadir_angle = compute_limb_angle(\n position_au, observer.position.au)\n include_earth_deflection = limb_angle >= 0.8\n\n add_deflection(position_au, observer.position.au, observer.ephemeris,\n jd, include_earth_deflection)\n add_aberration(position_au, observer.velocity.au_per_d, self.light_time)\n\n a = Apparent(position_au, jd=jd)\n a.observer = self.observer\n return a", "def throw(self):\n\n self.vx = (2 * random.random()) - 1\n self.vy = (4 * random.random()) + 4", "def is_armed_night(self):\n return self in (\n ArmingState.ARMED_STAY_NIGHT,\n ArmingState.ARMED_STAY_NIGHT_BYPASS_PROA7,\n ArmingState.ARMED_STAY_NIGHT_INSTANT_PROA7,\n ArmingState.ARMED_STAY_NIGHT_INSTANT_BYPASS_PROA7,\n )", "def test_interpolation():\n spiral_arm = survey.get_spiral_slice(track = \"perseus\", \n interpolate = True)\n spiral_arm2 = survey.get_spiral_slice(track = \"Per\", \n interpolate = False)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)", "def sun_isoperimetric_ratio(image, mode_viz=False): # On distorted image\r\n try :\r\n _, _, sun_mask = sun_center(image)\r\n except TypeError :\r\n return np.nan\r\n # We blurr the image and re-binarize it\r\n blurred_mask = mahotas.gaussian_filter(sun_mask, 0.7)\r\n blurred_mask = (blurred_mask > blurred_mask.mean())\r\n # Obtain a binary image with the sun border in white pixels\r\n sun_perim_mask = mahotas.labeled.bwperim(blurred_mask, 8)\r\n # Compute the perimeter in pixels\r\n sun_perim = int(perimeter(sun_perim_mask))\r\n # Compute the surface in pixels\r\n sun_surface = np.sum(blurred_mask)\r\n # ratio = 4*pi*S/(P^2). 
Is in [0,1], equals 1 for a circle\r\n ratio = 4*np.pi*sun_surface/(sun_perim**2)\r\n if mode_viz:\r\n # Plot\r\n # print(f\"perimeter = {sun_perim} | surface = {sun_surface}\")\r\n fig = plt.figure(figsize=(12, 6))\r\n ax1 = fig.add_subplot(121)\r\n ax1.imshow(blurred_mask, cmap=\"gray\")\r\n ax2 = fig.add_subplot(122)\r\n ax2.imshow(sun_perim_mask, cmap=\"gray\")\r\n plt.show()\r\n return np.round(ratio, 3)", "def H_outside(self, r):\n\n mu_star = self.mu_star(r)\n\n res = 0.5 * self.S * (0.5 * (1. - mu_star**2) -\n quad(self.H_integ_outside, mu_star, 1,\n args=(r,))[0])\n return res", "def isFim(self):\r\n return self.sair", "def is_artificial(self):\n\t\treturn 0", "def power_output_existing_solar_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_6[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * m.C_MC[g, y])\r\n == 0)\r\n\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_6[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * (1 + (1 / m.INTEREST_RATE)) * m.C_MC[g, y])\r\n == 0)", "def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 5)\n star4.setFill('white')\n star4.draw(self.win)", "def radialOuter(self):\n if self.radial in range(1, len(self.ThRZmesh.getPositions(label=\"R\"))):\n R = self.ThRZmesh.getUpper(label=\"R\", n=(self.radial))\n else:\n runLog.warning(\n \"Error: Radial Index ({0}) location not INSIDE mesh \".format(\n self.radial\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def draw_aim(self):\n polygon(screen, self.color, [(self.x, self.y), (self.x + self.r * 1.71 / 2, self.y - self.r / 2),\n (self.x + self.r * 1.71, self.y), (self.x + self.r * 1.71, self.y + self.r),\n (self.x + self.r * 1.71 / 2, self.y + 3 * self.r / 2), (self.x, self.y + self.r)])", "def rusle(self):\n\n # assign variables\n ls_factor = 'ls_factor'\n slope = 'slope'\n grow_slope = 'grow_slope'\n flowacc = 'flowacc'\n sedflow = 'sedflow'\n sedflux = 'flux'\n\n # parse, advance, and stamp time\n (evolved_elevation, time, depth, sediment_flux, erosion_deposition,\n difference) = self.parse_time()\n\n # compute event-based erosivity (R) factor (MJ mm ha^-1 hr^-1 yr^-1)\n r_factor = self.event_based_r_factor()\n\n # compute slope\n gscript.run_command(\n 'r.slope.aspect',\n 
elevation=self.elevation,\n slope=slope,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n\n # compute flow accumulation\n gscript.run_command(\n 'r.watershed',\n elevation=self.elevation,\n accumulation=flowacc,\n flags=\"a\",\n overwrite=True)\n region = gscript.parse_command(\n 'g.region', flags='g')\n res = region['nsres']\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{depth}\"\n \"=({flowacc}*{res})\".format(\n depth=depth,\n flowacc=flowacc,\n res=res),\n overwrite=True)\n\n # compute dimensionless topographic factor\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{ls_factor}\"\n \"=({m}+1.0)\"\n \"*(({flowacc}/22.1)^{m})\"\n \"*((sin({slope})/5.14)^{n})\".format(\n ls_factor=ls_factor,\n m=self.m,\n flowacc=depth,\n slope=slope,\n n=self.n),\n overwrite=True)\n\n # compute sediment flow\n \"\"\"E = R * K * LS * C * P\n where\n E is average annual soil loss\n R is erosivity factor\n K is soil erodibility factor\n LS is a dimensionless topographic (length-slope) factor\n C is a dimensionless land cover factor\n P is a dimensionless prevention measures factor\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{sedflow}\"\n \"={r_factor}\"\n \"*{k_factor}\"\n \"*{ls_factor}\"\n \"*{c_factor}\".format(\n sedflow=sedflow,\n r_factor=r_factor,\n k_factor=self.k_factor,\n ls_factor=ls_factor,\n c_factor=self.c_factor),\n overwrite=True)\n\n # convert sediment flow from tons/ha/yr to kg/m^2s\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{converted_sedflow}\"\n \"={sedflow}\"\n \"*{ton_to_kg}\"\n \"/{ha_to_m2}\"\n \"/{yr_to_s}\".format(\n converted_sedflow=sedflux,\n sedflow=sedflow,\n ton_to_kg=1000.,\n ha_to_m2=10000.,\n yr_to_s=31557600.),\n overwrite=True)\n\n # filter outliers\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{sediment_flux}\"\n \"=if({sedflux}>{erdepmax},{erdepmax},{sedflux})\".format(\n sediment_flux=sediment_flux,\n sedflux=sedflux,\n erdepmax=self.erdepmax),\n overwrite=True)\n gscript.run_command(\n 'r.colors',\n map=sediment_flux,\n color='viridis',\n flags='g')\n\n # evolve landscape\n \"\"\"\n change in elevation (m)\n = change in time (s)\n * sediment flux (kg/ms)\n / mass of sediment per unit area (kg/m^2)\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{evolved_elevation}\"\n \"={elevation}\"\n \"-({rain_interval}*60\"\n \"*{sediment_flux}\"\n \"/{mass})\".format(\n evolved_elevation=evolved_elevation,\n elevation=self.elevation,\n rain_interval=self.rain_interval,\n sediment_flux=sediment_flux,\n mass=self.mass),\n overwrite=True)\n\n # gravitational diffusion\n evolved_elevation = self.gravitational_diffusion(evolved_elevation)\n\n # compute elevation change\n difference = self.compute_difference(evolved_elevation, difference)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['slope',\n 'grow_slope',\n 'flowacc',\n 'sedflow',\n 'flux',\n 'settled_elevation',\n 'divergence',\n 'r_factor',\n 'ls_factor'],\n flags='f')\n\n return (evolved_elevation, time, depth, sediment_flux, difference)", "def _check_und_conds(xss, axs, **kw):\n und = []\n for xs in xss:\n und += [(i.y <= 0) for i in xs.conds]\n if(any(und)):\n #draw ground surface line\n xl = axs[0].get_xlim()\n kw['H'].append(axs[0].plot(xl, [0]*2, 
':', color=_ground_surface_color,\n linewidth=_ground_surface_linewidth)[0])\n kw['L'].append('Ground Surface')\n for ax in axs:\n #apply padding\n yl = ax.get_ylim()\n r = yl[1] - yl[0]\n ax.set_ylim(yl[0] - r*0.05, yl[1])\n else:\n for ax in axs:\n yl = ax.get_ylim()\n ax.set_ylim(0, yl[1])", "def update_conditions(self) -> None:\n self.log.debug(\"Updating conditions.\")\n\n self.models[\"sky\"].update(self.models[\"observatory_state\"].time)\n\n if self.is_night is None:\n self.log.debug(\"Driver not initialized yet. Computing night parameters.\")\n # Driver was not initialized yet. Need to compute night\n # boundaries\n\n (self.current_sunset, self.current_sunrise) = self.models[\n \"sky\"\n ].get_night_boundaries(self.parameters.night_boundary)\n\n self.is_night = (\n self.current_sunset\n <= self.models[\"observatory_state\"].time\n < self.current_sunrise\n )\n\n self.log.debug(\n f\"Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise}, \"\n f\"sun @ {self.parameters.night_boundary} degrees.\"\n )\n\n is_night = self.is_night\n\n self.is_night = (\n self.current_sunset\n <= self.models[\"observatory_state\"].time\n < self.current_sunrise\n )\n\n # Only compute night boundaries when we transition from nighttime to\n # daytime. Possibilities are:\n # 1 - self.is_night=True and is_night = True: During the night (no need\n # to compute anything).\n # 2 - self.is_night=False and is_night = True: Transitioned from\n # night/day (need to recompute night boundaries).\n # 3 - self.is_night=True and is_night = False: Transitioned from\n # day/night (no need to compute anything).\n # 4 - self.is_night=False and is_night = False: During the day, no need\n # to compute anything.\n if not self.is_night and is_night:\n self.log.debug(\n \"Night over. Computing next night boundaries. 
\"\n f\"Assuming sun elevation of {self.parameters.night_boundary}.\"\n )\n self.night += 1\n (self.current_sunset, self.current_sunrise) = self.models[\n \"sky\"\n ].get_night_boundaries(self.parameters.night_boundary)\n\n self.log.debug(\n f\"[{self.night}]: Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise} \"\n )", "def inverted_hammer(self):\n self.data['inverted_hammer'] = (((self.data['High'] - self.data['Low']) > ((self.data['Open'] - self.data['Close'])*3)) & \\\n ((self.data['High'] - self.data['Close']) / ((.001 + self.data['High'] - self.data['Low']) > 0.6)) & \\\n ((self.data['High'] - self.data['Open']) / ((.001 + self.data['High'] - self.data['Low']) > 0.6)))", "def get_weather_with_time(time):\n global DARK\n\n if TIME in range(6, 9):\n DARK = False\n return 1\n elif TIME in range(9, 13):\n return 2\n elif TIME in range(13, 16):\n return 3\n elif TIME in range(16, 19):\n if HAS_RAINCOAT:\n return 4\n else:\n if not NICE_WEATHER:\n add_strength(False, 10)\n return 5\n\n elif TIME in range(19, 22):\n if HAS_RAINCOAT:\n return 7\n else:\n if not NICE_WEATHER:\n add_strength(False, 10)\n return 6\n\n else: # 9 - 6am\n DARK = True\n if HAS_FLASHLIGHT:\n return 9\n else:\n return 8", "def elevation(self, rover):\n\t\tcurrent_tile = rover.planet.tiles[rover.y][rover.x]\n\t\t#current_tile is slope\n\t\tif current_tile.is_slope():\n\t\t\t#self is slope current_tile is slope\n\t\t\tif self.is_slope():\n\t\t\t\tif current_tile.high_elevation == self.low_elevation:\n\t\t\t\t\treturn \"/\"\n\t\t\t\tif current_tile.low_elevation == self.high_elevation:\n\t\t\t\t\treturn \"\\\\\"\n\t\t\t\tif self.high_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\t\tif self.low_elevation > current_tile.high_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.low_elevation == current_tile.low_elevation\\\n\t\t\t\t\tand self.high_elevation == current_tile.high_elevation:\n\t\t\t\t\treturn \" \"\n\t\t\t#self is flat current_tile is slope\n\t\t\telse:\n\t\t\t\tif self.low_elevation > current_tile.high_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.low_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\t\treturn \" \"\n\n\n\t\telse: #current_tile is flat\n\t\t\t#self is slope current_tile is flat\n\t\t\tif self.is_slope():\n\t\t\t\tif self.low_elevation == current_tile.low_elevation:\n\t\t\t\t\treturn \"/\"\n\t\t\t\tif self.high_elevation == current_tile.low_elevation:\n\t\t\t\t\treturn \"\\\\\"\n\t\t\t\tif self.low_elevation > current_tile.low_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.high_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\t#self is flat current_tile is flat\n\t\t\telse:\n\t\t\t\tif self.low_elevation > current_tile.low_elevation:\n\t\t\t\t\treturn \"+\"\n\t\t\t\tif self.high_elevation < current_tile.low_elevation:\n\t\t\t\t\treturn \"-\"\n\t\t\treturn \" \"", "def calc_spectral_olr(self, atmosphere, surface):\n f, _, irradiance_field, _ = self.calc_spectral_irradiance_field(\n atmosphere=atmosphere, t_surface=surface[\"temperature\"][0]\n )\n return f, irradiance_field[:, -1, 0, 0, 1]", "def __init__(self, thresh=0.1, ar_depth=1, rnd_key=random.PRNGKey(42)):\n self.thresh = thresh\n self.ar_depth = ar_depth\n\n params = random.normal(rnd_key, shape=(ar_depth+1, ))\n self.did_fit = False\n super().__init__('RainDay', params)", "def slopecal(ton):\r\n global _ton_old\r\n global _slope\r\n\r\n print(\"ton: \", ton, \" _ton_old: \", _ton_old, \" substraction: \", round((ton - _ton_old),2))\r\n\r\n if 
round((ton - _ton_old), 2) >= 5:\r\n _slope = 1\r\n if round((ton - _ton_old), 2) <= -10:\r\n _slope = -1\r\n _ton_old = ton", "def sunrise(self):\r\n try:\r\n return str(self.connect()['sys']['sunrise'])\r\n except:\r\n return '@weather_sunrise'", "def morir(self):\n self.energia = 0\n self.vivo = False", "def determine_analytic_solution(self):\n\n self._Janalytic = np.where(self.xr <= self.xint, self.S, 0.5 * self.S)\n self._Hanalytic = np.where(self.xr <= self.xint, 0, 0.25 * self.S)\n self._Kanalytic = np.where(self.xr <= self.xint, 1./3. * self.S,\n 1./6. * self.S)", "def hanging_man(self):\n self.data['hanging_man'] = (((self.data['High'] - self.data['Low']) > ((self.data['Open'] - self.data['Close'])*4)) & \\\n ((self.data['Close'] - self.data['Low']) / ((.001 + self.data['High'] - self.data['Low']) >= 0.75)) &\\\n ((self.data['Open'] - self.data['Low']) / ((.001 + self.data['High'] - self.data['Low']) >= .075)))", "def _update_rain(self):\n self.rain.update()\n self._make_new_drops()", "def SearchRiseSet(body, observer, direction, startTime, limitDays):\n if body == Body.Earth:\n raise EarthNotAllowedError()\n elif body == Body.Sun:\n body_radius = _SUN_RADIUS_AU\n elif body == Body.Moon:\n body_radius = _MOON_EQUATORIAL_RADIUS_AU\n else:\n body_radius = 0.0\n\n if direction == Direction.Rise:\n ha_before = 12.0 # minimum altitude (bottom) happens BEFORE the body rises.\n ha_after = 0.0 # maximum altitude (culmination) happens AFTER the body rises.\n elif direction == Direction.Set:\n ha_before = 0.0 # culmination happens BEFORE the body sets.\n ha_after = 12.0 # bottom happens AFTER the body sets.\n else:\n raise Error('Invalid value for direction parameter')\n\n context = _peak_altitude_context(body, direction, observer, body_radius)\n\n # See if the body is currently above/below the horizon.\n # If we are looking for next rise time and the body is below the horizon,\n # we use the current time as the lower time bound and the next culmination\n # as the upper bound.\n # If the body is above the horizon, we search for the next bottom and use it\n # as the lower bound and the next culmination after that bottom as the upper bound.\n # The same logic applies for finding set times, only we swap the hour angles.\n time_start = startTime\n alt_before = _peak_altitude(context, time_start)\n if alt_before > 0.0:\n # We are past the sought event, so we have to wait for the next \"before\" event (culm/bottom).\n evt_before = SearchHourAngle(body, observer, ha_before, time_start)\n time_before = evt_before.time\n alt_before = _peak_altitude(context, time_before)\n else:\n # We are before or at the sought ebvent, so we find the next \"after\" event (bottom/culm),\n # and use the current time as the \"before\" event.\n time_before = time_start\n\n evt_after = SearchHourAngle(body, observer, ha_after, time_before)\n alt_after = _peak_altitude(context, evt_after.time)\n\n while True:\n if alt_before <= 0.0 and alt_after > 0.0:\n # Search between the \"before time\" and the \"after time\" for the desired event.\n event_time = Search(_peak_altitude, context, time_before, evt_after.time, 1.0)\n if event_time is not None:\n return event_time\n # We didn't find the desired event, so use the \"after\" time to find the next \"before\" event.\n evt_before = SearchHourAngle(body, observer, ha_before, evt_after.time)\n evt_after = SearchHourAngle(body, observer, ha_after, evt_before.time)\n if evt_before.time.ut >= time_start.ut + limitDays:\n return None\n time_before = evt_before.time\n alt_before = 
_peak_altitude(context, evt_before.time)\n alt_after = _peak_altitude(context, evt_after.time)", "def check_angle_of_arcs(self):\n\n if self.thin_arc_start_angle >= 3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle += 360\n\n elif self.thin_arc_start_angle <= -3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle -= 360\n\n if self.thin_arc_end_angle >= 3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle += 360\n\n elif self.thin_arc_end_angle <= -3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle -= 360\n\n if self.thick_arc_start_angle >= 3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle += 360\n\n elif self.thick_arc_start_angle <= -3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle -= 360\n\n if self.thick_arc_end_angle >= 3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle += 360\n\n elif self.thick_arc_end_angle <= -3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle -= 360", "def boundary_conditions(self):\n ce = 2 * self.dy * self.g * self.mu * self.m_u / self.kb\n self.e[0, :] = (4 * self.e[1, :] - self.e[2, :]) / (\n ce / self.T[0, :] + 3\n )\n self.rho[0, :] = (\n self.e[0, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[0, :])\n )\n self.u[0, :] = (4 * self.u[1, :] - self.u[2, :]) / 3\n self.w[0, :] = 0\n\n self.e[-1, :] = (4 * self.e[-2, :] - self.e[-3, :]) / (\n 3 - ce / self.T[-1, :]\n )\n self.rho[-1, :] = (\n self.e[-1, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[-1, :])\n )\n self.u[-1, :] = (4 * self.u[-2, :] - self.u[-3, :]) / 3\n self.w[-1, :] = 0", "def daily_insolation_limits(irrad, clearsky, daily_min=0.4, daily_max=1.25):\n daily_irradiance = _daily_total(irrad)\n daily_clearsky = _daily_total(clearsky)\n good_days = quality.util.check_limits(\n daily_irradiance/daily_clearsky,\n upper_bound=daily_max,\n lower_bound=daily_min\n )\n return good_days.reindex(irrad.index, method='pad', fill_value=False)", "def Illumination(body, time):\n if body == Body.Earth:\n raise EarthNotAllowedError()\n earth = _CalcEarth(time)\n if body == Body.Sun:\n gc = Vector(-earth.x, -earth.y, -earth.z, time)\n hc = Vector(0.0, 0.0, 0.0, time)\n phase = 0.0 # placeholder value; the Sun does not have a phase angle.\n else:\n if body == Body.Moon:\n # For extra numeric precision, use geocentric moon formula directly.\n gc = GeoMoon(time)\n hc = Vector(earth.x + gc.x, earth.y + gc.y, earth.z + gc.z, time)\n else:\n # For planets, heliocentric vector is most direct to calculate.\n hc = HelioVector(body, time)\n gc = Vector(hc.x - earth.x, hc.y - earth.y, hc.z - earth.z, time)\n phase = AngleBetween(gc, hc)\n\n geo_dist = gc.Length() # distance from body to center of Earth\n helio_dist = hc.Length() # distance from body to center of Sun\n ring_tilt = None # only reported for Saturn\n if body == Body.Sun:\n mag = -0.17 + 5.0*math.log10(geo_dist / _AU_PER_PARSEC)\n elif body == Body.Moon:\n mag = _MoonMagnitude(phase, helio_dist, geo_dist)\n elif body == Body.Saturn:\n mag, ring_tilt = _SaturnMagnitude(phase, helio_dist, geo_dist, gc, time)\n else:\n mag = _VisualMagnitude(body, phase, helio_dist, geo_dist)\n return IlluminationInfo(time, mag, phase, helio_dist, geo_dist, gc, hc, ring_tilt)" ]
[ "0.634991", "0.565729", "0.5482073", "0.5471151", "0.543911", "0.5316347", "0.52773964", "0.5272393", "0.52664614", "0.52653426", "0.52644473", "0.519665", "0.51890105", "0.5169418", "0.50999045", "0.5082697", "0.5082697", "0.508089", "0.5074152", "0.50740224", "0.5064365", "0.5052793", "0.5047427", "0.5037336", "0.50334084", "0.50233996", "0.50164187", "0.5014618", "0.5007739", "0.4992038", "0.49896023", "0.4988123", "0.49747714", "0.4971595", "0.49702844", "0.4951463", "0.49514607", "0.49501485", "0.4949333", "0.4941237", "0.49176484", "0.49059463", "0.48967886", "0.48962113", "0.48892975", "0.4877545", "0.487514", "0.4873331", "0.4873321", "0.48716813", "0.48707005", "0.48685095", "0.48572937", "0.485399", "0.484714", "0.4833243", "0.48318493", "0.4831488", "0.48306358", "0.48290846", "0.48256275", "0.48211417", "0.4817233", "0.4815825", "0.48155496", "0.48070467", "0.47940925", "0.47915906", "0.4788916", "0.47854358", "0.47853774", "0.4778968", "0.47783625", "0.47763062", "0.47753468", "0.47741693", "0.47698867", "0.47681624", "0.47667426", "0.47654617", "0.47627532", "0.47613105", "0.47591153", "0.47581643", "0.47565708", "0.47548732", "0.4754591", "0.4745711", "0.4745501", "0.47453845", "0.47419783", "0.47368607", "0.4724615", "0.4719487", "0.47148094", "0.47128704", "0.47048137", "0.4703963", "0.4699489", "0.469699" ]
0.5284681
6
The kinematic viscosity of air varies as a function of temperature.
def kinematic_viscosity_of_air(self) -> float:

        return self.dynamic_viscosity_of_air / self.density_of_air
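For context, a minimal runnable sketch of the temperature dependence this document captures (kinematic viscosity = dynamic viscosity / density), assuming the Sutherland-style dynamic viscosity and ideal-gas density helpers that appear among the negatives below; the AirProperties class name, constructor, default pressure, and the gas-constant value 287.05 J/(kg K) are illustrative assumptions, not part of the dataset row.

class AirProperties:
    """Hypothetical container tying together the air-property helpers above."""

    SPECIFIC_GAS_CONSTANT_OF_AIR = 287.05  # J/(kg K), assumed value

    def __init__(self, ambient_temperature: float, pressure: float = 101325.0) -> None:
        self.ambient_temperature = ambient_temperature  # Kelvin
        self.pressure = pressure  # Pascal

    @property
    def dynamic_viscosity_of_air(self) -> float:
        # Sutherland-style correlation, as given in one of the negatives below.
        return (1.458e-6 * self.ambient_temperature**1.5) / (self.ambient_temperature + 110.4)

    @property
    def density_of_air(self) -> float:
        # Ideal-gas density, as given in one of the negatives below.
        return self.pressure / (self.SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)

    @property
    def kinematic_viscosity_of_air(self) -> float:
        # The retrieved document: nu = mu / rho, which varies with temperature
        # because both mu and rho are temperature dependent.
        return self.dynamic_viscosity_of_air / self.density_of_air


if __name__ == "__main__":
    for temperature_k in (273.15, 300.0, 350.0):
        print(temperature_k, AirProperties(temperature_k).kinematic_viscosity_of_air)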
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def dispersion(self, k):\n return np.sqrt(\n 1 + (\n (k*self.material.hbar_m)**2 / (2*self.material.m_star_m) -\n self.material.z\n )**2\n )", "def viscosity(altitude):\n t_ref = temperature(0) # R\n t = temperature(altitude) # R\n s = 198.72 # R\n mu_ref = 3.737 * 10 ** (-7) # [slug/(ft*s)]\n mu = mu_ref*((t/t_ref)**(3/2))*(t_ref + s)/(t + s) # [slug/(ft*s)]\n return mu", "def dispersion(self, k):\n return 2*self.material.gamma*np.sqrt(self.material.z)", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def kts(self):\n return CAL_TO_J * 0.0077 * (self.rho/1000.0) * (self.rho/1000.0)", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def feller(self):\n return 2 * self.kappa_y * self.mean_v - self.eta_y**2 > 0", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f", "def von_klitzing_constant(self):\n return self._von_klitzing_constant", "def VTrue(h,Vc,p,Temp_m):\n M = Mach(h,Vc,p)\n return M*np.sqrt(gamma*R*Static_T(Temp_m,M))", "def temperature() -> float:", "def conductivity(self, T):\n m = self.mass\n mu = self.viscosity(T)\n K = (15/4) * kB * mu / m\n return K", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def air_density(self):\n return self.flow_field.air_density", "def _k(self, T):\n RT = Rgas * T\n return (self.parameters.A1 / np.exp(self.parameters.E1 / RT),\n self.parameters.A2 / np.exp(self.parameters.E2 / RT))", "def kineticEnergy(self):\n return self.params['kinetic']", "def viscosity(self, T):\n m = self.mass\n w = self.omega\n Tr = self.t_ref\n dr = self.d_ref\n a = self.alpha\n \n mu_ref_numerator = (5 * (1 + a) * (2 + a) * sqrt(kB * m * Tr / pi))\n mu_ref_denominator = 4 * a * dr **2 * (7 - 2 * w) * (5 - 2 * w)\n mu_ref = mu_ref_numerator / mu_ref_denominator\n \n mu = mu_ref * (T / Tr) ** w\n return mu", "def calculate_visibility(qv,qc,qr,qi,qs,T,p):\n Rd = 287.\n COEFLC = 144.7\n COEFLP = 2.24\n COEFFC = 327.8\n COEFFP = 10.36\n EXPLC = 0.88\n EXPLP = 0.75\n EXPFC = 1.\n EXPFP = 0.7776\n\n Tv = T * (1+0.61*qv) # Virtual temperature\n\n rhoa = p/(Rd*Tv) # Air density [kg m^-3]\n rhow = 1e3 # Water density [kg m^-3]\n rhoi = 0.917e3 # Ice density [kg m^-3]\n\n vovmd = (1+qv)/rhoa + (qc+qr)/rhow + (qi+qs)/rhoi\n\n conc_lc = 1e3*qc/vovmd\n conc_lp = 1e3*qr/vovmd\n conc_fc = 1e3*qi/vovmd\n conc_fp = 1e3*qs/vovmd\n\n # Make sure all concentrations are positive\n conc_lc[conc_lc < 0] = 0\n conc_lp[conc_lp < 0] = 0\n conc_fc[conc_fc < 0] = 0\n conc_fp[conc_fp < 0] = 0\n\n betav = COEFFC*conc_fc**EXPFC\\\n + COEFFP*conc_fp**EXPFP\\\n + COEFLC*conc_lc**EXPLC\\\n + COEFLP*conc_lp**EXPLP+1E-10\n\n vis = -np.log(0.02)/betav # Visibility [km]\n vis[vis > 24.135] = 24.135\n\n return vis", "def kelvin_effect(pres, surft, 
temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))", "def kinetic_energy(self, sys):\n v = sys.velocities\n m = sys.mass\n return 0.5*np.dot(m, np.multiply(v, v))", "def func_kc_318(n, series):\n if series == \"3D3\":\n try:\n return 2*np.pi/(wl_3D3[str(n)]*1e-9)\n except:\n return 0", "def test_virtual_potential_temperature():\n p = 999. * units.mbar\n t = 288. * units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n theta_v = virtual_potential_temperature(p, t, qv)\n assert_almost_equal(theta_v, 288.3620 * units.kelvin, 3)", "def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def molar_mass_dry_air():\n return 28.9647", "def k_Ni00(wind_ms, temp_C):\n\n U = wind_ms\n\n Sc = schmidt_number(temp_C)\n k = (0.333 * U + 0.222 * U ** 2) * (600 / Sc) ** 0.5\n\n return k", "def liquid_viscosity(id, temperature=298.15, pressure=constants.atm): # noqa: A002\n return rx._misc._get_chemical(id, temperature, pressure).mul # noqa: SLF001", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def variable_vis(self):\n return self._variable_vis", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def get_vsolar(self):\n return self.read_register(4098, 1, 3)", "def _ani_ic_on_planck(electron_energy, soft_photon_temperature, gamma_energy, theta):\n Ktomec2 = 1.6863699549e-10\n soft_photon_temperature *= Ktomec2\n\n def G12(x, a):\n \"\"\"\n Eqs 20, 24, 25\n \"\"\"\n alpha, a, beta, b = a\n pi26 = np.pi ** 2 / 6.0\n G = (pi26 + x) * np.exp(-x)\n tmp = 1 + b * x ** beta\n g = 1. / (a * x ** alpha / tmp + 1.)\n return G * g\n\n gamma_energy = np.vstack(gamma_energy)\n # Parameters from Eqs 21, 22\n a1 = [0.857, 0.153, 1.840, 0.254]\n a2 = [0.691, 1.330, 1.668, 0.534]\n z = gamma_energy / electron_energy\n ttheta = 2. * electron_energy * soft_photon_temperature * (1. - np.cos(theta))\n x = z / (1 - z) / ttheta\n # Eq. 
11\n cross_section = z ** 2 / (2 * (1 - z)) * G12(x, a1) + G12(x, a2)\n tmp = (soft_photon_temperature / electron_energy) ** 2\n # r0 = (e**2 / m_e / c**2).to('cm')\n # (2 * r0 ** 2 * m_e ** 3 * c ** 4 / (pi * hbar ** 3)).cgs\n tmp *= 2.6318735743809104e+16\n cross_section = tmp * cross_section\n cc = ((gamma_energy < electron_energy) * (electron_energy > 1))\n return np.where(cc, cross_section,\n np.zeros_like(cross_section))", "def test_virtual_temperature():\n t = 288. * units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n tv = virtual_temperature(t, qv)\n assert_almost_equal(tv, 288.2796 * units.kelvin, 3)", "def KineticEnergy(self):\n return Particle.TotalEnergy(self) - Particle.RestEnergy(self)", "def k3(self) -> float:\n return self.distortion_coefficients[2]", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def kin_energy (self):\n\n for planet in self.planets:\n planet.kenergy = 0.5*planet.mass*((np.linalg.norm(planet.velocity))**2) # every 'kenergy' depends by the body's mass and velocity", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def _CloudVar(self): \n # q is MA order of ARMA(1,q)\n q = int(round(self.lambda_avg/self.lambda_s))\n a = exp(-self.lambda_s / self.lambda_p) \n (var, var_ratio) = self._ARMAvar(q, a)\n # This variance is a multiple of the variance of the noise driving the\n # AR(1) model. This variance, in turn, is a multiple of the underlying\n # measurement variance, with the relationship given in Gillespie 96\n var = var * (1. - exp(-2*self.lambda_s / self.lambda_p))/2\n # print q, a\n return var", "def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def true_an(self):\n vel = self.getvel_sph()\n e_vec = self.get_ecc()\n pos = self.getpos_xyz()\n if np.linalg.norm(e_vec) == 0:\n h_mom = self.sp_ang_mom()\n e_vec = np.cross([0, 0, 1], h_mom)\n true_th = np.arccos(np.dot(e_vec, pos)/(np.linalg.norm(e_vec) *\n np.linalg.norm(pos)))\n if vel[0] < 0:\n true_th = 2*np.pi - true_th\n return true_th", "def rad_field_initial_condition(self):\n\n # revert in viewing direct\n angle, _ = f.convert_direction(self.receiver_elevation, self.receiver_azimuth)\n # Looking at the sky\n if angle < 90:\n I_init = (\n self.sun_intensity\n * f.delta_func(self.sun_elevation - self.receiver_elevation)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n # Looking at the ground\n elif angle > 90:\n I_ground = RT_model_1D.calc_direct_beam_intensity(self, 0)\n\n I_lambert = (\n I_ground\n * self.ground_albedo\n * np.cos(np.deg2rad((self.sun_elevation + 180) % 360))\n )\n\n I_specular = (\n I_ground\n * self.ground_albedo\n * f.delta_func(self.sun_elevation + self.receiver_elevation - 180)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n I_init = (\n 1 - self.reflection_type\n ) * I_lambert + self.reflection_type * I_specular\n\n else:\n I_init = np.empty(self.stokes_dim)\n I_init.fill(np.nan)\n\n return I_init", "def is_artificial(self):\n\t\treturn 0", "def mTV(self):\n distance = abs(self.vertPosT - self.vertPosW) # distance between htp and vortex shred plane,\n # approximated with the wing root chordplane\n return distance / (self.spanW / 2)", "def LotkaVolterra_Dynamics(self):\n LV_c = self.toConceptual(self.state) # (nF, nR)\n LV_c = LV_c.mul((1 - LV_c) + self.LV_inhM.mm(LV_c))\n LV_s = 
self.toNeural(LV_c)\n\n return LV_c, LV_s", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. Check the sensor\")", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def constraint_k_pi_invis(self):\n width_contr = 0.0\n ms = self.ms\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if np.any([s[0] <= ms**2 <= s[1] for s in k_pi_invis_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n width_s_sm = width_s - widths_s[\"x x\"] # Gamma_{S->SM}\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mk - mpi - ms) * (mk + mpi - ms) * (mk - mpi + ms) * (mk + mpi + ms)\n ) / (2.0 * mk)\n # Probability that S decays outside the detector\n pr_invis = np.exp(-k_pi_invis_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # Compute the total contribution to the invisible decay width\n width_contr = (\n self.width_k_pi_s() * (widths_s[\"x x\"] + pr_invis * width_s_sm) / width_s\n )\n\n return k_pi_invis_obs.width_bound - width_contr", "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def getEnthalpyOfVaporization(self,Temperature):\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\n\t\t# Eqn 7 from Epstein et al 2009\n\t\tHvap = 2.303*8.3145*Temperature*Temperature*B/((C + Temperature - 273.15)*(C + Temperature - 273.15))\n\t\treturn Hvap # units are J/molK", "def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. 
J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def vcrit(Te):\n vcrit = 3.0*np.sqrt(np.pi)/4.*(2.*eV2J/me)**(1.5)*(me/mi)*np.sqrt(Te**3.)\n return vcrit", "def k_Li86(wind_ms, temp_C):\n from numpy import zeros_like\n\n U = wind_ms\n T = temp_C\n\n Sc = schmidt_number(T)\n k = zeros_like(temp_C)\n\n i1 = U <= 3.6\n i2 = (U > 3.6) & (U < 13.0)\n i3 = U >= 13.0\n\n k[i1] = (0.17 * U[i1]) * (Sc[i1] / 600) ** (-2.0 / 3.0)\n k[i2] = ((U[i2] - 3.4) * 2.8) * (600 / Sc[i2]) ** 0.5\n k[i3] = ((U[i3] - 8.4) * 5.9) * (600 / Sc[i3]) ** 0.5\n\n return k", "def sky_observed(seed=425, th=150, old=False):\n \n # impact parameters\n M = 3e7*u.Msun\n #M = 6e7*u.Msun\n B = 19.95*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n old_label = ''\n \n if old:\n old_label = '_old'\n \n # impact parameters\n M = 5e7*u.Msun\n B = 19.8*u.kpc\n V = 210*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 150\n theta = coord.Angle(th*u.deg)\n Tenc = 0.05*u.Gyr\n T = 2*u.Gyr\n dt = 0.1*u.Myr\n #dt = 1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n alt_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': -45*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n xr = 20*u.kpc + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n # closest to impact\n ienc = np.argmin(np.abs(x))\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = 
veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal_alt = coord.Galactocentric(stream['x'], **alt_observer)\n xeq_alt = xgal_alt.transform_to(coord.ICRS)\n veq_alt_ = gc.vgal_to_hel(xeq_alt, stream['v'], **vobs)\n veq_alt = [None] * 3\n veq_alt[0] = veq_alt_[0].to(u.mas/u.yr)\n veq_alt[1] = veq_alt_[1].to(u.mas/u.yr)\n veq_alt[2] = veq_alt_[2].to(u.km/u.s)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal0_alt = coord.Galactocentric(stream0['x'], **alt_observer)\n xeq0_alt = xgal0_alt.transform_to(coord.ICRS)\n veq0_alt_ = gc.vgal_to_hel(xeq0_alt, stream0['v'], **vobs)\n veq0_alt = [None] * 3\n veq0_alt[0] = veq0_alt_[0].to(u.mas/u.yr)\n veq0_alt[1] = veq0_alt_[1].to(u.mas/u.yr)\n veq0_alt[2] = veq0_alt_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n # alternative observer\n R_alt = find_greatcircle(xeq0_alt.ra.deg[::10], xeq0_alt.dec.deg[::10])\n xi0_alt, eta0_alt = myutils.rotate_angles(xeq0_alt.ra, xeq0_alt.dec, R_alt)\n xi0_alt = coord.Angle(xi0_alt*u.deg)\n \n # place gap at xi~0\n xioff_alt = xi0_alt[ienc]\n xi0_alt -= xioff_alt\n \n xi_alt, eta_alt = myutils.rotate_angles(xeq_alt.ra, xeq_alt.dec, R_alt)\n xi_alt = coord.Angle(xi_alt*u.deg)\n xi_alt -= xioff_alt\n \n\n # observed gd1\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n \n vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-1.5, 1.5], [-1.5, 1.5], [-30,30]]\n color = '0.35'\n ms = 4\n alpha = 0.7\n \n # plotting\n plt.close()\n fig, ax = plt.subplots(2,4,figsize=(17,8), sharex=True, sharey='col')\n \n plt.sca(ax[0][0])\n plt.plot(xi.wrap_at(wangle), eta, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n #plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n plt.sca(ax[1][0])\n plt.plot(xi_alt.wrap_at(wangle), eta_alt, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n xeqs = [xeq.ra, xeq.dec, xeq.distance.to(u.kpc)]\n dv = []\n dv_alt = []\n for i in range(3):\n plt.sca(ax[0][i+1])\n \n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n dv += [veq[i]-vexp]\n plt.plot(xi.wrap_at(wangle), dv[i], '.', mec='none', color=color, 
ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n \n plt.sca(ax[1][i+1])\n # interpolate expected kinematics from an unperturbed stream\n vexp_alt = np.interp(xi_alt.wrap_at(wangle), xi0_alt.wrap_at(wangle), veq0_alt[i].value) * veq0_alt[i].unit\n dv_alt += [veq_alt[i]-vexp_alt]\n plt.plot(xi_alt.wrap_at(wangle), dv_alt[i], '.', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n plt.xlabel('$\\phi_1$ [deg]')\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n np.random.seed(seed+1)\n fgaia = np.sqrt(2/5)\n print(2/5, fgaia)\n phi1 = xi[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin]\n pmra = dv[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv[1][idmin] + g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Stream', 'Spur']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[0][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n print(np.sqrt(np.sum(g['pmra_error'][mask]**2))/np.sum(mask))\n print(np.sqrt(np.sum(g['pmdec_error'][mask]**2))/np.sum(mask))\n\n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.0848, 0.0685])\n \n np.random.seed(seed+2)\n phi1 = xi[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin2]\n pmra = dv[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[0][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n ##############\n # alt observer\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & 
(g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n #np.random.seed(seed+3)\n phi1 = xi_alt[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta_alt[idmin]\n pmra = dv_alt[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv_alt[1][idmin] + g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[1][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.11, 0.08])\n \n np.random.seed(seed+6)\n phi1 = xi_alt[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = eta_alt[idmin2]\n pmra = dv_alt[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv_alt[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[1][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n plt.sca(ax[0][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = 60$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n\n plt.sca(ax[1][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = -45$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n plt.legend(fontsize='small', loc=3, handlelength=0.2)\n \n plt.suptitle('Expected astrometric performance', fontsize='medium')\n plt.tight_layout(rect=[0,0,1,0.94])\n plt.savefig('../plots/astrometric_performance.png')", "def get_D_C3H8_air(self, T):\n\n self.set_TempPres_dependents(T)\n\n self.D_C3H8_air = (\n 2. / 3. * np.sqrt(const.k_B * T / np.pi * 0.5 * (1. /\n self.air.m + 1. / self.fuel.m)) / (np.pi * (0.5 *\n (self.air.d + self.fuel.d)) ** 2.) 
/ self.air.n\n )\n\n return self.D_C3H8_air", "def useKineticEnergy(self):\n return self.params['useKinetic']", "def sensitivity(gas, surf, old_data, temp, dk, thermo=False):\n rxns = []\n sens1 = []\n sens2 = []\n sens3 = []\n sens4 = []\n sens5 = []\n sens6 = []\n sens7 = []\n sens8 = []\n sens9 = []\n sens10 = []\n sens11 = []\n sens12 = []\n\n gas_out_data, gas_names_data, dist_array_data, T_array_data = old_data\n\n reference = []\n for a in range(len(gas_names_data)):\n reference.append([gas_names_data[a], [gas_out_data[:, a]]])\n\n # getting the ratio\n for x in reference:\n if x[0] == 'CH4(2)':\n ch4_in = x[1][0][0]\n if x[0] == 'O2(3)':\n o2_in = x[1][0][0]\n if x[0] == 'Ar':\n ar_in = x[1][0][0]\n ratio = ch4_in / (2 * o2_in)\n moles_in = [ch4_in, o2_in, ar_in]\n\n for x in reference:\n if x[0] == 'CH4(2)':\n ch4_in = x[1][0][0]\n ch4_out = x[1][0][-1]\n if ch4_out < 0:\n ch4_out = 0.\n ch4_depletion = ch4_in - ch4_out\n if ch4_depletion <= 1.0e-8:\n ch4_depletion = 1.0e-8\n reference_ch4_conv = 1.0e-8\n else:\n reference_ch4_conv = ch4_depletion / ch4_in # Sensitivity definition 7: CH4 conversion\n if x[0] == 'Ar':\n ar = x[1][0][-1]\n if x[0] == 'O2(3)':\n o2_out = x[1][0][-1]\n if o2_out < 0:\n o2_out = 1.0e-15 # O2 can't be negative\n elif o2_out > o2_in:\n o2_out = o2_in # O2 can't be created, to make it equal to O2 in\n if x[0] == 'CO(7)':\n co_out = x[1][0][-1]\n if x[0] == 'H2(6)':\n h2_out = x[1][0][-1]\n if x[0] == 'H2O(5)':\n h2o_out = x[1][0][-1]\n if x[0] == 'CO2(4)':\n co2_out = x[1][0][-1]\n\n if reference_ch4_conv <= 1.0e-8:\n reference_h2_sel = 1.0e-8\n reference_co_sel = 1.0e-8\n reference_syngas_selectivity = 1.0e-8\n reference_syngas_yield = 1.0e-8\n reference_co_yield = 1.0e-8\n reference_h2_yield = 1.0e-8\n reference_full_oxidation_selectivity = 1.0e-8\n reference_full_oxidation_yield = 1.0e-8\n else:\n # negative sensitivity is higher selectivity\n reference_h2_sel = h2_out / (ch4_depletion * 2) # Sensitivity definition 5: H2 selectivity\n if reference_h2_sel <= 0:\n reference_h2_sel = 1.0e-15 # selectivity can't be 0\n\n reference_co_sel = co_out / ch4_depletion # Sensitivity definition 3: CO selectivity\n if reference_co_sel <= 0:\n reference_co_sel = 1.0e-15 # selectivity can't be 0\n\n reference_syngas_selectivity = reference_co_sel + reference_h2_sel # Sensitivity definition 1: SYNGAS selectivity\n\n reference_syngas_yield = reference_syngas_selectivity * reference_ch4_conv # Sensitivity definition 2: SYNGAS yield\n if reference_syngas_yield <= 0:\n reference_syngas_yield = 1.0e-15 # yield can't be 0\n\n reference_co_yield = co_out / ch4_in # Sensitivity definition 4: CO % yield\n # reference_co_yield = reference_co_sel * reference_ch4_conv\n\n reference_h2_yield = h2_out / (2 * ch4_in) # Sensitivity definition 6: H2 % yield\n # reference_h2_yield = reference_h2_sel * reference_ch4_conv\n\n # Sensitivity definition 8: H2O + CO2 selectivity\n reference_h2o_sel = h2o_out / (ch4_depletion * 2)\n reference_co2_sel = co2_out / ch4_depletion\n if reference_h2o_sel <= 0:\n reference_h2o_sel = 1.0e-15 # H2O selectivity can't be 0\n if reference_co2_sel <= 0:\n reference_co2_sel = 1.0e-15 # CO2 selectivity can't be 0\n reference_full_oxidation_selectivity = reference_h2o_sel + reference_co2_sel\n\n # Sensitivity definition 9: H2O + CO2 yield\n reference_full_oxidation_yield = reference_full_oxidation_selectivity * reference_ch4_conv\n\n # Sensitivity definition 10: exit temperature\n reference_exit_temp = T_array_data[-1]\n\n # Sensitivity definition 11: peak 
temperature\n reference_peak_temp = max(T_array_data)\n\n # Sensitivity definition 12: distance to peak temperautre\n reference_peak_temp_dist = dist_array_data[T_array_data.index(max(T_array_data))]\n\n # run the simulations\n if thermo is True:\n for m in range(surf.n_species):\n s = surf.species(m)\n original_coeffs = s.thermo.coeffs\n perturbed_coeffs = np.ones_like(original_coeffs)\n perturbed_coeffs[0] = original_coeffs[0]\n perturbed_coeffs[1:6] = original_coeffs[1:6]\n perturbed_coeffs[7:13] = original_coeffs[7:13]\n perturbed_coeffs[14] = original_coeffs[14]\n # perturbed_coeffs[6] = original_coeffs[6] + original_coeffs[6]*dk\n # perturbed_coeffs[13] = original_coeffs[13] + original_coeffs[13]*dk\n perturbed_coeffs[6] = original_coeffs[6] + dk\n perturbed_coeffs[13] = original_coeffs[13] + dk\n s.thermo = ct.NasaPoly2(100.000, 5000.000, ct.one_atm, perturbed_coeffs)\n surf.modify_species(m, s)\n c = monolithFull(gas, surf, temp, moles_in)\n\n gas_out, surf_out, gas_names, surf_names, dist_array, T_array = c\n\n new_amts = []\n for a in range(len(gas_names)):\n new_amts.append([gas_names[a], [gas_out[:, a]]])\n\n for x in new_amts:\n if x[0] == 'CH4(2)':\n new_ch4_in = x[1][0][0]\n new_ch4_out = x[1][0][-1]\n if new_ch4_out < 0:\n new_ch4_out = 0.\n new_ch4_depletion = new_ch4_in - new_ch4_out\n if new_ch4_depletion <= 1e-8:\n new_ch4_depletion = 1e-8\n new_ch4_conv = 1e-8\n else:\n new_ch4_conv = new_ch4_depletion / new_ch4_in # Sensitivity definition 7: CH4 conversion\n if x[0] == 'Ar':\n ar = x[1][0][-1]\n if x[0] == 'O2(3)':\n new_o2_in = x[1][0][0]\n new_o2_out = x[1][0][-1]\n if new_o2_out < 0:\n new_o2_out = 1.0e-15\n elif new_o2_out > new_o2_in:\n new_o2_out = new_o2_in\n if x[0] == 'CO(7)':\n new_co_out = x[1][0][-1]\n if x[0] == 'H2(6)':\n new_h2_out = x[1][0][-1]\n if x[0] == 'H2O(5)':\n new_h2o_out = x[1][0][-1]\n if x[0] == 'CO2(4)':\n new_co2_out = x[1][0][-1]\n\n if new_ch4_conv <= 1e-8:\n new_h2_sel = 1.0e-8\n new_co_sel = 1.0e-8\n new_syngas_selectivity = 1.0e-8\n new_syngas_yield = 1.0e-8\n new_co_yield = 1.0e-8\n new_h2_yield = 1.0e-8\n new_full_oxidation_selectivity = 1.0e-8\n new_full_oxidation_yield = 1.0e-8\n else:\n new_h2_sel = new_h2_out / (new_ch4_depletion * 2) # Sensitivity definition 5: H2 selectivity\n new_co_sel = new_co_out / new_ch4_depletion # Sensitivity definition 3: CO selectivity\n new_syngas_selectivity = new_co_sel + new_h2_sel # Sensitivity definition 1: SYNGAS selectivity\n new_syngas_yield = new_syngas_selectivity * new_ch4_conv # Sensitivity definition 2: SYNGAS yield\n new_co_yield = new_co_out / new_ch4_in # Sensitivity definition 4: CO % yield\n new_h2_yield = new_h2_out / (2 * new_ch4_in) # Sensitivity definition 6: H2 % yield\n new_h2o_sel = new_h2o_out / (new_ch4_depletion * 2) # Sensitivity definition 8: H2O + CO2 selectivity\n new_co2_sel = new_co2_out / new_ch4_depletion\n new_full_oxidation_selectivity = new_h2o_sel + new_co2_sel\n new_full_oxidation_yield = new_full_oxidation_selectivity * new_ch4_conv # Sensitivity definition 9: C2O + CO2 yield\n\n Sens5 = (new_h2_sel - reference_h2_sel) / (reference_h2_sel * dk)\n sens5.append(Sens5)\n\n Sens3 = (new_co_sel - reference_co_sel) / (reference_co_sel * dk)\n sens3.append(Sens3)\n\n Sens1 = (new_syngas_selectivity - reference_syngas_selectivity) / (reference_syngas_selectivity * dk)\n sens1.append(Sens1)\n\n Sens2 = (new_syngas_yield - reference_syngas_yield) / (reference_syngas_yield * dk)\n sens2.append(Sens2)\n\n Sens4 = (new_co_yield - reference_co_yield) / 
(reference_co_yield * dk)\n sens4.append(Sens4)\n\n Sens6 = (new_h2_yield - reference_h2_yield) / (reference_h2_yield * dk)\n sens6.append(Sens6)\n\n Sens7 = (new_ch4_conv - reference_ch4_conv) / (\n reference_ch4_conv * dk)\n sens7.append(Sens7)\n\n Sens8 = (new_full_oxidation_selectivity - reference_full_oxidation_selectivity) / (\n reference_full_oxidation_selectivity * dk)\n sens8.append(Sens8)\n\n Sens9 = (new_full_oxidation_yield - reference_full_oxidation_yield) / (reference_full_oxidation_yield * dk)\n sens9.append(Sens9)\n\n new_exit_temp = T_array[-1] # Sensitivity definition 10: exit temperature\n Sens10 = (new_exit_temp - reference_exit_temp) / (reference_exit_temp * dk)\n sens10.append(Sens10)\n\n new_peak_temp = max(T_array) # Sensitivity definition 11: peak temperature\n Sens11 = (new_peak_temp - reference_peak_temp) / (reference_peak_temp * dk)\n sens11.append(Sens11)\n\n new_peak_temp_dist = dist_array[\n T_array.index(max(T_array))] # Sensitivity definition 12: dist to peak temperature\n Sens12 = (new_peak_temp_dist - reference_peak_temp_dist) / (reference_peak_temp_dist * dk)\n sens12.append(Sens12)\n\n print \"%d %s %.3F %.3F\" % (m, surf.species_name(m), Sens1, Sens2)\n rxns.append(surf.species_name(m))\n\n # this step is essential, otherwise mechanism will have been altered\n s.thermo = ct.NasaPoly2(100.000, 5000.000, ct.one_atm, original_coeffs)\n surf.modify_species(m, s)\n else:\n for rxn in range(surf.n_reactions):\n c = monolithFull(gas, surf, temp, moles_in, sens=[dk, rxn])\n gas_out, surf_out, gas_names, surf_names, dist_array, T_array = c\n\n new_amts = []\n for a in range(len(gas_names)):\n new_amts.append([gas_names[a], [gas_out[:, a]]])\n\n for x in new_amts:\n if x[0] == 'CH4(2)':\n new_ch4_in = x[1][0][0]\n new_ch4_out = x[1][0][-1]\n if new_ch4_out < 0:\n new_ch4_out = 0.\n new_ch4_depletion = new_ch4_in - new_ch4_out\n if new_ch4_depletion <= 1e-8:\n new_ch4_depletion = 1e-8\n new_ch4_conv = 1e-8\n else:\n new_ch4_conv = new_ch4_depletion / new_ch4_in # Sensitivity definition 7: CH4 conversion\n if x[0] == 'Ar':\n ar = x[1][0][-1]\n if x[0] == 'O2(3)':\n new_o2_in = x[1][0][0]\n new_o2_out = x[1][0][-1]\n if new_o2_out < 0:\n new_o2_out = 1.0e-15\n elif new_o2_out > new_o2_in:\n new_o2_out = new_o2_in\n if x[0] == 'CO(7)':\n new_co_out = x[1][0][-1]\n if x[0] == 'H2(6)':\n new_h2_out = x[1][0][-1]\n if x[0] == 'H2O(5)':\n new_h2o_out = x[1][0][-1]\n if x[0] == 'CO2(4)':\n new_co2_out = x[1][0][-1]\n\n if new_ch4_conv <= 1e-8:\n new_h2_sel = 1.0e-8\n new_co_sel = 1.0e-8\n new_syngas_selectivity = 1.0e-8\n new_syngas_yield = 1.0e-8\n new_co_yield = 1.0e-8\n new_h2_yield = 1.0e-8\n new_full_oxidation_selectivity = 1.0e-8\n new_full_oxidation_yield = 1.0e-8\n else:\n new_h2_sel = new_h2_out / (new_ch4_depletion * 2) # Sensitivity definition 5: H2 selectivity\n new_co_sel = new_co_out / new_ch4_depletion # Sensitivity definition 3: CO selectivity\n new_syngas_selectivity = new_co_sel + new_h2_sel # Sensitivity definition 1: SYNGAS selectivity\n new_syngas_yield = new_syngas_selectivity * new_ch4_conv # Sensitivity definition 2: SYNGAS yield\n new_co_yield = new_co_out / new_ch4_in # Sensitivity definition 4: CO % yield\n new_h2_yield = new_h2_out / (2 * new_ch4_in) # Sensitivity definition 6: H2 % yield\n new_h2o_sel = new_h2o_out / (new_ch4_depletion * 2) # Sensitivity definition 8: H2O + CO2 selectivity\n new_co2_sel = new_co2_out / new_ch4_depletion\n new_full_oxidation_selectivity = new_h2o_sel + new_co2_sel\n new_full_oxidation_yield = 
new_full_oxidation_selectivity * new_ch4_conv # Sensitivity definition 9: C2O + CO2 yield\n\n Sens5 = (new_h2_sel - reference_h2_sel) / (reference_h2_sel * dk)\n sens5.append(Sens5)\n\n Sens3 = (new_co_sel - reference_co_sel) / (reference_co_sel * dk)\n sens3.append(Sens3)\n\n Sens1 = (new_syngas_selectivity - reference_syngas_selectivity) / (reference_syngas_selectivity * dk)\n sens1.append(Sens1)\n\n Sens2 = (new_syngas_yield - reference_syngas_yield) / (reference_syngas_yield * dk)\n sens2.append(Sens2)\n\n Sens4 = (new_co_yield - reference_co_yield) / (reference_co_yield * dk)\n sens4.append(Sens4)\n\n Sens6 = (new_h2_yield - reference_h2_yield) / (reference_h2_yield * dk)\n sens6.append(Sens6)\n\n Sens7 = (new_ch4_conv - reference_ch4_conv) / (\n reference_ch4_conv * dk)\n sens7.append(Sens7)\n\n Sens8 = (new_full_oxidation_selectivity - reference_full_oxidation_selectivity) / (\n reference_full_oxidation_selectivity * dk)\n sens8.append(Sens8)\n\n Sens9 = (new_full_oxidation_yield - reference_full_oxidation_yield) / (reference_full_oxidation_yield * dk)\n sens9.append(Sens9)\n\n new_exit_temp = T_array[-1] # Sensitivity definition 10: exit temperature\n Sens10 = (new_exit_temp - reference_exit_temp) / (reference_exit_temp * dk)\n sens10.append(Sens10)\n\n new_peak_temp = max(T_array) # Sensitivity definition 11: peak temperature\n Sens11 = (new_peak_temp - reference_peak_temp) / (reference_peak_temp * dk)\n sens11.append(Sens11)\n\n new_peak_temp_dist = dist_array[T_array.index(max(T_array))] # Sensitivity definition 12: dist to peak temperature\n Sens12 = (new_peak_temp_dist - reference_peak_temp_dist) / (reference_peak_temp_dist * dk)\n sens12.append(Sens12)\n\n print \"%d %s %.3F %.3F\" % (rxn, surf.reaction_equations()[rxn], Sens1, Sens2)\n rxns.append(surf.reaction_equations()[rxn])\n\n return rxns, sens1, sens2, sens3, sens4, sens5, sens6, sens7, sens8, sens9, sens10, sens11, sens12", "def _iso_ic_on_planck(electron_energy, soft_photon_temperature, gamma_energy):\n Ktomec2 = 1.6863699549e-10\n soft_photon_temperature *= Ktomec2\n\n def G34(x, a):\n \"\"\"\n Eqs 20, 24, 25\n \"\"\"\n alpha, a, beta, b, c = a\n pi26 = np.pi ** 2 / 6.0\n tmp = (1 + c * x) / (1 + pi26 * c * x)\n G = pi26 * tmp * np.exp(-x)\n tmp = 1 + b * x ** beta\n g = 1. / (a * x ** alpha / tmp + 1.)\n return G * g\n\n gamma_energy = np.vstack(gamma_energy)\n # Parameters from Eqs 26, 27\n a3 = [0.606, 0.443, 1.481, 0.540, 0.319]\n a4 = [0.461, 0.726, 1.457, 0.382, 6.620]\n z = gamma_energy / electron_energy\n x = z / (1 - z) / (4. * electron_energy * soft_photon_temperature)\n # Eq. 
14\n cross_section = z ** 2 / (2 * (1 - z)) * G34(x, a3) + G34(x, a4)\n tmp = (soft_photon_temperature / electron_energy) ** 2\n # r0 = (e**2 / m_e / c**2).to('cm')\n # (2 * r0 ** 2 * m_e ** 3 * c ** 4 / (pi * hbar ** 3)).cgs\n tmp *= 2.6318735743809104e+16\n cross_section = tmp * cross_section\n cc = ((gamma_energy < electron_energy) * (electron_energy > 1))\n return np.where(cc, cross_section,\n np.zeros_like(cross_section))", "def get_vcond(lambdam, taum):\n return 2 * lambdam / taum", "def calc_metric3(K_tilda):\n trace = np.trace(K_tilda)\n # determinant = np.linalg.det(K_tilda)\n _, log_determinant = np.linalg.slogdet(K_tilda)\n diff = trace - log_determinant\n print(trace, log_determinant, diff)\n return diff", "def TM_fluid(layer, kx, om):\n\n h = layer.d\n rho = layer.medium.rho\n K = layer.medium.K\n k = om*np.sqrt(rho/K)\n ky = np.sqrt(k**2-kx**2)\n T = np.zeros((2, 2), dtype=complex)\n T[0, 0] = np.cos(ky*h)\n T[1, 0] = (om**2*rho/ky)*np.sin(ky*h)\n T[0, 1] = -(ky/(om**2*rho))*np.sin(ky*h)\n T[1, 1] = np.cos(ky*h)\n return T", "def isKelvin(self):\n return _libsbml.Unit_isKelvin(self)", "def neutrino_thermal_velocity(self, z):\n fac = 5./3.*5.*ss.zeta(5.)/ss.zeta(3.)\n vel = np.zeros(self.N_nu)\n vel = fac**.5*(const.kB*self.T_nu/self.M_nu)*(1.+z)*const.c\n return vel", "def func_d23_318(n, series):\n if series == \"3D3\":\n try: \n return np.sqrt((3*os_3D3[str(n)]*wl_3D3[str(n)]*1e-9*hbar*e**2)/(4*np.pi*m_e*c))\n except:\n return 0", "def kn(self):\n if self.enginePos == 'wing': # wing mounted engines\n return -4.0\n elif self.enginePos == 'fuselage': # tail mounted engines\n return -2.5", "def _leg_kinematics(self, knee_pos):\n knee_pos = np.abs(knee_pos)\n sign = np.sign(knee_pos)\n t = np.sqrt(self.p ** 2 + self.q ** 2 + self.r ** 2 - 2 * np.sqrt(self.p ** 2 + self.q ** 2) * self.r * np.cos(knee_pos))\n phi = np.arccos((self.c ** 2 + t ** 2 - self.s **2) / (2 * self.c * t))\n delta = np.arcsin((self.c * np.sin(phi)) / self.s)\n beta = np.arcsin((self.r * np.sin(knee_pos)) / t)\n epsilon = np.pi - (delta + beta)\n Ax = self.r * np.cos(knee_pos) + self.u\n Ay = self.r * np.sin(knee_pos) + self.v\n Bx = self.s * np.cos(epsilon) + self.u + self.p\n By = self.s * np.cos(epsilon) + self.v + self.q\n Cx = Ax + ((Bx - Ax) * self.e + (Ay - By) * self.h) / self.c\n Cy = Ay + ((By - Ay) * self.e + (Ax - Bx) * self.h) / self.c\n alpha = np.arctan((Cy - Ay) / (Cx - Ax))\n return alpha * sign", "def test_vertical_velocity_pressure_dry_air():\n w = 1 * units('cm/s')\n omega_truth = -1.25073619 * units('microbar/second')\n omega_test = vertical_velocity_pressure(w, 1000. 
* units.mbar, 273.15 * units.K)\n assert_almost_equal(omega_test, omega_truth, 6)", "def variance(self):\n return self.k * self.theta ** 2", "def _kvol(sunAz, sunZen, viewAz, viewZen):\n\t\t\t\n\t\t\trelative_azimuth = sunAz.subtract(viewAz).rename(['relAz'])\n\t\t\tpa1 = viewZen.cos() \\\n\t\t\t\t.multiply(sunZen.cos())\n\t\t\tpa2 = viewZen.sin() \\\n\t\t\t\t.multiply(sunZen.sin()) \\\n\t\t\t\t.multiply(relative_azimuth.cos())\n\t\t\tphase_angle1 = pa1.add(pa2)\n\t\t\tphase_angle = phase_angle1.acos()\n\t\t\tp1 = ee.Image(PI().divide(2)).subtract(phase_angle)\n\t\t\tp2 = p1.multiply(phase_angle1)\n\t\t\tp3 = p2.add(phase_angle.sin())\n\t\t\tp4 = sunZen.cos().add(viewZen.cos())\n\t\t\tp5 = ee.Image(PI().divide(4))\n\n\t\t\tkvol = p3.divide(p4).subtract(p5).rename(['kvol'])\n\n\t\t\tviewZen0 = ee.Image(0)\n\t\t\tpa10 = viewZen0.cos() \\\n\t\t\t\t.multiply(sunZen.cos())\n\t\t\tpa20 = viewZen0.sin() \\\n\t\t\t\t.multiply(sunZen.sin()) \\\n\t\t\t\t.multiply(relative_azimuth.cos())\n\t\t\tphase_angle10 = pa10.add(pa20)\n\t\t\tphase_angle0 = phase_angle10.acos()\n\t\t\tp10 = ee.Image(PI().divide(2)).subtract(phase_angle0)\n\t\t\tp20 = p10.multiply(phase_angle10)\n\t\t\tp30 = p20.add(phase_angle0.sin())\n\t\t\tp40 = sunZen.cos().add(viewZen0.cos())\n\t\t\tp50 = ee.Image(PI().divide(4))\n\n\t\t\tkvol0 = p30.divide(p40).subtract(p50).rename(['kvol0'])\n\n\t\t\treturn (kvol, kvol0)", "def aliveness(self, physics):\n return 0.", "def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def Local_Kinetic(Walker):\n\n # laplacian -0.5 \\nabla^2 \\Psi / \\Psi\n h = 0.001\n h2 = h*h\n K = 0.0\n Psi_R = wfs(Walker)\n for i in range(Walker.Ne):\n for j in range(Walker.sys_dim):\n Y=Walker.Re[i][j]\n Walker.Re[i][j]-=h\n wfs1 = wfs(Walker)\n Walker.Re[i][j]+=2.0*h\n wfs2 = wfs(Walker)\n K -= 0.5*(wfs1+wfs2-2.0*Psi_R)/h2\n Walker.Re[i][j]=Y\n return K/Psi_R", "def wind_chill(T_a, v):\r\n return 13.12 + 0.6215*(T_a) - 11.37*(v)**0.16 + 0.3965*(T_a)*(v)**0.16", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance 
(Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def a_realization(self):\n if self.t==1:\n return self.kmonomial()\n else:\n return self.kHallLittlewoodP()", "def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def get_sky_data_flag(self,data):\n sky_data_flag = ~Calibration.get_vane_flag(data['level1']) \n features = self.getFeatures(data)\n features = np.log10(features)/np.log10(2)\n sky_data_flag = sky_data_flag & np.isfinite(features) & (features != 16)\n\n return sky_data_flag", "def dvdt(self, args: Dict) -> float:\n if self.channel_bool['leak']:\n i_leak: float = self.leak.i(args['v'])\n else:\n i_leak: float = 0.\n \n if self.channel_bool['nav']:\n i_nav: float = self.nav.i(args['v'], h=args['h_nav'])\n else:\n i_nav: float = 0.\n\n if self.channel_bool['kvhh']:\n i_kvhh: float = self.kvhh.i(args['v'], n=args['n_kvhh'])\n else:\n i_kvhh: float = 0.\n\n if self.channel_bool['kva']:\n i_kva: float = self.kva.i(args['v'], h=args['h_kva'])\n else:\n i_kva: float = 0.\n\n if self.channel_bool['kvsi']:\n i_kvsi: float = self.kvsi.i(args['v'], m=args['m_kvsi'])\n else:\n i_kvsi: float = 0.\n\n if self.channel_bool['cav']:\n i_cav: float = self.cav.i(args['v'])\n else:\n i_cav: float = 0.\n\n if self.channel_bool['kca']:\n i_kca: float = self.kca.i(args['v'], ca=args['ca'])\n else:\n i_kca: float = 0.\n \n if self.channel_bool['nap']:\n i_nap: float = self.nap.i(args['v'])\n else:\n i_nap: float = 0.\n\n if self.channel_bool['kir']:\n i_kir: float = self.kir.i(args['v'])\n else:\n i_kir: float = 0.\n\n if self.channel_bool['ampar']:\n i_ampar: float = self.ampar.i(args['v'], s=args['s_ampar'])\n else:\n i_ampar: float = 0.\n\n if self.channel_bool['nmdar']:\n i_nmdar: float = self.nmdar.i(args['v'], s=args['s_nmdar'])\n else:\n i_nmdar: float = 0.\n\n if self.channel_bool['gabar']:\n i_gabar: float = self.gabar.i(args['v'], s=args['s_gabar'])\n else:\n i_gabar: float = 0.\n\n return ((-10.0*self.params.area \n * (i_leak\n + i_nav \n + i_kvhh \n + i_kva \n + i_kvsi \n + i_cav \n + i_kca \n + i_nap \n + i_kir) \n - (i_ampar \n + i_nmdar \n + i_gabar))\n / 
(10.0*self.params.cm*self.params.area))", "def is_solvated(molecular_system):\n\n from molsysmt.basic import get\n\n output = False\n\n n_waters, volume = get(molecular_system, element='system', n_waters=True, box_volume=True)\n\n if (n_waters>0) and (volume is not None):\n\n density_number = puw.get_value((n_waters/volume), to_unit='1/nm**3')\n\n if (density_number)>15:\n\n output = True\n\n return output", "def kinetic_energy(vel):\r\n return 0.5 * (vel ** 2).sum(axis=1)", "def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)", "def test_vertical_velocity_dry_air():\n omega = 1 * units('microbar/second')\n w_truth = -0.7995291 * units('cm/s')\n w_test = vertical_velocity(omega, 1000. * units.mbar, 273.15 * units.K)\n assert_almost_equal(w_test, w_truth, 6)", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Compute constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(VVVRIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()", "def get_specific_heat() -> float:\n return 1006.0", "def compute_ground_truth_volume(self, display_opt):\n\n self.meshActor.GetProperty().SetOpacity(0.2)\n self.meshActor.GetProperty().SetColor(1, 0, 0)\n\n clean = vtk.vtkCleanPolyData()\n clean.SetInputData(self.endo_poly)\n\n d3 = vtk.vtkDelaunay3D()\n d3.SetInputConnection(clean.GetOutputPort())\n d3.SetTolerance(0.01)\n d3.SetAlpha(0.0)\n d3.Update()\n\n surfaceFilter = vtk.vtkDataSetSurfaceFilter() # output is triangular mesh\n surfaceFilter.SetInputConnection(d3.GetOutputPort())\n surfaceFilter.Update()\n\n Mass = vtk.vtkMassProperties()\n Mass.SetInputConnection(surfaceFilter.GetOutputPort())\n Mass.Update()\n\n self.ground_truth_vol = Mass.GetVolume()/1000.0\n\n if display_opt:\n\n m = vtk.vtkDataSetMapper()\n m.SetInputConnection(d3.GetOutputPort())\n\n a = vtk.vtkActor()\n a.SetMapper(m)\n\n # set mapper for epi for visualization\n m2 = vtk.vtkDataSetMapper()\n m2.SetInputData(self.epi_poly)\n\n epi_actor = vtk.vtkActor()\n epi_actor.SetMapper(m2)\n epi_actor.GetProperty().SetOpacity(0.3)\n epi_actor.GetProperty().SetColor(1,0,0)\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(0.0, 0.0, 0.0)\n ren.AddActor(epi_actor)\n 
ren.AddActor(a)\n\n vtk_show(ren)", "def acc_visc(j,rA,vA,mA,rhoA,PA,hA,dW=kernel.dW_M4):\n assert rA.shape[0] == vA.shape[0] == mA.shape[0] == rhoA.shape[0] == hA.shape[0], \"arrays are not matched\"\n N = len(mA)\n c_j = c_gas(j,rhoA,PA)\n\n tot = 0\n for i in range(N):\n if i != j:\n\n r_ij = rA[j,:] - rA[i,:]\n r_ij1 = np.linalg.norm(r_ij)\n v_ij = vA[j,:] - vA[i,:]\n m_i = mA[i]\n c_i = c_gas(i,rhoA,PA)\n c_ij = 0.5 * (c_i + c_j)\n h_ij = 0.5 * (hA[i] + hA[j])\n rho_ij = 0.5 * (rhoA[i] + rhoA[j])\n\n c = np.dot(v_ij,r_ij)\n mu_ij = ( c * h_ij ) / ( r_ij1**2 + 0.01*h_ij**2 )\n\n a = ( -alpha * mu_ij * c_ij + beta * mu_ij**2 ) / rho_ij\n b = 0\n Pi_ij = a*dm.heavi(-c) + b*dm.heavi(c)\n\n # if Pi_ij == 0:\n # print(\"i,j:\",i,j)\n # print(\"c:\",c)\n # print(\"c_ij\",c_ij)\n # print(\"\")\n # assert Pi_ij != 0\n\n tot += m_i * h_ij**(-4) * Pi_ij * dW(r_ij1,h_ij) * (r_ij/r_ij1)\n\n return - tot", "def var(self):\n\n return self.scale ** -2 \\\n * (m.gamma(1 + 2 * self.shape ** -1) - m.gamma(1 + self.shape ** -1) ** 2)", "def simulated_reflectivity(pressure, temperature, vapor_mixing_ratio, liquid_mixing_ratio, snow_mixing_ratio=None,\n graupel_mixing_ratio=None, use_varint=False, use_liqskin=False):\n # Set values for constants with variable intercept\n R1 = 1e-15\n RON = 8e6\n RON2 = 1e10\n SON = 2e7\n GON = 5e7\n RON_MIN = 8e6\n RON_QR0 = 0.00010\n RON_DELQR0 = 0.25*RON_QR0\n RON_CONST1R = (RON2-RON_MIN)*0.5\n RON_CONST2R = (RON2+RON_MIN)*0.5\n\n # set constant intercepts\n rno_l = 8e6\n rno_s = 2e7\n rno_g = 4e6\n\n qvapor = da.clip(vapor_mixing_ratio, 0., None)\n qliquid = da.clip(liquid_mixing_ratio, 0., None)\n\n # If qgraupel but not qsnow, set qgraupel = qsnow\n if snow_mixing_ratio is None:\n if graupel_mixing_ratio is None:\n qsnow = da.zeros_like(qliquid)\n qgraupel = da.zeros_like(qliquid)\n else:\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n qsnow = da.zeros_like(graupel_mixing_ratio)\n qsnow[temperature <= 273.15] = qgraupel[temperature <= 273.15]\n else:\n qsnow = da.clip(snow_mixing_ratio, 0., None)\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n\n # density for liquid, snow, and graupel (kg m-3)\n rho_l = 1000. # liquid\n rho_i = 100. # snow\n rho_g = 400. # graupel\n\n # constant evaluation of gamma distribution\n gamma = 720.\n\n # Alpha constant\n alpha = 0.224\n\n # constant multiplication factors\n factor_l = gamma * 1e18 * (1./(np.pi*rho_l))**1.75\n s = gamma * 1e18 * (1./(np.pi*rho_i))**1.75 * (rho_i/rho_l)**2 * alpha\n g = gamma * 1e18 * (1./(np.pi*rho_g))**1.75 * (rho_g/rho_l)**2 * alpha\n\n # calculate virtual temperature\n virtual_t = virtual_temperature(temperature, qvapor)\n\n # dry gas constant\n Rd = 287.\n rho_air = pressure/(Rd*virtual_t)\n\n # adjust for brightband if use_liqskin=True\n if use_liqskin:\n raise NotImplementedError('Liquid skin correction not implemented')\n # factor_s = da.full_like(temperature, s)\n # factor_g = da.full_like(temperature, g)\n # try:\n # factor_s[temperature >= 273.15] = factor_s[temperature >= 273.15] / da.array([alpha])\n # factor_g[temperature >= 273.15] = factor_g[temperature >= 273.15] / da.array([alpha])\n # except ValueError:\n # factor_s = s\n # factor_g = g\n else:\n factor_s = s\n factor_g = g\n\n # calculate variable intercept if use_varint=True\n if use_varint:\n raise NotImplementedError('Variable intercepts not yet implemented')\n # temp_c = da.clip(temperature-273.15, temperature.min(), -0.001)\n # sonv = MIN(2.0D8, 2.0D6*EXP(-0.12D0*temp_c))\n #\n # gonv = gon\n # IF (qgr(i,j,k) .GT. 
R1) THEN\n # gonv = 2.38D0 * (PI*RHO_G/(rhoair*qgr(i,j,k)))**0.92D0\n # gonv = MAX(1.D4, MIN(gonv,GON))\n # END IF\n #\n # ronv = RON2\n # IF (qra(i,j,k) .GT. R1) THEN\n # ronv = RON_CONST1R*TANH((RON_QR0 - qra(i,j,k))/RON_DELQR0) + RON_CONST2R\n # END IF\n else:\n ronv = rno_l\n sonv = rno_s\n gonv = rno_g\n\n # Total equivalent reflectivity factor (z_e, in mm^6 m^-3) is\n # the sum of z_e for each hydrometeor species:\n z_e = (((factor_l*(rho_air*qliquid)**1.75)/(ronv**.75)) +\n ((factor_s*(rho_air*qsnow)**1.75)/(sonv**.75)) +\n ((factor_g*(rho_air*qgraupel)**1.75)/(gonv**.75)))\n\n # Adjust small values of Z_e so that dBZ is no lower than -30\n z_e = da.clip(z_e, .001, None)\n\n # Convert to dBZ\n dbz = 10.*da.log10(z_e)\n return dbz", "def fGasAcousticVelocity(GasGravity, Temperature, Pressure):\n\tGasBulkModulus = fGasBulkModulus(GasGravity, Temperature, Pressure) # Pascals\n\tGasDensity = fGasDensity(GasGravity, Temperature, Pressure) * 1000 # Kg\n\treturn (GasBulkModulus / GasDensity)**0.5 # m/s" ]
[ "0.7809844", "0.62890047", "0.62317854", "0.6194581", "0.61644995", "0.61163557", "0.600697", "0.5984969", "0.5790677", "0.575207", "0.5744783", "0.5729746", "0.5714137", "0.57019544", "0.5691392", "0.56809", "0.56597465", "0.56572974", "0.5655333", "0.56213343", "0.56118655", "0.55981666", "0.55919814", "0.5574342", "0.55607146", "0.5530427", "0.55266887", "0.55231625", "0.55113393", "0.5509306", "0.54993814", "0.5494139", "0.54788834", "0.54775524", "0.54595804", "0.54469913", "0.5445236", "0.5445171", "0.54366094", "0.5420351", "0.5416981", "0.540632", "0.53993636", "0.5398881", "0.53863126", "0.53668475", "0.5353126", "0.5333249", "0.5328219", "0.53188753", "0.53187746", "0.53184503", "0.5317413", "0.5314278", "0.5308887", "0.5305678", "0.5291435", "0.52860075", "0.5283745", "0.5280399", "0.527981", "0.52765036", "0.52747774", "0.52712065", "0.5267032", "0.5252972", "0.5241843", "0.52387714", "0.5236489", "0.5236283", "0.52278775", "0.52248365", "0.5221702", "0.5216384", "0.5211454", "0.5206013", "0.52000856", "0.5188656", "0.5185088", "0.51814246", "0.5170094", "0.5166184", "0.51649916", "0.5160163", "0.51592225", "0.5157207", "0.5143566", "0.5138126", "0.5138095", "0.5136418", "0.51359534", "0.513296", "0.51313585", "0.51312256", "0.51271766", "0.5125665", "0.5123816", "0.51138973", "0.5111476", "0.51096165" ]
0.8016766
0
Determines the radiative temperature of the sky. The "sky," as a black body, has a radiative temperature different to that of the surrounding air, or the ambient temperature. This function converts between them and outputs the sky's radiative temperature.
def sky_temperature(self) -> float:

    return 0.0552 * (self.ambient_temperature**1.5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)", "def temperature() -> float:", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)", "def get_capacitive_rain_sensor_temp(\n self, rain_sensor_temp: Optional[int] = None\n ) -> float:\n # TODO: these values were hardcoded but now are taken from the CW.\n # Check which way is the \"true\" way based on the sensor type (capacitive vs Hydredon)\n # rain_pull_up_resistance = 1\n # rain_res_at_25 = 1\n # rain_beta = 3450\n absolute_zero = 273.15\n\n if rain_sensor_temp is None:\n rain_sensor_temp = self.raw_rain_sensor_temp\n\n if rain_sensor_temp < 1:\n rain_sensor_temp = 1\n elif rain_sensor_temp > 1022:\n rain_sensor_temp = 1022\n\n r = self.rain_pull_up_resistance / ((1023 / rain_sensor_temp) - 1)\n r = math.log(r / self.rain_res_at_25)\n\n return 1 / (r / self.rain_beta + 1 / (absolute_zero + 25)) - absolute_zero", "def get_temperature(self):\n \n # Get temp readings from both sensors\n humidity_temp = self._sense_hat.get_temperature_from_humidity()\n pressure_temp = self._sense_hat.get_temperature_from_pressure()\n \n # avg_temp becomes the average of the temperatures from both sensors\n # We need to check for pressure_temp value is not 0, to not ruin avg_temp calculation\n avg_temp = (humidity_temp + pressure_temp) / 2 if pressure_temp else humidity_temp\n \n # Get the CPU temperature\n cpu_temp = self._get_cpu_temp()\n \n # Calculate temperature compensating for CPU heating\n adj_temp = avg_temp - (cpu_temp - avg_temp) / 1.5\n \n # Average out value across the last three readings\n return self._get_smooth(adj_temp)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def temperature(self) -> float:\n # Start a 
measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def temperature(altitude):\n if altitude <= 36152:\n t = 59-0.00356*altitude # deg F\n else:\n t = -70 # deg F\n t = t + 459.7 # R\n return t", "def temp(self):\n if self.temp_sensor is None:\n return None\n else:\n if self.temp_scale.lower() in ['f', 'fahrenheit']:\n return self.temp_sensor.temp_f\n elif self.temp_scale.lower() in ['c', 'celsius']:\n return self.temp_sensor.temp_c", "def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def temperatures(self):\r\n return self._arm.temperatures", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def get_temperature(\n self, sensitivity: Optional[str] = None, temp_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or temp_sensor is None:\n sensitivity, temp_sensor = self.get_temperature_sensor()\n if sensitivity == \"th\":\n temp = temp_sensor * 175.72 / 65536 - 46.85\n elif sensitivity == \"t\":\n temp = temp_sensor * 1.7572 - 46.85\n else:\n raise CloudWatcherException(\n f\"Unknown temperature sensor type {sensitivity}\"\n )\n\n return temp", "def ambient_temperature(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"F\":\r\n return self.ambient_temperature_f\r\n elif self.temperature_scale == \"C\":\r\n return self.ambient_temperature_c\r\n 
else:\r\n return self._ambient_temperature", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def get_temperature(self):\n pass", "def getTemperature(self):\n return self.temperature", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def temperatures():\n\n return station_9281", "def get_temperature(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? (.*?) .*? .*? . .*? .*? . . . .*?'\n temperature = float(re.findall(pattern,summary).pop())\n return temperature", "def target_temperature(self):\n if self._client.mode == self._client.MODE_HEAT:\n return self._client.heattemp\n if self._client.mode == self._client.MODE_COOL:\n return self._client.cooltemp\n return None", "def read_temperature(self):\n tRaw = self._read_multiple_bytes_as_array(self.BME280_TEMP_MSB, 3)\n\n return float(self._compensate_temperature((tRaw[0] << 12) + (tRaw[1] << 4) + (tRaw[2] >> 4)))", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def temperature(self):\n return self._temperature", "def temperature(self):\n return self._temperature", "def get_attic_temperature(theta_sat: np.ndarray, theta_ac: np.ndarray) -> np.ndarray:\n\n # temperature difference coefficient\n h = 1.0\n\n return theta_sat * h + theta_ac * (1 - h)", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6", "def get_temperature(elevation, sea_level):\n if elevation <= sea_level:\n return 0.8\n else:\n return (-1.0 / (1.0 - sea_level)) * (elevation - sea_level) + 1.0", "def getTemperature(self):\n return self.json_state.get(\"temperature\")", "def current_temperature(self) -> float:\n return self._thermostat.current_temperatue", "def temperature(self, alt):\n T = self.altitude_profile(alt)[1]\n return T", "def temperature(self) -> SmartSsdTemperature:\n return self._temperature", "def target_temperature(self) -> int:\r\n # TODO: Find a better way to do this. 
This is ugly.\r\n if self._hvac_mode == \"cool\":\r\n return self.target_temperature_low\r\n elif self._hvac_mode == \"heat\":\r\n return self.target_temperature_high\r\n elif self._hvac_mode == \"heat-cool\":\r\n # TODO: Fix this so that heat or cool is chosen.\r\n if self._ambient_temperature >= self._target_temperature:\r\n return self.target_temperature_low\r\n elif self._ambient_temperature <= self._target_temperature:\r\n return self.target_temperature_high\r\n elif self._hvac_mode == \"eco\":\r\n if self._ambient_temperature >= self._target_temperature:\r\n return self.eco_temperature_low\r\n elif self._ambient_temperature <= self._target_temperature:\r\n return self.eco_temperature_high\r\n elif self._hvac_mode == \"off\":\r\n return self.ambient_temperature\r\n\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))", "def calculateTemperature(self, R):\n rRuOx2005 = self._busToRuOx2005(R)\n T = self.RuOx2005.calculateTemperature(rRuOx2005)\n return T", "def soil_temp_factor(self, project_day):\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac", "def temperature(self):\r\n try:\r\n return str(self.connect()['main']['temp'])\r\n except:\r\n return '@weather_temperature'", "def temperature(self):\n temp = ct.c_float()\n self.lib.GetTemperatureF(ct.pointer(temp))\n return temp.value", "def read_object_temperatureF(self, ):\n return self.read_object_temperatureC() * (9.0/5.0) + 32.0", "def tskypy(self, psr):\n # ensure l is in range 0 -> 360\n b = psr.gb\n if psr.gl < 0.:\n l = 360 + psr.gl\n else:\n l = psr.gl\n\n # convert from l and b to list indices\n j = b + 90.5\n if j > 179:\n j = 179\n\n nl = l - 0.5\n if l < 0.5:\n nl = 359\n i = float(nl) / 4.\n \n tsky_haslam = self.tskylist[180*int(i) + int(j)]\n # scale temperature before returning\n return tsky_haslam * (self.freq/408.0)**(-2.6)", "def get_temperature(self):\n # Fake a random temperature change\n temperature = random.randint(20, 25)\n self.set_temperature(temperature)", "def read_temperature(self):\n self._force_read(False)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n\n return float(self._compensate_temperature(tempADC))", "def target_temperature(self) -> float:\n return self._thermostat.setpoint_temperature", "def temperature(self, state: State):\n if self.is_self_play and state.number_of_stones < 16:\n return self._temperature\n return None", "def color_temp(self):\n return kelvin_to_mired(self._color_temp)", "def get_temp(self):\n\t\traw_temp = self.read_i2c_word(self.TEMP_OUT0)\n\n\t\t# Get the actual temperature using the formule given in the\n\t\t# MPU-6050 Register Map and Descriptions revision 4.2, page 30\n\t\tactual_temp = (raw_temp / 340.0) + 36.53\n\n\t\treturn actual_temp", "def __getRawTemperature(self):\n t1 = self.read_byte_data(self.address, 0x03)\n t2 = self.read_byte_data(self.address, 0x04)\n t3 = self.read_byte_data(self.address, 0x05)\n t = (t1 << 16) | (t2 << 8) | t3\n t = getTwosComplement(t, 24)\n return t", "def temperature(self):\n self.convert_window(\"Temperature\", \"Celsius\", [\"Celsius\", \"Fahrenheit\", \"Kelvin\", \"Rankine\", \"Reaumur\", 
\"Newton\", \"Romer\", \"Delisle\"])", "def temperature(self):\n done, data = self._request('GP')\n if done:\n return {\n 'ds3231temp': float(data[0])/10,\n 'mcp9808temp': float(data[1])/10,\n 'tmp007temp': float(data[2])/10\n }\n\n raise EvseError", "def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])", "def thermodynamic_temperature(frequency, T_cmb=None):\n nu = frequency.to(si.GHz, spectral())\n\n if T_cmb is None:\n from astropy.cosmology import default_cosmology\n\n T_cmb = default_cosmology.get().Tcmb0\n\n def f(nu, T_cmb=T_cmb):\n x = _si.h * nu / _si.k_B / T_cmb\n return x**2 * np.exp(x) / np.expm1(x) ** 2\n\n def convert_Jy_to_K(x_jybm):\n factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(\n astrophys.Jy\n )\n return x_jybm / factor\n\n def convert_K_to_Jy(x_K):\n factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(\n si.K\n )\n return x_K / factor\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],\n \"thermodynamic_temperature\",\n {\"frequency\": frequency, \"T_cmb\": T_cmb},\n )", "def convert_temp(self, temperature):\n return 1.8 * (temperature - 273) + 32", "def temperature(self) -> Optional[float]:\n return self.data.get(\"temp\")", "def get_tank_temperature():\n if sensor is None:\n return DEFAULT_TEMPERATURE\n\n try:\n # Read the temperature from the I2C sensor.\n return sensor.read_temperature(True)\n except OSError:\n return DEFAULT_TEMPERATURE", "def formation_temperature(surface_temperature, gradient, depth):\n form_temp = surface_temperature + gradient * depth\n return form_temp", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def current_temperature(self):\n return self.atag.dhw_temperature", "def target_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_c)", "def get_brightnesstemperature(self, channel):\n K1 = {\n \"10\": 3040.136402, # Constant K1 [W m-2 um-1].\n \"11\": 2482.375199,\n \"12\": 1935.060183,\n \"13\": 866.468575,\n \"14\": 641.326517,\n }\n\n K2 = {\n \"10\": 1735.337945, # Constant K2 [K].\n \"11\": 1666.398761,\n \"12\": 1585.420044,\n \"13\": 1350.069147,\n \"14\": 1271.221673,\n }\n\n return K2[channel] / np.log((K1[channel] / self.get_radiance(channel)) + 1)", "def get_chip_temperature(self):\n self.check_validity()\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_GET_CHIP_TEMPERATURE, (), '', 10, 'h')", "def read_temp(self):\n return 19.0\n data = self.read(_TEMP_REG, 2)\n temp = ((data[0] * 256) + data[1]) / 16\n if temp > 2047:\n temp -= 4096\n return temp * 0.0625", "def temperature_const_theta() -> FlowFieldVal:\n return self._potential_temperature_to_temperature([\n 
self._const_theta,\n ] * self._params.nz, zz)", "def get_temperature(self):\n self.temperature = self.temperature_sensors.get_temperature(\n self.channel)\n return self.temperature", "def current_temperature(self):\n if self._device.temp is not None and self._device.temp > -460:\n return self._device.temp\n return None", "def temperature_sensor():\n\n\tsensor_name = \"humiture\"\n\treg_addr = 26\n\tdata_len = 4\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\t# get sensor data\n\tdata = rospy.wait_for_message('MediumSize/SensorHub/Temperature', Temperature, 2)\n\ttemperature = data.temperature\n\n\tdelete_sensor(sensor_name)\n\treturn temperature", "def update_rain_temp(self, day_of_week, departure_time_seconds):\n\n current_time = t.time()\n today = datetime.today().weekday()\n\n if (departure_time_seconds < (current_time + 3600) \\\n and day_of_week == today):\n\n self.temp = self.current_temperature\n self.rain = self.current_rainfall\n\n elif (day_of_week == today):\n for i in range(24):\n if (departure_time_seconds > self.weather_forecast_json \\\n [\"hourly\"][\"data\"][i][\"time\"] and departure_time_seconds \\\n < self.weather_forecast_json[\"hourly\"][\"data\"][i + 1][\"time\"]):\n\n self.temp = self.weather_forecast_json \\\n ['hourly']['data'][i]['temperature']\n\n self.rain = self.weather_forecast_json['hourly'] \\\n ['data'][i]['precipIntensity']\n break\n else:\n continue\n else:\n day_difference = int((departure_time_seconds - current_time) / 86400)\n\n self.temp = (self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMax'] + \\\n self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMin']) / 2\n\n self.rain = self.weather_forecast_json['daily'] \\\n ['data'][day_difference]['precipIntensity']", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def current_temperature(self):\n temperature = float('NaN')\n while math.isnan(temperature) or temperature < MINIMUM_BELIEVABLE_TEMPERATURE:\n temperature = float(self._sensor.readTempC())\n return temperature", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def target_temperature(self) -> float | None:\n return self.vera_device.get_current_goal_temperature()", "def get_temperature(self):\r\n\r\n\t# get current resolution\r\n\r\n\tconf = self.read_config()\r\n\tmask = 0x60 # 0110 0000\r\n\tres = conf & mask # extract resolution from config register\r\n\t# get temperature from register\r\n \r\n self.write('\\x00')\r\n data = self.read(2)\r\n t_raw = struct.unpack('>h', data)\r\n\tt_raw = t_raw[0]\r\n\r\n#\tmsb = 0b11110101\r\n#\tlsb = 0b11100000\r\n#\tdata = struct.pack('BB', msb, lsb)\r\n # t_raw = struct.unpack('>h', data)\r\n#\tt_raw = t_raw[0]\r\n#\tprint t_raw\r\n\t\r\n # return t_raw\r\n\t# t_raw = ((msb << 8) + lsb) # convert to 2 Byte Integer\r\n\r\n\tif (res == 0x00): # 9 bit resolution 0.5 degree\r\n\t print \"res: 0.5\"\r\n\t return (t_raw >> 7) * 0.5\r\n\r\n\tif (res == 0x20): # 10 bit resolution 0.25 degree\r\n\t print \"res: 0.25\"\r\n\t return (t_raw >> 6) * 0.25\r\n\r\n\tif (res == 0x40): # 11 bit resolution 0.125 degree\r\n\t print \"res: 0.125\"\r\n\t return (t_raw >> 5) * 0.125\r\n\r\n\tif (res == 0x60): # l2 bit resolution 0.0625 degree\r\n\t print \"res: 0.0625\"\r\n\t return (t_raw >> 4) * 0.0625", "def get_rain():\n global rain\n\n # Report rain only if the condition is 'rainy' (and not always).\n if weather_condition == CONDITION_RAINY and random.random() > 
0.7:\n rain += round(random.random(), 2)\n return rain", "def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)", "def get_temperature(self, sensor: int = 0) -> float:\n\n return self.send(self.cmd.GET_HEATING_ACT)", "def read_ambient_temperatureC(self, ):\n return self._read_temperature(MLX90614_TA)", "def target_temperature(self):\n return self._target_temp", "def temperature(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n return temp", "def getTempMedia(self):\n temp_media = float(self.soup.media.text)\n temperature = str(int(round(temp_media)))\n return temperature", "def temp(self):\n\t\ttemp_out = self.read16(MPU9250_ADDRESS, TEMP_DATA)\n\t\ttemp = temp_out / 333.87 + 21.0 # these are from the datasheets\n\t\treturn temp", "def readtemperature(self, cTemp):\r\n\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\twhile (data & 0x01) != 0 :\r\n\t\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\tdata1 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\tdata2 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\t\r\n\t\t# Convert the data to 14-bits\r\n\t\tcTemp = (((data1 * 256.0) + data2) / 4.0)\r\n\t\t\r\n\t\tif cTemp < 0x0140 :\r\n\t\t\tcTemp = 0x0140\r\n\t\telif cTemp > 0x12C0 :\r\n\t\t\tcTemp = 0x12C0\r\n\t\telse :\r\n\t\t\tcTemp = cTemp\r\n\t\t\r\n\t\tcTemp = (cTemp / 32.0) - 50.0\r\n\t\tfTemp = cTemp * 1.8 + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "def target_temperature(self):\n return self.atag.dhw_target_temperature", "def read_temperature(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 2)\n return lsm9ds1.to_int16(data)", "def rain(self, json):\n rain = str(json['forecast']['txt_forecast']['forecastday'][0]['pop'])\n return rain", "def get_temperature(self):\n\n svc = \"urn:upnp-org:serviceId:TemperatureSensor1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"CurrentTemperature\")", "def high_temperature(self):\r\n return self._yesterdays_weather.get_high_temperature()", "def calculate_color_temperature(r: int, g: int, b: int) -> float:\n\n # 1. Map RGB values to their XYZ counterparts.\n # Based on 6500K fluorescent, 3000K fluorescent\n # and 60W incandescent values for a wide range.\n # Note: Y = Illuminance or lux\n x = (-0.14282 * r) + (1.54924 * g) + (-0.95641 * b)\n y = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)\n z = (-0.68202 * r) + (0.77073 * g) + (0.56332 * b)\n\n # 2. Calculate the chromaticity co-ordinates\n xchrome = x / (x + y + z)\n ychrome = y / (x + y + z)\n\n # 3. Use to determine the CCT\n n = (xchrome - 0.3320) / (0.1858 - ychrome)\n\n # 4. 
Calculate the final CCT\n cct = (449.0 * pow(n, 3)) + (3525.0 * pow(n, 2)) + (6823.3 * n) + 5520.33\n\n # Return the results in degrees Kelvin\n return cct", "def get_temperature(data):\n celcius = 0\n celcius = [i for i in data if re.search(r'\\d+[/]', i)]\n \n if celcius == []:\n return \"N/A\"\n celcius = celcius[0].split('/')[0]\n celcius = celcius.replace('M', '-')\n \n try:\n celcius = int(celcius)\n except ValueError:\n return \"N/A\"\n\n farenheit = round((celcius * 9/5) + 32) # formula to get farenheit from celcius\n temperature = \"{0} C ({1} F)\".format(celcius, farenheit)\n return temperature", "def target_temperature(self):\n return self._target_temperature", "def target_temperature(self):\n return self._target_temperature", "def target_temperature(self):\n return self._target_temperature", "def target_temperature(self):\n return self._target_temperature" ]
[ "0.677967", "0.65615", "0.64276695", "0.6416784", "0.64141804", "0.6408829", "0.62878555", "0.6222289", "0.6220975", "0.61991245", "0.6182677", "0.61740917", "0.61678636", "0.6091844", "0.60883605", "0.6079088", "0.6072627", "0.6015334", "0.60076296", "0.59943837", "0.59905624", "0.5988029", "0.5980697", "0.59772134", "0.5975562", "0.59164184", "0.5863152", "0.586078", "0.5853003", "0.5832522", "0.5827362", "0.58265156", "0.5820179", "0.58153296", "0.58153296", "0.58105195", "0.58070904", "0.5787698", "0.5771034", "0.5770008", "0.5768418", "0.575893", "0.57132816", "0.5709572", "0.5702031", "0.5700625", "0.56975675", "0.56973493", "0.5694243", "0.5688774", "0.5675132", "0.5662157", "0.56581", "0.56546646", "0.5645749", "0.56297594", "0.5628357", "0.56259096", "0.5620268", "0.56078464", "0.5603357", "0.55883086", "0.5586931", "0.55809605", "0.5578597", "0.55726355", "0.557004", "0.5568196", "0.5538022", "0.5534543", "0.5519602", "0.5511703", "0.5499495", "0.54924357", "0.5485852", "0.54829025", "0.5480603", "0.5477799", "0.54719996", "0.5470503", "0.54703283", "0.54624283", "0.5459717", "0.5456861", "0.5447461", "0.5445308", "0.5443012", "0.543805", "0.54202324", "0.5418282", "0.54056865", "0.5402589", "0.53981", "0.53954524", "0.5376369", "0.5375557", "0.5370513", "0.5370513", "0.5370513", "0.5370513" ]
0.77121866
0
The thermal conductivity of air varies as a function of temperature.
def thermal_conductivity_of_air(self) -> float:

    # This more accurate equation is not used by the paper.
    # return (0.02646 * self.ambient_temperature ** 1.5) / (
    #     self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))
    # )

    # The reference suggests this equation is accurate to 1%.
    return 0.02646 * (self.ambient_temperature / 300) ** 0.8646
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def temperature() -> float:", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def temperature(self) -> 
float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2", "def airfoilEffT(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)", "def get_temperature(self):\n pass", "def current_temperature(self) -> float:\n return self._thermostat.current_temperatue", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def air_density(self):\n return self.flow_field.air_density", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def getTemperature(self):\n return self.temperature", "def temperature(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n return temp", "def temp(self):\n if self.temp_sensor is None:\n return None\n else:\n if self.temp_scale.lower() in ['f', 'fahrenheit']:\n return self.temp_sensor.temp_f\n elif self.temp_scale.lower() in ['c', 'celsius']:\n return self.temp_sensor.temp_c", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def get_cold_junction_temperature(self):\n return self._mcp9600.get('COLD_JUNCTION').temperature", "def get_chiller_temperature(self) -> float:\n\n return self.send(self.cmd.GET_COOLING_ACT)", "def temperature_energy():\n e = _si.e.value\n k_B = _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. 
* self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def temperature(self, alt):\n T = self.altitude_profile(alt)[1]\n return T", "def target_temperature(self) -> int:\r\n # TODO: Find a better way to do this. This is ugly.\r\n if self._hvac_mode == \"cool\":\r\n return self.target_temperature_low\r\n elif self._hvac_mode == \"heat\":\r\n return self.target_temperature_high\r\n elif self._hvac_mode == \"heat-cool\":\r\n # TODO: Fix this so that heat or cool is chosen.\r\n if self._ambient_temperature >= self._target_temperature:\r\n return self.target_temperature_low\r\n elif self._ambient_temperature <= self._target_temperature:\r\n return self.target_temperature_high\r\n elif self._hvac_mode == \"eco\":\r\n if self._ambient_temperature >= self._target_temperature:\r\n return self.eco_temperature_low\r\n elif self._ambient_temperature <= self._target_temperature:\r\n return self.eco_temperature_high\r\n elif self._hvac_mode == \"off\":\r\n return self.ambient_temperature\r\n\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))", "def get_D_C3H8_air(self, T):\n\n self.set_TempPres_dependents(T)\n\n self.D_C3H8_air = (\n 2. / 3. * np.sqrt(const.k_B * T / np.pi * 0.5 * (1. /\n self.air.m + 1. / self.fuel.m)) / (np.pi * (0.5 *\n (self.air.d + self.fuel.d)) ** 2.) / self.air.n\n )\n\n return self.D_C3H8_air", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def ambient_temperature(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"F\":\r\n return self.ambient_temperature_f\r\n elif self.temperature_scale == \"C\":\r\n return self.ambient_temperature_c\r\n else:\r\n return self._ambient_temperature", "def read_ambient_temperatureF(self, ):\n return 
self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def internal_temp_c(self) -> int:\n return int(self._device_info[\"Temperature\"])", "def soil_temp_factor(self, project_day):\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac", "def temperature(self):\n return self._temperature", "def temperature(self):\n return self._temperature", "def target_temperature(self):\n if self.current_operation == 'Heat & Cool':\n return None\n if self.current_operation == 'Heat only':\n return int(self._api._heatto)\n elif self.current_operation == 'Cool only':\n return int(self._api._coolto)\n return None", "def target_temperature(self) -> float | None:\n if self.hvac_mode == HVACMode.COOL:\n return self.target_temperature_high\n if self.hvac_mode == HVACMode.HEAT:\n return self.target_temperature_low\n return None", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def heat_capacity_of_air(self) -> float:\n\n return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2", "def current_temperature(self):\n return self.atag.dhw_temperature", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def get_temperature(self):\n \n # Get temp readings from both sensors\n humidity_temp = self._sense_hat.get_temperature_from_humidity()\n pressure_temp = self._sense_hat.get_temperature_from_pressure()\n \n # avg_temp becomes the average of the temperatures from both sensors\n # We need to check for pressure_temp value is not 0, to not ruin avg_temp calculation\n avg_temp = (humidity_temp + pressure_temp) / 2 if pressure_temp else humidity_temp\n \n # Get the CPU temperature\n cpu_temp = self._get_cpu_temp()\n \n # Calculate temperature compensating for CPU heating\n adj_temp = avg_temp - (cpu_temp - avg_temp) / 1.5\n \n # Average out value across the last three readings\n return self._get_smooth(adj_temp)", "def thermal_state(self, beta: float = .1) -> numpy.ndarray:\n rho = numpy.exp(-beta * self.cost)\n return rho / numpy.sum(rho)", "def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)", "def target_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_c)", "def temperature(self):\n temp = ct.c_float()\n self.lib.GetTemperatureF(ct.pointer(temp))\n return temp.value", "def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)", "def get_chip_temperature(self):\n self.check_validity()\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_GET_CHIP_TEMPERATURE, (), '', 10, 'h')", "def temperatures(self):\r\n return self._arm.temperatures", "def get_temp(self) -> float:\n return np.round(np.mean(self.temp_data), 1)", "def eco_temperature_low_c(self) -> float:\r\n 
self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. Check the sensor\")", "def target_temperature(self):\n if self._client.mode == self._client.MODE_HEAT:\n return self._client.heattemp\n if self._client.mode == self._client.MODE_COOL:\n return self._client.cooltemp\n return None", "def thermal_state(self, T):\n return unvectorize(\n np.diag(thermal_dist(t, self.ev)) \\\n .astype(settings.DTYPE_COMPLEX)\n for t in vectorize(T)\n )", "def get_production_factor(self, temp_atmosphere):\n return 1.", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def constant_temp(self, numIterations):\n return 1 + self.alpha", "def target_temperature(self) -> float:\n return self._thermostat.setpoint_temperature", "def eco_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_low_c)", "def _therm_cond(self):\n xy = dict() # used to label the components e.g 1->CO2,2->N2\n for (i, j) in enumerate(self.component_list, 1):\n xy[i] = j\n\n k_vap = 0\n for i in range(1, len(self.component_list) + 1):\n sumij = 0\n for j in range(1, len(self.component_list) + 1):\n Aij = (1 + (self.visc_d_comp[xy[i]] / self.visc_d_comp[xy[j]])**0.5 *\n (self.mw_comp[xy[j]] / self.mw_comp[xy[i]])**0.25)**2 *\\\n (8 * (1 + self.mw_comp[xy[i]] / self.mw_comp[xy[j]]))**-0.5\n sumij += self.mole_frac_comp[xy[j]] * Aij\n k_vap += self.mole_frac_comp[xy[i]] * self.therm_cond_comp[xy[i]] / sumij\n\n try:\n self.therm_cond = Expression(expr=k_vap,\n doc='Vapor thermal'\n 'conductivity [J/(m.K.s)]')\n except AttributeError:\n self.del_component(self.therm_cond)\n raise", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def current_temperature(self):\n temperature = float('NaN')\n while math.isnan(temperature) or temperature < MINIMUM_BELIEVABLE_TEMPERATURE:\n temperature = float(self._sensor.readTempC())\n return temperature", "def current_temperature(self) -> float:\n return self._device.scaled_temperature", "def _temperature_policy(self, N: np.ndarray) -> np.ndarray:\n t = 1 / 
self.temperature\n pi = N ** t + np.finfo(np.float32).eps # Add tiny value to remove 0s\n pi = pi / pi.sum()\n return pi", "def get_actual_air_conditioned_temperature(\n hc_period: np.ndarray,\n theta_ac: np.ndarray, v_supply: np.ndarray, theta_supply_h: np.ndarray, theta_supply_c: np.ndarray,\n l_d_h: np.ndarray, l_d_cs: np.ndarray,\n u_prt: float, a_prt: np.ndarray, a_hcz: np.ndarray, q: float) -> np.ndarray:\n\n rho = get_air_density()\n c = get_specific_heat()\n\n a_prt = a_prt.reshape(1, 5).T\n a_hcz = a_hcz[0:5].reshape(1, 5).T\n\n theta_ac_act_h = np.maximum(theta_ac + (c * rho * v_supply * (theta_supply_h - theta_ac) - l_d_h * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n theta_ac_act_c = np.minimum(theta_ac - (c * rho * v_supply * (theta_ac - theta_supply_c) - l_d_cs * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n return theta_ac_act_h * (hc_period == 'h') + theta_ac_act_c * (hc_period == 'c') + theta_ac * (hc_period == 'm')", "def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)", "def get_attic_temperature(theta_sat: np.ndarray, theta_ac: np.ndarray) -> np.ndarray:\n\n # temperature difference coefficient\n h = 1.0\n\n return theta_sat * h + theta_ac * (1 - h)", "def temperatures():\n\n return station_9281", "def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def temperature(self) -> Optional[float]:\n return self.data.get(\"temp\")", "def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c", "def airfoilEffW(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)", "def read_object_temperatureF(self, ):\n return self.read_object_temperatureC() * (9.0/5.0) + 32.0", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def get_specific_heat() -> float:\n return 1006.0", "def compute_td_spectral_function(self):\n nomegase = self.nomegase\n nkpt = self.nkpt\n nband = self.nband\n ntemp = self.ntemp\n\n self.spectral_function_T = np.zeros((nomegase, ntemp, nkpt, nband),\n dtype=float)\n\n omega = np.einsum('ijt,l->ijlt',\n np.ones((nkpt, nband, ntemp)), self.omegase)\n\n self.spectral_function_T = (\n (1 / np.pi) * np.abs(self.self_energy_T.imag) /\n ((omega - self.self_energy_T.real) ** 2\n + self.self_energy_T.imag ** 2)\n )", "def get_temperature(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? (.*?) .*? .*? . .*? .*? . . . 
.*?'\n temperature = float(re.findall(pattern,summary).pop())\n return temperature", "def water_vapour(t):\n T_0 = 273.15\n T_rw = 35.86 # over water\n a = 17.269\n # cdo -mulc,610.78 -exp -div -mulc,17.5 -subc,273.15 a\n return 610.78 * np.exp(a * (t - T_0) / (t - T_rw))", "def get_convection_vent(self,T_i,el):\n\n rad = radiation.Radiation()\n T_atm = rad.getTemp(el)\n\n Q_vent = self.mdot*self.Cp_air0*(T_i-T_atm) # Convection due to released air\n return Q_vent", "def current_temperature(self) -> float | None:\n return self.vera_device.get_current_temperature()", "def target_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_c\"))\r\n return kelvin_to_celsius(self._target_temperature_low)", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6", "def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)", "def eco_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_high_c)", "def current_temperature(self):\n if self._device.temp is not None and self._device.temp > -460:\n return self._device.temp\n return None", "def temperature_unit(self):\n return TEMP_FAHRENHEIT" ]
[ "0.79880536", "0.764283", "0.7523666", "0.75130093", "0.7090662", "0.70669293", "0.7042304", "0.6954361", "0.6936385", "0.6905874", "0.6894696", "0.68758714", "0.6788131", "0.6774004", "0.677208", "0.67583066", "0.6724294", "0.67135936", "0.66866106", "0.66677165", "0.6657772", "0.6656361", "0.6648099", "0.6646587", "0.66422915", "0.6608585", "0.6579855", "0.6576052", "0.6573095", "0.6572762", "0.6572021", "0.65550625", "0.6527852", "0.65222335", "0.65220314", "0.6521044", "0.6517282", "0.6515396", "0.6515312", "0.6510845", "0.650714", "0.64997095", "0.6491665", "0.64797366", "0.64776105", "0.64748764", "0.64748764", "0.6449573", "0.64457047", "0.6429444", "0.6422232", "0.64041454", "0.6399046", "0.6395345", "0.63913614", "0.63886535", "0.6386026", "0.63832194", "0.6379305", "0.6369028", "0.636829", "0.63652855", "0.63641787", "0.6363595", "0.6362791", "0.636108", "0.6337859", "0.63281345", "0.6325385", "0.63240093", "0.63025707", "0.63001037", "0.6299052", "0.6297543", "0.6287753", "0.6272516", "0.6271103", "0.6265984", "0.62610424", "0.6257063", "0.62536395", "0.62500817", "0.6248167", "0.62444663", "0.62345964", "0.6223101", "0.6221932", "0.62174064", "0.6217323", "0.6201667", "0.6201649", "0.61864376", "0.61703587", "0.6161416", "0.6157279", "0.6156725", "0.6156312", "0.61544037", "0.6146374", "0.61360174" ]
0.8359033
0
The thermal expansion coefficient of air varies as a function of temperature.
def thermal_expansivity_of_air(self) -> float: return 1 / self.ambient_temperature
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def temperature() -> float:", "def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def get_D_C3H8_air(self, T):\n\n self.set_TempPres_dependents(T)\n\n self.D_C3H8_air = (\n 2. / 3. * np.sqrt(const.k_B * T / np.pi * 0.5 * (1. /\n self.air.m + 1. / self.fuel.m)) / (np.pi * (0.5 *\n (self.air.d + self.fuel.d)) ** 2.) / self.air.n\n )\n\n return self.D_C3H8_air", "def temperature(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n return temp", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. 
* self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def temperature_energy():\n e = _si.e.value\n k_B = _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )", "def heat_capacity_of_air(self) -> float:\n\n return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def eco_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_low_c)", "def constant_temp(self, numIterations):\n return 1 + self.alpha", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, 
T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def get_actual_air_conditioned_temperature(\n hc_period: np.ndarray,\n theta_ac: np.ndarray, v_supply: np.ndarray, theta_supply_h: np.ndarray, theta_supply_c: np.ndarray,\n l_d_h: np.ndarray, l_d_cs: np.ndarray,\n u_prt: float, a_prt: np.ndarray, a_hcz: np.ndarray, q: float) -> np.ndarray:\n\n rho = get_air_density()\n c = get_specific_heat()\n\n a_prt = a_prt.reshape(1, 5).T\n a_hcz = a_hcz[0:5].reshape(1, 5).T\n\n theta_ac_act_h = np.maximum(theta_ac + (c * rho * v_supply * (theta_supply_h - theta_ac) - l_d_h * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n theta_ac_act_c = np.minimum(theta_ac - (c * rho * v_supply * (theta_ac - theta_supply_c) - l_d_cs * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n return theta_ac_act_h * (hc_period == 'h') + theta_ac_act_c * (hc_period == 'c') + theta_ac * (hc_period == 'm')", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def get_mfp(self, T):\n\n self.air.T = T\n self.air.set_TempPres_dependents()\n\n self.mfp = (\n (np.sqrt(2.) * np.pi * self.air.d ** 2. 
* self.air.n) ** -1.\n )\n\n return self.mfp", "def airfoilEffT(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def Fermi(En,T):\n ev = 1.60218e-19\n kb = 1.380e-23\n return 1/(1+np.exp(En/(kb*T/ev)))", "def air_density(self):\n return self.flow_field.air_density", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def get_production_factor(self, temp_atmosphere):\n a1 = self.damages_terms[0]\n a2 = self.damages_terms[1]\n a3 = self.damages_terms[2]\n pf = self.params.prod_frac\n return ne.evaluate('1 - pf * (1 - 1 / (1 + a1 * temp_atmosphere + a2 * temp_atmosphere ** a3))')", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n 
temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def get_cold_junction_temperature(self):\n return self._mcp9600.get('COLD_JUNCTION').temperature", "def test_getThermalExpansionFactorConservedMassByLinearExpansionPercent(self):\n hotTemp = 700.0\n dLL = self.component.material.linearExpansionFactor(\n Tc=hotTemp, T0=self._coldTemp\n )\n ref = 1.0 + dLL\n cur = self.component.getThermalExpansionFactor(Tc=hotTemp)\n self.assertAlmostEqual(cur, ref)", "def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density", "def P(self, energy, newEnergy, temperature):\n \"\"\" This is the decision-rule, adapted from Nascimento, et al., 2009 (See references) \"\"\"\n \n delta = self.calcDelta(newEnergy, energy)\n\n minTemp = 0.00001 # use minimum to avoid div/0 and buffer overflow\n if temperature == 0:\n return minTemp\n elif temperature > minTemp:\n try:\n return math.exp(-1 * round(delta, 4) / round(temperature, 4))\n except OverflowError as detail:\n return minTemp\n else:\n return 1", "def target_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_c)", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def target_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_c\"))\r\n return kelvin_to_celsius(self._target_temperature_low)", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def farenheit(ctemp):\n return round(9.0/5.0 * ctemp + 32)", "def soil_temp_factor(self, project_day):\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6", "def target_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_low_c)", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", 
"def temperature(self, alt):\n T = self.altitude_profile(alt)[1]\n return T", "def liqtemperature(pres):\n psi = pres/_PTPE - 1\n temp = 0.\n for coeff in _C_TMELT[::-1]:\n temp = temp*psi + coeff\n temp = (1 + temp*psi)*_TTP\n return temp", "def temperature(altitude):\n if altitude <= 36152:\n t = 59-0.00356*altitude # deg F\n else:\n t = -70 # deg F\n t = t + 459.7 # R\n return t", "def get_production_factor(self, temp_atmosphere):\n return 1.", "def linear_heat_transfer(x, t, K_medium, rho_medium, c_medium, T_medium_initial, H_heat_transfer, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n\n h = H_heat_transfer/K_medium\n erfc_factor_1 = erfc(x/(2*np.sqrt(k*t)))\n\n #combine factors in logdomain, since the exp-factors quickly approach\n #infinity while erfc-factor goes to zero\n log_exp_factor_1 = h*x\n log_exp_factor_2 = k*t*h**2\n log_erfc_factor_2 = np.log(erfc(x/(2*np.sqrt(k*t)) + h*np.sqrt(k*t)))\n exp_erfc_factor = np.exp(log_exp_factor_1 + log_exp_factor_2 + log_erfc_factor_2)\n\n return (erfc_factor_1 - exp_erfc_factor)*(T_external_applied - T_medium_initial) + T_medium_initial", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def exponential(min_iterations, i, start = start_temp, final = final_temp):\n\n\ttemperature = (start * (final / start) ** (i / min_iterations))\n\n\treturn temperature", "def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2", "def gas_zfactor(T_pr, P_pr):\n # T_pr : calculated pseudoreduced temperature\n # P_pr : calculated pseudoreduced pressure \n from scipy.optimize import fsolve # non-linear solver\n import numpy as np\n\n a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475\n a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210\n\n def f(y):\n rho_pr, z = y\n c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))\n c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))\n c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))\n c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))\n\n f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1\n f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))\n return[f1, f2]\n\n solve = fsolve(f, [1, 1]) # initial guess\n return(solve[0], solve[1]) # result is density, z-factor", "def fahrenheit(T_in_celsius):\n return (T_in_celsius * 9 / 5) + 32", "def expansion(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n g_tp = liq_g(1,1,temp,pres)\n alpha = g_tp / g_p\n return alpha", "def water_vapour(t):\n T_0 = 273.15\n T_rw = 35.86 # over water\n a = 17.269\n # cdo -mulc,610.78 -exp -div -mulc,17.5 -subc,273.15 a\n return 610.78 * np.exp(a * (t - T_0) / (t - T_rw))", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def TM_fluid(layer, kx, om):\n\n h = layer.d\n rho = layer.medium.rho\n K = layer.medium.K\n k = om*np.sqrt(rho/K)\n ky = np.sqrt(k**2-kx**2)\n T = np.zeros((2, 2), dtype=complex)\n T[0, 0] = np.cos(ky*h)\n T[1, 0] = (om**2*rho/ky)*np.sin(ky*h)\n T[0, 1] = -(ky/(om**2*rho))*np.sin(ky*h)\n T[1, 1] = np.cos(ky*h)\n return T", "def eco_temperature_high_f(self) -> 
float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_high_c)", "def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)", "def calcTempInEV(expFitCoeffWithVoltUnits):\n\t\t# constants\n\t\teV=1.60218e-19;\n\t\tq=1.6e-19\n\t\t\n\t\t# temperature in eV\n\t\treturn q*expFitCoeffWithVoltUnits/eV", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def temperature(self):\n value = float(self._parent.query('R{}'.format(self._idx))[1:])\n return pq.Quantity(value, pq.Kelvin)", "def formation_temperature(surface_temperature, gradient, depth):\n form_temp = surface_temperature + gradient * depth\n return form_temp", "def thermal_state(self, beta: float = .1) -> numpy.ndarray:\n rho = numpy.exp(-beta * self.cost)\n return rho / numpy.sum(rho)", "def get_attic_temperature(theta_sat: np.ndarray, theta_ac: np.ndarray) -> np.ndarray:\n\n # temperature difference coefficient\n h = 1.0\n\n return theta_sat * h + theta_ac * (1 - h)", "def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)", "def temperature(self):\n temp = ct.c_float()\n self.lib.GetTemperatureF(ct.pointer(temp))\n return temp.value", "def temperature_from_potential_temperature(pressure, potential_temperature, reference_pressure=P0):\n return potential_temperature * exner_function(pressure, reference_pressure=reference_pressure)", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def _temperature(self, p_input:float) -> float:\n if self._unit_in == 'R':\n temp_K = p_input*5.0/9.0\n elif self._unit_in == 'F':\n temp_K = (p_input+459.67)/9.0*5.0\n elif self._unit_in == 'C':\n temp_K = p_input+273.15\n elif self._unit_in == 'K':\n temp_K = p_input\n \n if self._unit_out == 'R':\n return (temp_K*9.0/5.0)\n elif self._unit_out == 'F':\n return (temp_K*9.0/5.0-459.67) \n elif self._unit_out == 'C':\n return (temp_K-273.15)\n elif self._unit_out == 'K':\n return temp_K", "def update_temperature(self):\n self.iteration += 1 \n self.T = self.T0 * 0.9935**self.iteration", "def getTemperature(self):\n return self.temperature", "def heat_flux_out(T_inf, T_old, hc_air, emmi):\n\n #nz = T_old.shape[0]\n ny = T_old.shape[0]\n nx = T_old.shape[1]\n\n Q_out = np.zeros((ny, nx))\n h_eff = np.zeros((ny, nx))\n T_eff = np.zeros((ny, nx))\n for i in range(nx):\n for j in range(ny):\n T_eff[j, i] = ((T_old[j, i]**3) + (T_inf * T_old[j, i]**2)\n + (T_old[j, i] * T_inf**2) + T_inf**3)\n\n h_eff[j, i] = hc_air + (emmi*STEF_BOL_C*T_eff[j, i])\n\n Q_out[j, i] = h_eff[j, i] * (T_old[j, i] - T_inf)\n\n return Q_out", "def read_object_temperatureF(self, ):\n return self.read_object_temperatureC() * (9.0/5.0) + 32.0", "def temp(self):\n if self.temp_sensor is None:\n return None\n else:\n if self.temp_scale.lower() in ['f', 'fahrenheit']:\n return self.temp_sensor.temp_f\n elif self.temp_scale.lower() in ['c', 'celsius']:\n return self.temp_sensor.temp_c", "def get_temperature(self):\n pass", "def 
ionization_constant_water(temperature=298.15, density=None):\n import numpy as np\n\n # using Model II from Bandura etal\n # model parameters\n n = 6\n alpha_0 = -0.864671\n alpha_1 = 8659.19\n alpha_2 = -22786.2\n beta_0 = 0.642044\n beta_1 = -56.8534\n beta_2 = -0.375754\n\n # Water parameters\n Mw = 18.01528\n\n # temperature\n T = temperature\n\n # density\n if density:\n D = density\n else:\n D = density_water(T)\n\n pKWG = 0.61415 \\\n + 48251.33 / T \\\n - 67707.93 / T**2.0 \\\n + 10102100.0 / T**3.0\n\n Z = D * np.exp(alpha_0 \\\n + alpha_1/T \\\n + alpha_2/T**2 *np.power(D,2.0/3.0)\n )\n\n pKw = -2*n*(\n np.log10(1 + Z) - (Z/(Z + 1)) * D * (\n beta_0 + beta_1/T + beta_2*D\n )\n ) + pKWG + 2 * np.log10(Mw/1000.0)\n\n return np.power(10, -pKw)", "def calculate_celerity(period, depth, gravity):\r\n return geometry.gmCalculateCelerity(period, depth, gravity)", "def _temperature_policy(self, N: np.ndarray) -> np.ndarray:\n t = 1 / self.temperature\n pi = N ** t + np.finfo(np.float32).eps # Add tiny value to remove 0s\n pi = pi / pi.sum()\n return pi", "def effective_temperature(R,times):\n T_eff = np.empty((len(R),times))\n #T_c = ((3 * G * M_body) / (8 * np.pi * sigma)) #constants for a blackbody \n for i in range (len(R)):\n for t in range(times): \n T_eff[i][t] = ((m_dot[i][t])/(R[i]**3.0))**(1.0/4.0)\n return T_eff", "def calc_supply_temp(tr, Q, m, cp, case):\n if m > 0:\n if case == \"DH\":\n ts = tr + Q / (m * cp)\n else:\n ts = tr - Q / (m * cp)\n else:\n ts = 0\n return ts" ]
[ "0.7279272", "0.68165576", "0.6797786", "0.6684487", "0.66219014", "0.6579864", "0.6570214", "0.6559898", "0.649577", "0.64194864", "0.63722163", "0.6345614", "0.6337939", "0.6337814", "0.6305888", "0.6286312", "0.6284374", "0.6271602", "0.6268423", "0.62625694", "0.62529576", "0.62480354", "0.62434256", "0.6219022", "0.6205769", "0.62008363", "0.61886775", "0.61633605", "0.6153172", "0.61523736", "0.6142886", "0.6138658", "0.61331356", "0.6131802", "0.60888356", "0.60715157", "0.60712886", "0.60671556", "0.60627663", "0.6032802", "0.6031146", "0.6026677", "0.602504", "0.6020692", "0.6011433", "0.6008431", "0.60076565", "0.60003465", "0.59979975", "0.59821904", "0.59781516", "0.59618366", "0.595415", "0.5940936", "0.59350663", "0.5928525", "0.5927561", "0.59241194", "0.59234256", "0.5919663", "0.59142226", "0.5905145", "0.5902006", "0.5891895", "0.5888309", "0.5886067", "0.5882062", "0.5881698", "0.58715725", "0.5868934", "0.58674747", "0.5867025", "0.58495915", "0.5848728", "0.58425426", "0.5840377", "0.58343774", "0.5826279", "0.58240813", "0.58238375", "0.58200395", "0.5818309", "0.5797296", "0.57965916", "0.57850564", "0.5758969", "0.57580245", "0.5756962", "0.57553506", "0.5755176", "0.5753814", "0.57532537", "0.57518065", "0.5751042", "0.57456774", "0.574134", "0.5729222", "0.57270586", "0.5725671", "0.57193094" ]
0.76827806
0
Determines the convective heat transfer coefficient, either free or forced. In the absence of any wind, the "free" wind_heat_transfer_coefficient is returned. If there is wind present, this parameter is known as the "forced" wind_heat_transfer_coefficient.
def wind_heat_transfer_coefficient(self) -> float: return 3.8 + 2 * self.wind_speed # return 4.5 + 2.9 * self.wind_speed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def get_chiller_temperature(self) -> float:\n\n return self.send(self.cmd.GET_COOLING_ACT)", "def getHeatFlux(self, T):\n\t\tQ = self.heat_transfer_coefficient * (self.T_wall - T)\n\t\treturn Q", "def conductive_heat_flux(discr, eos, cv, grad_t):\n transport = eos.transport_model()\n return -transport.thermal_conductivity(eos, cv)*grad_t", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def setHeatTransferCoeff(self, u):\n return _cantera.wall_setHeatTransferCoeff(self.__wall_id, u)", "def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh", "def heatFlowRate(self):\n return _cantera.wall_Q(self.__wall_id)", "def calc_maintenance_cost (self):\n\n if str(self.comp_specs['operational costs']) \\\n != 'UNKNOWN':\n self.maintenance_cost = \\\n self.comp_specs['operational costs']\n else:\n self.maintenance_cost = \\\n (self.comp_specs['percent o&m'] / 100.0) * self.capital_costs\n #~ print 'self.maintenance_cost',self.maintenance_cost", "def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c", "def target_temperature(self):\n if self.current_operation == 'Heat & Cool':\n return None\n if self.current_operation == 'Heat only':\n return int(self._api._heatto)\n elif self.current_operation == 'Cool only':\n return int(self._api._coolto)\n return None", "def fuel_cost(self, update=False):\n if update or self._dfs['fuel_cost'] is None:\n self._dfs['fuel_cost'] = pudl.analysis.mcoe.fuel_cost(self)\n return self._dfs['fuel_cost']", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def target_temperature(self) -> float | None:\n if self.hvac_mode == HVACMode.COOL:\n return self.target_temperature_high\n if self.hvac_mode == HVACMode.HEAT:\n return self.target_temperature_low\n return None", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def 
transport_cost_per_t(self):\n return safe_divide(self.reseller.operating_expenses(), self.quantity_fieldside)", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def best_coupling(self):\n\n return self.coupling().max()", "def target_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_high_c\"))\r\n return kelvin_to_celsius(self._target_temperature_high)", "def coupling(self):\n couple = coupling_parameter(\n self.T_e, self.n_e, (self.particle, self.particle), self.Z\n )\n if couple < 0.01:\n warnings.warn(\n f\"Coupling parameter is {couple}, you might have strong coupling effects\",\n CouplingWarning,\n )\n\n return couple", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def get_chiller_temperature_setpoint(self) -> float:\n\n return self.send(self.cmd.GET_COOLING_SET)", "def distribute(self, date_time, air_temp, vapor_pressure=None,\n dew_point=None, cloud_factor=None):\n\n self._logger.debug('%s Distributing thermal' % date_time)\n\n # calculate clear sky thermal\n if self.clear_sky_method == 'marks1979':\n cth = np.zeros_like(air_temp, dtype=np.float64)\n envphys_c.ctopotherm(\n air_temp, dew_point,\n self.dem,\n self.sky_view_factor,\n cth,\n self.config['marks1979_nthreads'])\n\n elif self.clear_sky_method == 'dilley1998':\n cth = clear_sky.Dilly1998(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'prata1996':\n cth = clear_sky.Prata1996(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'angstrom1918':\n cth = clear_sky.Angstrom1918(air_temp, vapor_pressure/1000)\n\n # terrain factor correction\n if (self.sky_view_factor is not None) and \\\n (self.clear_sky_method != 'marks1979'):\n # apply (emiss * skvfac) + (1.0 - skvfac) to the longwave\n cth = cth * self.sky_view_factor + (1.0 - self.sky_view_factor) * \\\n STEF_BOLTZ * air_temp**4\n\n # make output variable\n self.thermal_clear = cth.copy()\n\n # correct for the cloud factor\n # ratio of measured/modeled solar indicates the thermal correction\n if self.correct_cloud:\n if self.cloud_method == 'garen2005':\n cth = cloud.Garen2005(cth,\n cloud_factor)\n\n elif self.cloud_method == 'unsworth1975':\n cth = cloud.Unsworth1975(cth,\n air_temp,\n cloud_factor)\n\n elif self.cloud_method == 'kimball1982':\n cth = cloud.Kimball1982(cth,\n air_temp,\n vapor_pressure/1000,\n cloud_factor)\n\n elif self.cloud_method == 'crawford1999':\n cth = cloud.Crawford1999(cth,\n air_temp,\n cloud_factor)\n\n # make output variable\n self.thermal_cloud = cth.copy()\n\n # correct for vegetation\n if self.correct_veg:\n cth = vegetation.thermal_correct_canopy(cth,\n air_temp,\n self.veg_tau,\n self.veg_height)\n\n # make output variable\n self.thermal_veg = cth.copy()\n\n self.thermal = utils.set_min_max(cth, self.min, self.max)", "def target_temperature(self) -> float | None:\n if self._device.mode == ThermostatMode.COOL and self._device.cooling_setpoint:\n return self._device.scaled_cooling_setpoint\n\n if self._device.heating_setpoint:\n return self._device.scaled_heating_setpoint\n\n return None", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) 
* self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) * self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def adjust_cost(self) -> None:\n\n n_iterations = self.array.shape[-1]\n n_year = len(self.array.year.values)\n\n # If uncertainty is not considered, the cost factor equals 1.\n # Otherwise, a variability of +/-30% is added.\n\n if n_iterations == 1:\n cost_factor = 1\n else:\n if \"reference\" in self.array.value.values.tolist():\n cost_factor = np.ones((n_iterations, 1))\n else:\n cost_factor = np.random.triangular(0.7, 1, 1.3, (n_iterations, 1))\n\n # Correction of hydrogen tank cost, per kg\n # Correction of fuel cell stack cost, per kW\n if \"FCEV\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (1.078e58 * np.exp(-6.32e-2 * self.array.year.values) + 3.43e2)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (3.15e66 * np.exp(-7.35e-2 * self.array.year.values) + 2.39e1)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n # Correction of energy battery system cost, per kWh\n list_batt = [\n i\n for i in [\"BEV\", \"PHEV-e\", \"PHEV-c-p\", \"PHEV-c-d\"]\n if i in self.array.powertrain\n ]\n if len(list_batt) > 0:\n self.array.loc[\n dict(powertrain=list_batt, parameter=\"energy battery cost per kWh\")\n ] = np.reshape(\n (2.75e86 * np.exp(-9.61e-2 * self.array.year.values) + 5.059e1)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of power battery system cost, per kW\n list_pwt = [\n i\n for i in [\n \"ICEV-p\",\n \"ICEV-d\",\n \"ICEV-g\",\n \"PHEV-c-p\",\n \"PHEV-c-d\",\n \"FCEV\",\n \"HEV-p\",\n \"HEV-d\",\n ]\n if i in self.array.powertrain\n ]\n\n if len(list_pwt) > 0:\n self.array.loc[\n dict(powertrain=list_pwt, parameter=\"power battery cost per kW\")\n ] = np.reshape(\n (8.337e40 * np.exp(-4.49e-2 * self.array.year.values) + 11.17)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of combustion powertrain cost for ICEV-g\n if \"ICEV-g\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"ICEV-g\", parameter=\"combustion powertrain cost per kW\")\n ] = np.clip(\n np.reshape(\n (5.92e160 * np.exp(-0.1819 * self.array.year.values) + 26.76)\n * cost_factor,\n (1, n_year, n_iterations),\n ),\n None,\n 100,\n )", "def discharge_coefficient(self) -> _VectorisedFloat:\n window_ratio = np.array(self.window_width / self.window_height)\n coefs = np.empty(window_ratio.shape + (2, ), dtype=np.float64)\n\n coefs[window_ratio < 0.5] = (0.06, 0.612)\n coefs[np.bitwise_and(0.5 <= window_ratio, window_ratio < 1)] = (0.048, 0.589)\n coefs[np.bitwise_and(1 <= window_ratio, window_ratio < 2)] = (0.04, 0.563)\n coefs[window_ratio >= 2] = (0.038, 0.548)\n M, cd_max = coefs.T\n\n window_angle = 2.*np.rad2deg(np.arcsin(self.opening_length/(2.*self.window_height)))\n return cd_max*(1-np.exp(-M*window_angle))", "def CalcForceDistribution(self):\n\t\t\n\t\tself.F = self.s * (self.Tether - self.X)\n\t\t\n\t\treturn 
self.F", "def DragCoeff(h,Vc,Temp_m,Thrust,S):\n T,p,rho = isa(h)\n return Thrust/(0.5*rho*VTrue(h,Vc,p,Temp_m)**2*S)", "def min_background_concentration(self) -> _VectorisedFloat:\n return self.CO2_atmosphere_concentration", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def coherence(self):\r\n return np.abs(self.coherency) ** 2", "def gradient_cf(self, potential, get_energy=True):\n xn, xe, lpn, lpe, alpha, o1, o2 = self(None)\n fn_, fe_ = potential((xn, xe))\n fn_ = (fn_ + self.tw * lpn) * self.wn\n fe_ = (fe_ - lpe) * self.we\n fn = fn_ * alpha\n fe = fe_ * alpha\n dmu = tf.math.divide_no_nan(tf.reduce_sum(fn * self.xn, axis=-1, keepdims=True), self.sigma)\n dsg = tf.math.divide_no_nan(tf.reduce_sum(fn * self.x22, axis=-1, keepdims=True), self.sigma)\n dmu1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi, -1, keepdims=True), o1)\n dmu2 = tf.reduce_sum(fe * self.xj, -1, keepdims=True) / o2\n dsg1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi22, -1, keepdims=True), o1)\n dsg2 = tf.reduce_sum(fe * self.xj22, -1, keepdims=True) / o2\n\n dmu += (tf.concat([dmu1, self.top0], 2) + tf.concat(\n [self.btm0, tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dmu2[:, :, 810:, ...], 2, True)], 2))\n\n dsg += (tf.concat([dsg1, self.top0], 2) + tf.concat(\n [self.btm0, tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dsg2[:, :, 810:, ...], 2, True)], 2))\n\n dalpha = (tf.reduce_sum(fn_, [2, 4], keepdims=True) + tf.reduce_sum(fe_, [2, 4], keepdims=True))\n dw = alpha * (dalpha - tf.reduce_sum(dalpha * alpha, 3, keepdims=True))\n energy = tf.zeros(fn.shape[:2], tf.float64) if not get_energy else \\\n -(tf.reduce_sum(fn, [2, 3, 4]) + tf.reduce_sum(fe, [2, 3, 4]))\n return (-dmu * sqrt2, -dsg, -dw), energy", "def target_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_c\"))\r\n return kelvin_to_celsius(self._target_temperature_low)", "def cost(distance, highway, bicycle, incline, preferences):\n\n #unpack preferences\n (flatness_pref, bicycle_pref, distance_pref,\n motorway_pref, highway_pref, residential_pref) = preferences\n multiplier = 1 + bike_multiplier(bicycle, bicycle_pref) + road_multiplier(highway, bicycle_pref, motorway_pref, highway_pref, residential_pref)\n if multiplier <= 0:\n multiplier = 0.01\n incl = incline_multiplier(float(incline))*flatness_pref\n cost = float(distance) * multiplier + incl\n if cost <= 0:\n cost = 0.01\n return cost", "def target_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_c)", "def net_alchemical_force(self):\n return self._cpp_obj.net_force", "def target_temperature(self):\n if self._client.mode == self._client.MODE_HEAT:\n return self._client.heattemp\n if self._client.mode == self._client.MODE_COOL:\n return self._client.cooltemp\n return None", "def LiftCoeff(h,Vc,Temp_m,W,S):\n T,p,rho = isa(h)\n return W*g0/(0.5*rho*VTrue(h,Vc,p,Temp_m)**2*S)", "def get_specific_heat() -> float:\n return 
1006.0", "def forcing(self):\n if not self._fr or not self._frstar:\n raise ValueError('Need to compute Fr, Fr* first.')\n return -Matrix([self._f_d, self._f_dnh])", "def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow", "def thermal_operating_costs_rule(_m, y, s):\r\n\r\n return sum((m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])) * m.p[g, y, s, t]\r\n for g in m.G_THERM for t in m.T)", "def computeSmoothEnergy(self):\n _cgco.gcoComputeSmoothEnergy(self.handle, self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])", "def local_cost(self) -> Number:\n mu_E = self._payoff_weight_energy\n mu_T = self._payoff_weight_time\n D_loc = SIMULATION_PARAMETERS['LOCAL_CPU_CYCLES']\n F_loc = self.cpu_frequency\n T_loc = D_loc / F_loc\n E_loc = D_loc / self.cpu_effeciency\n return mu_T * T_loc + mu_E * E_loc", "def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh", 
"def get_cold_junction_temperature(self):\n return self._mcp9600.get('COLD_JUNCTION').temperature", "def costFunction(self):\n priorDiff = np.matrix(self.model.stateVector - self.model.prior).T\n measurementDiff = np.matrix(self.model.observation\n - self.model.modelCalculation).T\n chisq = measurementDiff.T * self.errSinv * measurementDiff\n chisq += priorDiff.T * self.priorSinv * priorDiff\n \n return chisq[0,0]", "def calc_chromatic_coupling(self):\n raise NotImplementedError('Chromatic Coupling is not Implemented yet.')", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def _get_concentration(self, state):\n return self.fc(state.float_features).exp() + self.EPSILON", "def get_dar_coupling_throughput(self, wvs, wvs0=None):\n if wvs0 is None:\n wvs0 = np.mean(wvs)\n\n n = self.telescope.get_nair(wvs)\n n0 = self.telescope.get_nair(wvs0)\n\n dar = np.abs(nair.compute_dar(n, n0, np.radians(self.zenith)))\n\n lam_d = (wvs * u.um) / (self.telescope.diameter.to(u.um)) * 206265 * 1000\n\n coupling_th = np.interp(dar/lam_d, self.fiber_coupling['sep'], self.fiber_coupling['eta'], right=0)\n\n return coupling_th", "def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence", "def calc_capital_costs (self):\n powerhouse_control_cost = 0\n if not self.cd['switchgear suitable for renewables']:\n powerhouse_control_cost = self.cd['switchgear cost']\n\n #~ road_needed = self.comp_specs['road needed for transmission line']\n\n\n if str(self.comp_specs['transmission capital cost'])\\\n != 'UNKNOWN':\n transmission_line_cost = \\\n int(self.comp_specs['transmission capital cost'])\n else:\n if str(self.comp_specs['distance to resource']) \\\n != 'UNKNOWN':\n distance = \\\n float(self.comp_specs\\\n ['distance to resource'])\n transmission_line_cost = \\\n distance*self.comp_specs['est. transmission line cost']\n\n secondary_load_cost = 0\n if self.comp_specs['secondary load']:\n secondary_load_cost = self.comp_specs['secondary load cost']\n\n if str(self.comp_specs['generation capital cost']) \\\n != 'UNKNOWN':\n wind_cost = \\\n int(self.comp_specs['generation capital cost'])\n self.cost_per_kw = np.nan\n else:\n for i in range(len(self.comp_specs['estimated costs'])):\n if int(self.comp_specs['estimated costs'].iloc[i].name) < \\\n self.load_offset_proposed:\n if i == len(self.comp_specs['estimated costs']) - 1:\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n continue\n\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n\n wind_cost = self.load_offset_proposed * cost\n self.cost_per_kw = cost\n\n #~ print powerhouse_control_cost\n #~ print transmission_line_cost\n #~ print secondary_load_cost\n #~ print wind_cost\n self.capital_costs = powerhouse_control_cost + transmission_line_cost +\\\n secondary_load_cost + wind_cost\n\n #~ print 'self.capital_costs',self.capital_costs", "def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. 
are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0", "def get_force(self):\n displ = self.get_displ()\n equil = displ / np.linalg.norm(displ) * self.L0\n return self.k * (displ - equil)", "def calc_maintenance_cost(self):\n\n self.maintenance_cost = self.capital_costs * .01", "def cond_boiler_op_cost(Q_therm_W, Q_design_W, T_return_to_boiler_K):\n if Q_therm_W > 0.0:\n\n # boiler efficiency\n eta_boiler = cond_boiler_operation(Q_therm_W, Q_design_W, T_return_to_boiler_K)\n\n E_aux_Boiler_req_W = BOILER_P_AUX * Q_therm_W\n\n Q_primary_W = Q_therm_W / eta_boiler\n else:\n Q_primary_W = 0.0\n E_aux_Boiler_req_W = 0.0\n\n return Q_primary_W, E_aux_Boiler_req_W", "def linear_heat_transfer(x, t, K_medium, rho_medium, c_medium, T_medium_initial, H_heat_transfer, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n\n h = H_heat_transfer/K_medium\n erfc_factor_1 = erfc(x/(2*np.sqrt(k*t)))\n\n #combine factors in logdomain, since the exp-factors quickly approach\n #infinity while erfc-factor goes to zero\n log_exp_factor_1 = h*x\n log_exp_factor_2 = k*t*h**2\n log_erfc_factor_2 = np.log(erfc(x/(2*np.sqrt(k*t)) + h*np.sqrt(k*t)))\n exp_erfc_factor = np.exp(log_exp_factor_1 + log_exp_factor_2 + log_erfc_factor_2)\n\n return (erfc_factor_1 - exp_erfc_factor)*(T_external_applied - T_medium_initial) + T_medium_initial", "def infer(self, potential, iterations, cf=True, x0=None):\n energy = tf.zeros(shape=[x0.shape[0], 1 + x0.shape[1]], dtype=self.dtype)\n x0 = tf.tile(x0, [1, 1, 1, self.mu.shape[-2], 1])\n for i in tf.range(iterations):\n self.conditioned(x0)\n verbose = tf.equal(tf.math.mod(i, 50), 0) or tf.equal(i + 1, iterations)\n if cf:\n grd, energy = self.gradient_cf(potential, verbose)\n else:\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(self.trainable_variables)\n energy = self.bethe_free_energy(potential)\n grd = tape.gradient(energy, self.trainable_variables)\n\n self.optimizer.apply_gradients(zip(grd, self.trainable_variables))\n # reasonable clip for the distribution of bottom layer\n self.mu[:, 0, :729, ...].assign(tf.clip_by_value(self.mu[:, 0, :729, ...], 0., 1.))\n self.sigma[:, 0, :729, ...].assign(tf.clip_by_value(self.sigma[:, 0, :729, ...], 0.02, 1.))\n\n if verbose:\n tf.print(tf.strings.format('iter: {} dmu = {}, dsigma = {}, dw={}, Energy = {}', (\n i, tf.reduce_mean(tf.abs(grd[0])), tf.reduce_mean(tf.abs(grd[1])), tf.reduce_mean(tf.abs(grd[2])),\n tf.reduce_mean(energy))))\n return energy", "def gtf(self):\n\t #if tank is empty, conductance is 0\n\t if self.tx <= 0:\n\t return 0.\n\t\t#returns 0.5, as a function of TAI\n\t else:\n\t return 0.5", "def mcoe(self, update=False,\n min_heat_rate=5.5, min_fuel_cost_per_mwh=0.0,\n min_cap_fact=0.0, max_cap_fact=1.5):\n if update or self._dfs['mcoe'] is None:\n self._dfs['mcoe'] = pudl.analysis.mcoe.mcoe(\n self,\n min_heat_rate=min_heat_rate,\n min_fuel_cost_per_mwh=min_fuel_cost_per_mwh,\n min_cap_fact=min_cap_fact,\n max_cap_fact=max_cap_fact)\n return self._dfs['mcoe']", "def _therm_cond(self):\n xy = dict() # used to label the components e.g 1->CO2,2->N2\n for (i, j) in enumerate(self.component_list, 1):\n xy[i] = j\n\n k_vap = 0\n for i in range(1, len(self.component_list) + 1):\n sumij = 0\n for j in range(1, len(self.component_list) + 1):\n Aij = (1 + (self.visc_d_comp[xy[i]] / self.visc_d_comp[xy[j]])**0.5 *\n (self.mw_comp[xy[j]] / self.mw_comp[xy[i]])**0.25)**2 *\\\n (8 * (1 + self.mw_comp[xy[i]] / 
self.mw_comp[xy[j]]))**-0.5\n sumij += self.mole_frac_comp[xy[j]] * Aij\n k_vap += self.mole_frac_comp[xy[i]] * self.therm_cond_comp[xy[i]] / sumij\n\n try:\n self.therm_cond = Expression(expr=k_vap,\n doc='Vapor thermal'\n 'conductivity [J/(m.K.s)]')\n except AttributeError:\n self.del_component(self.therm_cond)\n raise", "def investment_decision_thermal_rule(_m, g, y):\r\n\r\n return (((m.DELTA[y] / m.INTEREST_RATE) * m.GAMMA[g] * m.I_C[g, y])\r\n + sum(m.DELTA[j] * m.C_FOM[g] for j in m.Y if j >= y)\r\n - m.mu_1[g, y]\r\n + ((m.DELTA[m.Y.last()] / m.INTEREST_RATE) * m.C_FOM[g])\r\n + sum(- m.sigma_3[g, j, s, t] for s in m.S for t in m.T for j in m.Y if j >= y)\r\n == 0)", "def diffuse_coefficient(self):\n return self._diffuse_coefficient", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def _cost_route_fine(self):\n return self.fine", "def estimate_diffusion_coefficient(n_CFx: int) -> t.Diffusivity:\n return t.Diffusivity(10 ** (-4.5360 + -0.1088 * n_CFx), \"cm^2/s\")", "def _calc_ft(Tci, Thi, Tco, Tho, N_shells) -> 'ft':\n if (Tco - Tci)/Tco < 0.01 or (Thi-Tho)/Tho < 0.01:\n return 1\n try:\n return ht.F_LMTD_Fakheri(Thi, Tho, Tci, Tco,\n shells=N_shells)\n except ValueError:\n return 0.6 # Accounts for worst case scenario", "def get_hcost(self):\n hvcost = self.get_hvcost()\n dcost = self.get_dcost()\n hcost = hvcost + dcost\n return hcost", "def discharge_coefficient(self) -> _VectorisedFloat:\n return 0.6", "def inflatedCost(self):\n\t\tinflated = self.cost\n\n\t\t# https://www.in2013dollars.com/Wine-at-home/price-inflation/2020-to-2021?amount=10000\n\t\tif self.acquisition.year <= 2018: # 2018-to-2019\n\t\t\tinflated *= 1.010727\n\n\t\tif self.acquisition.year <= 2019: # 2019-to-2020\n\t\t\tinflated *= 1.002446\n\n\t\tif self.acquisition.year <= 2020: # 2020-to-2021\n\t\t\tinflated *= 1.010612\n\n\t\tif self.acquisition.year <= 2021: # 2021-to-2022\n\t\t\tinflated *= 1.011850\n\n\t\treturn round(inflated, 2)", "def target_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperatue_high_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_high_c)", "def eco_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_high_c)", "def _normed_concentration(self, time: float) -> _VectorisedFloat:\n # The model always starts at t=0, but we avoid running concentration calculations\n # before the first presence as an optimisation.\n if time <= self._first_presence_time():\n return self.min_background_concentration()/self.normalization_factor()\n \n next_state_change_time = self._next_state_change(time)\n\n RR = self.removal_rate(next_state_change_time)\n # If RR is 0, conc_limit does not play a role but its computation \n # would raise an error -> 
we set it to zero.\n try:\n conc_limit = self._normed_concentration_limit(next_state_change_time)\n except ZeroDivisionError:\n conc_limit = 0.\n\n t_last_state_change = self.last_state_change(time)\n conc_at_last_state_change = self._normed_concentration_cached(t_last_state_change)\n\n delta_time = time - t_last_state_change\n fac = np.exp(-RR * delta_time)\n\n return conc_limit * (1 - fac) + conc_at_last_state_change * fac", "def eco_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_low_c)", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def locked_temp_min_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"locked_temp_min_c\"))\r\n return kelvin_to_celsius(self._locked_temp_min)", "def target_temperature_high(self):\n if self._client.mode == self._client.MODE_AUTO:\n return self._client.cooltemp\n return None", "def compute_windchill(t,v):\n a = 35.74\n b = 0.6215\n c = 35.75\n d = 0.4275\n v16 = v**0.16\n wci = a+(b*t)-(c*v16)+(d*t*v16)\n return wci", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def define_ufl_stress_work_diff(self):\n\n if hasattr(self, 'ufl_stress_work_diff'):\n return None\n\n if self.displacement != 0:\n # Derivative of stress term w.r.t. 
to displacement.\n self.ufl_stress_work_du = dlf.derivative(self.ufl_stress_work,\n self.displacement,\n self.trial_vector)\n else:\n self.ufl_stress_work_du = 0\n\n if self.velocity != 0:\n self.ufl_stress_work_dv = dlf.derivative(self.ufl_stress_work,\n self.velocity,\n self.trial_vector)\n else:\n self.ufl_stress_work_dv = 0\n\n if self.pressure != 0:\n self.ufl_stress_work_dp = dlf.derivative(self.ufl_stress_work,\n self.pressure,\n self.trial_scalar)\n else:\n self.ufl_stress_work_dp = 0\n\n return None", "def calcCostFun(self):\n\n self.start()\n F, K = self.model()\n \n return self.costFunction", "def get_fc3(supercell,\n primitive,\n displacements,\n forces,\n fc_calculator=None,\n fc_calculator_options=None,\n is_compact_fc=False,\n log_level=0):\n\n if fc_calculator == 'alm':\n from phono3py.interface.alm import get_fc3\n return get_fc3(supercell,\n primitive,\n displacements,\n forces,\n options=fc_calculator_options,\n is_compact_fc=is_compact_fc,\n log_level=log_level)\n else:\n msg = (\"Force constants calculator of %s was not found .\"\n % fc_calculator)\n raise RuntimeError(msg)", "def get_cruft(self):\n\n raise computerjanitor.UnimplementedMethod(self.get_cruft)", "def get_cheap_conformer(self):\n num_confs = min(500, max(50, len(self.mol.atoms) * 3))\n rd_mol, rd_index_map = conformers.embed_rdkit(label=self.label, mol=self.mol, num_confs=num_confs)\n xyzs, energies = conformers.rdkit_force_field(label=self.label, rd_mol=rd_mol, rd_index_map=rd_index_map,\n mol=self.mol, force_field='MMFF94', return_xyz_strings=True)\n if energies:\n min_energy = min(energies)\n min_energy_index = energies.index(min_energy)\n self.cheap_conformer = xyzs[min_energy_index]\n elif xyzs:\n self.cheap_conformer = xyzs[0]\n else:\n logger.warning('Could not generate a cheap conformer for {0}'.format(self.label))\n self.cheap_conformer = None", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def getCapitalChargeFactor(self):\n return 1 / self._interestRate - 1 / (pow(1 + self._interestRate, self._economicLifetime) * self._interestRate)", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def _cost_load_theft(self):\n theft_prob = self.theft_probability\n theft = np.random.choice([0, 1], p=[1-theft_prob, theft_prob])\n if theft == 1:\n cost = self._calculate_profit_weight() * PRICE_PER_KG\n return cost\n else:\n return 0", "def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl", "def get_conductivity(self) -> float:\n try:\n datalist = self.get_data()\n data = 
datalist[0]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'get_conductivity error: {err}')\n return -1", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def cost(self):\n abs_cost = sum(f['price'] * f['qty'] for f in self.fills)\n return -abs_cost if self.is_ask() else abs_cost", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def fpct(self):\n # 1 is probably the best number in most cases because the game is often CPU-bound.\n # the following number could be chosen instead someday\n tps = self.real_speed * 1000 / VIRTUAL_TIME_INTERVAL\n # Avoid unrealistic ping values.\n ping = min(self.max_ping, self.ping)\n result = int(tps * ping * config.fpct_coef) + 1\n return min(config.fpct_max, result)", "def best_coupling_frequency(self):\n\n idx_best = self.coupling().argmax()\n\n return self.freq.f[idx_best]" ]
[ "0.6343963", "0.59971076", "0.59249985", "0.5791971", "0.576443", "0.5683115", "0.565232", "0.55878526", "0.5570194", "0.54607165", "0.5381237", "0.52986056", "0.5273817", "0.5269059", "0.5203606", "0.5199913", "0.5169069", "0.5163147", "0.5161178", "0.51067317", "0.510497", "0.5098822", "0.50977695", "0.5091799", "0.5088686", "0.50850314", "0.50850314", "0.5050838", "0.50430024", "0.50383663", "0.50196457", "0.5007527", "0.5007485", "0.5006707", "0.5004865", "0.49978605", "0.4989394", "0.49838457", "0.49735674", "0.49537858", "0.49493426", "0.494904", "0.49486387", "0.49470538", "0.49460146", "0.49382645", "0.49359253", "0.49297574", "0.4929614", "0.49166504", "0.49098557", "0.4904686", "0.48983502", "0.48855397", "0.4885508", "0.4877309", "0.4875149", "0.48748937", "0.48642212", "0.48590156", "0.48560908", "0.4849536", "0.4840836", "0.4839072", "0.48343676", "0.4831373", "0.48298457", "0.48266608", "0.48263085", "0.48221284", "0.48103663", "0.48067504", "0.48061502", "0.4798044", "0.47948518", "0.47915915", "0.47873503", "0.47783795", "0.4768591", "0.4763097", "0.4762324", "0.47588003", "0.4756652", "0.47473717", "0.47465208", "0.47408658", "0.47375646", "0.47322574", "0.4731408", "0.4729779", "0.47288495", "0.47267315", "0.4725189", "0.4721651", "0.47205853", "0.47191757", "0.4713861", "0.47127703", "0.47111037", "0.4710614" ]
0.69990456
0
Return a nice representation of the weather conditions.
def __repr__(self) -> str:
    return (
        "WeatherConditions("
        f"ambient_temperature: {self.ambient_temperature:.3f}K, "
        f"azimuthal_angle: {self.azimuthal_angle}deg, "
        f"declination: {self.declination}deg, "
        f"density: {self.density_of_air:.3f}kg/m^3, "
        f"dynamic_viscosity: {self.dynamic_viscosity_of_air:.3f}kg/m*s, "
        f"heat_capacity: {self.heat_capacity_of_air:.3f}J/kg*K, "
        f"irradiance: {self.irradiance:.3f}W/m^2, "
        f"kinematic_viscosity: {self.kinematic_viscosity_of_air:.3f}m^2/s, "
        f"sky_temperature: {self.sky_temperature:.3f}K, "
        f"thermal_conductivity: {self.thermal_conductivity_of_air:.3f}W/m*K, "
        f"thermal_expansion_coefficient: {self.thermal_expansivity_of_air:.3f}K^-1, "
        f"wind_heat_transfer_coefficient: {self.wind_heat_transfer_coefficient:.2f}W/m*K, "
        f"wind_speed: {self.wind_speed:.3f}m/s, "
        ")"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditions(self, json):\n conditions = str(json['forecast']['simpleforecast']['forecastday'][0]['conditions'])\n return conditions", "def genWeather():\n\n weather = random.choice(weather_conditions.keys())\n condition = weather_conditions[weather]\n (tMax, tMin) = condition[\"temperature\"]\n (pMax, pMin) = condition[\"pressure\"]\n (hMax, hMin) = condition[\"humidity\"]\n\n return weather + \"|\" + str(round(random.uniform(tMax, tMin), 1)) + \"|\" + \\\n str(round(random.uniform(pMax, pMin), 1)) + \"|\" + \\\n str(random.randrange(hMax, hMin, -1))", "def convert_weather(self, description):\n conditions = {\n 'clear sky': 'clear',\n 'few clouds': 'clouds with some sunshine',\n 'scattered clouds': 'cloudy',\n 'broken clouds': 'cloudy',\n 'shower rain': 'showers',\n 'thunderstorm': 'thunder and lightning',\n 'mist': 'fog'\n }\n\n if description in conditions:\n return conditions.get(description, \"Look out of the window.\")\n else:\n return description", "def conditions(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"conditions\")", "def __str__(self):\n temperature = None\n offset = ' ' * 4\n if self._forecast_type == ForecastType.TODAY:\n temperature = (f'{offset}{self._current_temp}\\xb0\\n'\n f'{offset}High {self._high_temp}\\xb0 / '\n f'Low {self._low_temp}\\xb0 ')\n else:\n temperature = (f'{offset}High {self._high_temp}\\xb0 / '\n f'Low {self._low_temp}\\xb0 ')\n return (f'>> {self.forecast_date}\\n'\n f'{temperature}'\n f'({self._description})\\n'\n f'{offset}Wind: '\n f'{self._wind} / Humidity: {self._humidity}\\n')", "def weather(self):\r\n try:\r\n return str(self.connect()['weather'][0]['description'])\r\n except:\r\n return '@weather'", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n msg = '- Window size: ' + str(self.window_size) + \" by \" + str(self.window_size)\n msg += '\\n'\n msg += ' - Expression for r.mapcalc to determine column water vapor: '\n return msg + str(self.column_water_vapor_expression)", "def getData():\n\t\n\ttry:\n\t\tgoogleWeather = pywapi.get_weather_from_google(location)\n\t\tcondition = googleWeather['current_conditions']['condition']\n\t\ttemp = googleWeather['current_conditions']['temp_c']\n\t\treturn \"<weather location=\\\"\" + location + \"\\\" condition=\\\"\" + condition + \"\\\" temp=\\\"\" + temp + \"c\" + \"\\\"/>\"\n\texcept:\n\t\treturn \"\"", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.items()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def weather_helper():\n\n weather = get_weather('Chicago')\n conditions = weather['weather'][0]['description']\n temperature = weather['main']['temp']\n location = weather['name']\n\n curr_weather = 'It is currently %s degrees with %s in %s' % (temperature, conditions, location)\n return curr_weather", "def getWaterConditions(self):\n return self._getConditions(restrict=['CS-Eau'])", "def __str__(self):\r\n s = ''\r\n for i, (k, v) in enumerate(self.meters.items()):\r\n if i > 0:\r\n s += ' '\r\n s += k + ' ' + str(v)\r\n return s", "def 
details(weather):\n\treturn \"\"\"<table class=\"forecast bg-success\"><tr><th colspan=\"2\" class=\"text-center lead\">Weather for {location} at {time}<th></tr>\n\t<tr><td>Temp: {temperature}<i class=\"wi wi-celsius\"></i> Feels Like: {feelsLike}<i class=\"wi wi-celsius\"></i></td><td rowspan=\"9\"><img src=\"map.gif?{latitude},{longitude}\" width=\"600\" height=\"371\"/><td></tr>\n\t<tr><td>Low: {low}<i class=\"wi wi-celsius\"></i> High: {high}<i class=\"wi wi-celsius\"></i></td></tr>\n\t<tr><td>Sunrise <i class=\"wi wi-sunrise\"></i>: {sunrise} Sunset <i class=\"wi wi-sunset\"></i>: {sunset}</td></tr>\n\t<tr><td>Wind: {windSpeed} kph from {windBearing} <i class=\"wi wi-wind.towards-{windDirection}-deg\"></i></td></tr>\n\t<tr><td>Summary <i class=\"wi wi-{icon}\"></i>: {summary}</td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td>&nbsp;</td><td>&nbsp;</td></tr>\n\t</table>\"\"\".format(**weather)", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.iteritems()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def current_weather(self):\n weather = DarkSky(self.coordinates)\n current_weather = weather.current_weather\n reply = [f\"*Current Weather in {self.format_location(self.location)}*\"]\n summary = current_weather['summary']\n temp = current_weather['temperature']\n feels_like = current_weather['apparentTemperature']\n rain_chance = current_weather['precipProbability']\n wind_speed = current_weather['windSpeed']\n cloud_cover = current_weather['cloudCover']\n icon = current_weather['icon']\n emoji = self.get_emoji(icon)\n logging.info(f\"Weather Icon: {icon}\")\n weather_message = [\n f\"{emoji} *{summary}*\",\n f\">*Temperature: `{temp}`*\",\n f\">*Feels Like: `{feels_like}`*\",\n f\">*Chance of Rain: `{rain_chance}`*\",\n f\">*Wind Speed: `{wind_speed}`*\",\n f\">*Cloud Cover: `{cloud_cover}`*\"\n ]\n reply.append(\"\\n\".join(weather_message))\n return \"\\n\".join(reply)", "def __str__(self):\n status = (\"\\na: %.2f \\n\" % self.a +\n \"e: %.2f \\n\" % self.e +\n \"inc: %.2f deg \\n\" % (self.inc * 180/math.pi) +\n \"om: %.2f deg \\n\" % (self.om * 180/math.pi) +\n \"Om: %.2f deg \\n\" % (self.Om * 180/math.pi) +\n \"H: %.2f \\n\" % self.H\n )\n return status", "def generate_weather_conditions(temperature, temp_type):\n\n if temp_type == \"MIN\" or temperature < 5:\n if temperature > 10:\n return 0\n elif temperature >= 0:\n return (10.-temperature)/10.\n else:\n return 1\n\n elif temp_type == \"AVG\":\n\n if temperature > 25:\n return 0\n elif temperature >= 15:\n return (25.-temperature)/(25.-15)\n elif temperature >= 5:\n return (temperature-5.)/(15-5.)\n\n elif temp_type == \"MAX\":\n if temperature > 40:\n return 1\n elif temperature >= 20:\n return (temperature-20)/(40.-20)\n else:\n return 0", "def get_conditions(self):\n return (self.temp, self.humid)", "def get_weather_violation(weather,minimums):\n # Implement this function\n #print(weather)\n #print(minimums)\n \n result = ''\n #print(weather['wind'])\n if weather == None:\n# result = 'Unknown'\n return 'Unknown'\n #elif bad_visibility(weather['visibility'],minimums[1]) == False and bad_winds(weather['wind'], minimums[2], minimums[3]) == False and bad_ceiling(weather['sky'],minimums[0]) == False:\n #result = ''\n \n if bad_winds(weather['wind'], minimums[2], minimums[3]) == True:\n result = 'Winds' if result == '' else 'Weather'\n \n if bad_visibility(weather['visibility'], minimums[1]) == True:\n result = 'Visibility' if result 
== '' else 'Weather'\n \n if bad_ceiling(weather['sky'], minimums[0]) == True:\n result = 'Ceiling' if result == '' else 'Weather'\n \n #elif \n \n return result", "def format_status(self) -> str:\n if not self.ready_to_trade:\n return \"Market connectors are not ready.\"\n lines = []\n\n if len(self.stored_executors) > 0:\n lines.extend([\n \"\\n########################################## Closed Executors ##########################################\"])\n\n for executor in self.stored_executors:\n lines.extend([f\"|Signal id: {executor.timestamp}\"])\n lines.extend(executor.to_format_status())\n lines.extend([\n \"-----------------------------------------------------------------------------------------------------------\"])\n\n if len(self.active_executors) > 0:\n lines.extend([\n \"\\n########################################## Active Executors ##########################################\"])\n\n for executor in self.active_executors:\n lines.extend([f\"|Signal id: {executor.timestamp}\"])\n lines.extend(executor.to_format_status())\n if self.candles.is_ready:\n lines.extend([\n \"\\n############################################ Market Data ############################################\\n\"])\n signal, take_profit, stop_loss, indicators = self.get_signal_tp_and_sl()\n lines.extend([f\"Signal: {signal} | Take Profit: {take_profit} | Stop Loss: {stop_loss}\"])\n lines.extend([f\"BB%: {indicators[0]} | MACDh: {indicators[1]} | MACD: {indicators[2]}\"])\n lines.extend([\"\\n-----------------------------------------------------------------------------------------------------------\\n\"])\n else:\n lines.extend([\"\", \" No data collected.\"])\n\n return \"\\n\".join(lines)", "def all_characteristics_as_string(self):\n chars1 = _all_characteristics_\n chars2 = [ch.title() for ch in chars1]\n chars2[chars2.index('Internalstructure')] = 'InternalStructure'\n\n s = ('%-18s %-24s %-2s'%('Characteristic', 'Semantic value','#'))\n s+= '\\n'\n s+= ('%-18s %-24s %-2s' % ('-', '-', '-')) + '\\n'\n\n for i in range(len(chars1)):\n attrs = (chars2[i],\\\n getattr(self,chars2[i])(),\n getattr(self,chars1[i]))\n s += '%-18s | %-24s | %-2d' % attrs\n s += '\\n'\n return s[:-1] # cut the trailing newline character", "def device_state_attributes(self):\n # TODO: convert RH from Elk to AH ?\n #if self.current_humidity > 0:\n # humidity = self.current_humidity\n data = {\n 'hidden': self._hidden,\n 'temp_unit' : self.temperature_unit,\n }\n if self._device.temp_outside is not None and self._device.temp_outside > -460:\n data['temp_outside'] = self._device.temp_outside\n if self._device.temp_3 is not None and self._device.temp_3 > -460:\n data['temp_3'] = self._device.temp_3\n if self._device.temp_4 is not None and self._device.temp_4 > -460:\n data['temp_4'] = self._device.temp_4\n return data", "def generate_condition_data(self):\n # set 'Conditions' column to NA\n self.output['Conditions'] = 'NA'\n\n # instantiate new MarkovChain object\n MC = MarkovChain()\n\n # apply forecast function on 'Conditions' column based on temperature\n # and humidity values for each observation period\n params = self.output[[\"Temperature\", \"Humidity\"]]\n self.output[['Conditions']] = params.apply(\n lambda x: MC.forecast_weather(x.values[0], x.values[1]), axis=1)", "def state(self):\n result = \"\"\n if self._type == \"weather\":\n result = self._connector.get_condition()\n elif self._type == \"weather_report\":\n result = re.search(\n \"\\w+, \\d{2}\\.\\d{2}\\.\\d{2}, \\d{2}:\\d{2}\",\n self._connector.get_weather_report(),\n 
).group()\n elif self._type == \"temperature\":\n result = self._connector.get_temperature()\n elif self._type == \"dewpoint\":\n result = self._connector.get_dewpoint()\n elif self._type == \"pressure\":\n result = self._connector.get_pressure()\n elif self._type == \"wind_speed\":\n result = self._connector.get_wind_speed()\n elif self._type == \"wind_direction\":\n result = self._connector.get_wind_direction()\n elif self._type == \"wind_gusts\":\n result = self._connector.get_wind_gusts()\n elif self._type == \"precipitation\":\n result = self._connector.get_precipitation()\n elif self._type == \"precipitation_probability\":\n result = self._connector.get_precipitation_probability()\n elif self._type == \"precipitation_duration\":\n result = self._connector.get_precipitation_duration()\n elif self._type == \"cloud_coverage\":\n result = self._connector.get_cloud_coverage()\n elif self._type == \"visibility\":\n result = self._connector.get_visibility()\n elif self._type == \"sun_duration\":\n result = self._connector.get_sun_duration()\n elif self._type == \"sun_irradiance\":\n result = self._connector.get_sun_irradiance()\n elif self._type == \"fog_probability\":\n result = self._connector.get_fog_probability()\n elif self._type == \"humidity\":\n result = self._connector.get_humidity()\n return result", "def __str__(self):\n state_1 = \"Time: \" + str(self._time)\n state_2 = \"Current Cookies: \" + str(self._current_cookies)\n state_3 = \"CPS: \" + str(self._cps)\n state_4 = \"Total Cookies: \" + str(self._total_cookies)\n return state_1 + \" \" + state_2 + \" \" + state_3 + \" \" + state_4", "def PrintWeather(Weather):\n print('Temperature : {}°C'.format(Weather[0]))\n print('Humidity : {} %'.format(Weather[1]))\n print('Description : {}'.format(Weather[2])+'\\n')\n return 1", "def wattsString(self):\n return self.watts is None and \"unknown\" or str(self.watts)", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"conv_activations = {}\\n\".format(self.conv_activations)\n status += \"conv_architecture = {}\\n\".format(self.conv_architecture)\n status += \"kernel_sizes = {}\\n\".format(self.kernel_sizes)\n status += \"pool_kernel = {}\\n\".format(self.pool_kernel)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def to_string(self):\n if self.is_power_onoff():\n return 'Power On/Off'\n else:\n gain = str(hex(int(self['gain_speed'])))\n out = self['target'].ljust(20) + ' ' + self['filters'].ljust(11) + ' ' + self['x_bin'] + 'x' + self['y_bin'] + ' ' + gain[2:].upper()\n \n \n if self.number_windows() > 0:\n out += ' ' + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + ' ' + self['x1_start'].ljust(3) + ' ' + self['y1_start'].ljust(4)\n if self.number_windows() > 1:\n out += ' ' + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + ' ' + self['x2_start'].ljust(3) + ' ' + self['y2_start'].ljust(4)\n \n if 'Comment' in self:\n out += ' ' + self['Comment']\n return out", "def format(self):\r\n\r\n earth = \"???\" if self.maskearth else self.earth\r\n air = 
\"???\" if self.maskair else self.air\r\n fire = \"???\" if self.maskfire else self.fire\r\n water = \"???\" if self.maskwater else self.water\r\n\r\n if any((self.earth, self.fire, self.water)):\r\n statsline = f'Stats: {earth}/{air}/{fire}/{water}'\r\n elif self.air:\r\n statsline = f'Air: {air}'\r\n else:\r\n statsline = ''\r\n\r\n return (\r\n f'Character {self.name}, [{self.token}]. '\r\n f'Init: {self.init} {statsline} Owner: {self.user.name}'\r\n )", "def getZaptelConf(self):\n output = []\n for portInd, portLine in enumerate(self.portLines):\n if self[portInd]['type'] != 'na':\n values = self[portInd]\n values['type'] = values['type'] == 'fxs' and \"fxo\" or 'fxs' #Hmm crazy zaptel idea that your fxo is your fxs in zapata but the correct way around in zaptel\n output.append(\"%(type)s%(signalling)s=\" % self[portInd] + str(portLine[0]))\n return output", "def __str__(self) -> str:\n return (\n f\"GlacierFlowModel '{self.model_name}' \"\n f\"{'' if self.steady_state else 'not '}in steady state with:\"\n f\"\\n - m: {self.m:20.5f} [m/m]\"\n f\"\\n - ela: {self.ela:20.2f} [m MSL]\"\n f\"\\n - resolution: {self.res:20.2f} [m]\"\n f\"\\n - extent: min max\"\n f\"\\n {self.extent[0]:10.1f} \"\n f\"{self.extent[1]:10.1f} [x]\"\n f\"\\n {self.extent[2]:10.1f} \"\n f\"{self.extent[3]:10.1f} [y]\"\n )", "def conditions(self) -> Optional[Sequence['_meta.v1.outputs.ConditionPatch']]:\n return pulumi.get(self, \"conditions\")", "def conditions(self) -> Sequence['outputs.StatusConditionResponse']:\n return pulumi.get(self, \"conditions\")", "def mountain_weather_assessment(backcast):\n \n if backcast.total_precip < 6:\n return 'Less than 6 inches of precipitation in the past 7 days, climb on.'\n if backcast.total_precip > 36:\n return f\"There's been {backcast.total_precip} of precip in the past 7 days, you probably shouldn't climb.\"\n if backcast.sun_count > 72 and backcast.high_temp > 40:\n return f\"It's precipitated {backcast.total_precip} inches recently here, but also been sunny for {backcast.sun_count} hours and reached {backcast.high_temp} degrees F. Use your discretion.\"\n if backcast.precip_count > 30 and backcast.avg_temp <= 40:\n return f\"It's precipitated {backcast.total_precip} inches recently here, over {backcast.precip_count} hours out of the last 7 days, with an average temp of {backcast.avg_temp}F. 
Use your discretion and please stay safe.\"\n return f\"Not sure how to assess this information.\"", "def __str__(self):\n #Get an ordered list of the elements strings so it outputs always the same\n #string given a mass function.\n elements = []\n for element in self.focals:\n elements.append((element, str(element)))\n sortedList = sorted(elements, key=lambda x:x[1])\n \n result = \"\"\n first = True\n for t in sortedList:\n if first:\n result += t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n first = False\n else:\n result += \", \" + t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n return \"{\" + result + \"}\"", "def print_weather(self, days):\n if days == 1:\n open_weather = urlopen(self.full_url).read().decode(\"utf8\")\n read_json = json.loads(open_weather)\n outside = self.get_outside_outlook(read_json[\"weather\"])\n wind_speed = read_json[\"wind\"][\"speed\"]\n wind_direction = self.deg_to_compass(read_json[\"wind\"][\"deg\"])\n current_temp = self.convert_temp(read_json[\"main\"][\"temp\"])\n print(\"Current Temperature: {:.2f}\\xb0\\n\"\n \"Sky: {}\\n\"\n \"Wind speed: {} MPH\\n\"\n \"Wind direction: {}\".format(current_temp, outside, wind_speed, wind_direction))\n else:\n open_weather = urlopen(self.full_url).read().decode(\"utf8\")\n read_json = json.loads(open_weather)\n outside = read_json[\"list\"]\n \"\"\"\n Should be:\n for temp in outside:\n stuff = temp[\"weather\"]\n for i in stuff:\n print(i['description'])\n\n Each of these will need to be added to a list or a dictionary to print relationally\n \"\"\"\n print(outside)", "def __str__(self):\n struct_repr = \", \".join([\n \"is_gyrometer_calibration_ok: \" + str(self.is_gyrometer_calibration_ok),\n \"is_accelerometer_calibration_ok: \" + str(self.is_accelerometer_calibration_ok),\n \"is_magnetometer_calibration_ok: \" + str(self.is_magnetometer_calibration_ok),\n \"is_level_calibration_ok: \" + str(self.is_level_calibration_ok),\n \"is_local_position_ok: \" + str(self.is_local_position_ok),\n \"is_global_position_ok: \" + str(self.is_global_position_ok),\n \"is_home_position_ok: \" + str(self.is_home_position_ok)\n ])\n\n return f\"Health: [{struct_repr}]\"", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"input_dim = {}\\n\".format(self.input_dim)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def print_info(self):\n outstr = '================================================= Ambient Noise Cross-correlation Database =================================================\\n'\n outstr += self.__str__()+'\\n'\n outstr += '--------------------------------------------------------------------------------------------------------------------------------------------\\n'\n if 'NoiseXcorr' in self.auxiliary_data.list():\n outstr += 'NoiseXcorr - Cross-correlation seismogram\\n'\n if 'StaInfo' in self.auxiliary_data.list():\n outstr += 'StaInfo - Auxiliary station information\\n'\n if 'DISPbasic1' in self.auxiliary_data.list():\n outstr += 'DISPbasic1 - Basic dispersion curve, no jump correction\\n'\n if 
'DISPbasic2' in self.auxiliary_data.list():\n outstr += 'DISPbasic2 - Basic dispersion curve, with jump correction\\n'\n if 'DISPpmf1' in self.auxiliary_data.list():\n outstr += 'DISPpmf1 - PMF dispersion curve, no jump correction\\n'\n if 'DISPpmf2' in self.auxiliary_data.list():\n outstr += 'DISPpmf2 - PMF dispersion curve, with jump correction\\n'\n if 'DISPbasic1interp' in self.auxiliary_data.list():\n outstr += 'DISPbasic1interp - Interpolated DISPbasic1\\n'\n if 'DISPbasic2interp' in self.auxiliary_data.list():\n outstr += 'DISPbasic2interp - Interpolated DISPbasic2\\n'\n if 'DISPpmf1interp' in self.auxiliary_data.list():\n outstr += 'DISPpmf1interp - Interpolated DISPpmf1\\n'\n if 'DISPpmf2interp' in self.auxiliary_data.list():\n outstr += 'DISPpmf2interp - Interpolated DISPpmf2\\n'\n if 'FieldDISPbasic1interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPbasic1interp - Field data of DISPbasic1\\n'\n if 'FieldDISPbasic2interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPbasic2interp - Field data of DISPbasic2\\n'\n if 'FieldDISPpmf1interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPpmf1interp - Field data of DISPpmf1\\n'\n if 'FieldDISPpmf2interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPpmf2interp - Field data of DISPpmf2\\n'\n outstr += '============================================================================================================================================\\n'\n print outstr\n return", "def __str__(self):\n\n string = ''\n string += \"Battery Voltage: \" + \"{0:.2f}\".format(self.getBatteryVoltage()) + '\\n'\n string += \"Data Logging: \" + str(self.getDataLoggingStatus()) + '\\n'\n string += \"Data Filename: \" + self.getFilename() +'\\n'\n\n string += \"Time Since Instrument Reset (s): \" + \"{0:.2f}\".format(self.getResetTime()) + '\\n'\n string += \"Data Age (s): \" + \"{0:.2f}\".format(self.getDataAge()) + '\\n'\n string += '\\n'\n string += \"Interior Temperature 1 (F): \" + \"{0:.2f}\".format(self.getInteriorTemperature1()) + '\\n'\n string += \"Interior Temperature 2 (F): \" + \"{0:.2f}\".format(self.getInteriorTemperature2()) + '\\n'\n string += \"Interior Temperature 3 (F): \" + \"{0:.2f}\".format(self.getInteriorTemperature3()) + '\\n'\n string += \"Exterior Temperature (F): \" + \"{0:.2f}\".format(self.getExteriorTemperature()) + '\\n'\n string += \"Pressure (PSI): \" + \"{0:.2f}\".format(self.getPressure()) + '\\n'\n string += \"Humidity (%): \" + \"{0:.2f}\".format(self.getHumidity()) + '\\n'\n string += '\\n'\n\n string += \"GPS Time: \" + str(self.getGpsTime1()) + '\\n'\n string += \"Latitude: \" + \"{0:.9f}\".format(self.getLatitude1()) + '\\n'\n string += \"Longitude: \" + \"{0:.9f}\".format(self.getLongitude1()) + '\\n'\n string += \"Altitude (ft): \" + \"{0:.2f}\".format(self.getAltitude1()) + '\\n'\n string += \"Speed (MPH): \" + \"{0:.2f}\".format(self.getSpeed()) + '\\n'\n string += '\\n'\n\n string += \"GPS Time: \" + str(self.getGpsTime2()) + '\\n'\n string += \"Latitude: \" + \"{0:.9f}\".format(self.getLatitude2()) + '\\n'\n string += \"Longitude: \" + \"{0:.9f}\".format(self.getLongitude2()) + '\\n'\n string += \"Altitude (ft): \" + \"{0:.2f}\".format(self.getAltitude2()) + '\\n'\n string += '\\n'\n\n ax, ay, az = self.getAcceleration()\n string += \"Acceleration (x, y, z): \"\n string += \"{0:.2f}\".format(ax) + \", \"\n string += \"{0:.2f}\".format(ay) + \", \"\n string += \"{0:.2f}\".format(az) + '\\n'\n\n gx, gy, gz = self.getRates()\n string += \"Rates (x, y, z): \"\n string += 
\"{0:.2f}\".format(gx) + \", \"\n string += \"{0:.2f}\".format(gy) + \", \"\n string += \"{0:.2f}\".format(gz) + '\\n'\n\n mx, my, mz = self.getMagneticReading()\n string += \"Magnetic Field (x, y, z): \"\n string += \"{0:.2f}\".format(mx) + \", \"\n string += \"{0:.2f}\".format(my) + \", \"\n string += \"{0:.2f}\".format(mz) + '\\n'\n\n roll, pitch, yaw = self.getAttitude()\n string += \"Roll (deg): \" + \"{0:.2f}\".format(roll) + '\\n'\n string += \"Pitch (deg): \" + \"{0:.2f}\".format(pitch) + '\\n'\n string += \"Yaw (deg): \" + \"{0:.2f}\".format(yaw) + '\\n'\n string += '\\n'\n relayStates = self.getRelayStates()\n \n\n string += \"Relay States: \" \n string += (( \"ON \") if relayStates[0] else ( \"OFF \")) \n string += (( \"ON \") if relayStates[1] else ( \"OFF \"))\n string += (( \"ON \") if relayStates[2] else ( \"OFF \"))\n string += (( \"ON \") if relayStates[3] else ( \"OFF \"))\n string += '\\n'\n\n\n return string", "def __str__(self):\n struct_repr = \", \".join([\n \"active: \" + str(self.active),\n \"actuator: \" + str(self.actuator)\n ])\n\n return f\"ActuatorOutputStatus: [{struct_repr}]\"", "def description(self):\r\n try:\r\n return str(self.connect()['weather'][0]['description'])\r\n except:\r\n return '@weather_description'", "def format_status(self) -> str:\n if not self.ready_to_trade:\n return \"Market connectors are not ready.\"\n lines = []\n\n balance_df = self.get_balance_df()\n lines.extend([\"\", \" Balances:\"] + [\" \" + line for line in balance_df.to_string(index=False).split(\"\\n\")])\n\n exchanges_df = self.exchanges_df()\n lines.extend([\"\", \" Exchanges:\"] + [\" \" + line for line in exchanges_df.to_string(index=False).split(\"\\n\")])\n\n try:\n orders_df = self.active_orders_df()\n lines.extend([\"\", \" Active Orders:\"] + [\" \" + line for line in orders_df.to_string(index=False).split(\"\\n\")])\n except ValueError:\n lines.extend([\"\", \" No active maker orders.\"])\n\n return \"\\n\".join(lines)", "def conditions(self) -> Optional[Sequence['_meta.v1.outputs.Condition']]:\n return pulumi.get(self, \"conditions\")", "def __repr__(self):\n values = ', '.join(f'{k}={v}' for k, v in self.variables.items())\n return f'D({values})'", "def get_weather_info(forecast):\n day_forecast = {}\n day_forecast['condition_text'] = forecast['day']['condition']['text']\n #this icon is a url to an image that describes the weather condition\n day_forecast['condition_icon'] = forecast['day']['condition']['icon']\n day_forecast['max_temp'] = forecast['day']['maxtemp_c']\n day_forecast['min_temp'] = forecast['day']['mintemp_c']\n day_forecast['avg_temp'] = forecast['day']['avgtemp_c']\n date = datetime.strptime(forecast['date'], \"%Y-%m-%d\").strftime(\"%b %d:%a\")\n date_format = date.split(':')\n day_forecast['day'] = date_format[0]\n day_forecast['weekday'] = date_format[1]\n return day_forecast", "def __repr__(self) -> str:\n s = \"\\n\"\n fmt = \"{:7.3f}\"\n for i in range(len(self.w)):\n s += \" \".join(fmt.format(w) for w in self.w[i])\n s += \" | \" + fmt.format(self.b[i]) + \"\\n\"\n return s", "def get_weather(self):\n return self.__weather", "def __str__(self):\n s=''\n for r in range(self.n):\n for c in range(self.n):\n s += str(self.state[r][c]) + ' '\n s += '\\n'\n s += str('hcost') + ' : ' + str(self.hcost)\n s += '\\n'\n return s", "def output(self):\n val = \"\"\n if self.home:\n val = \"1\"\n if self.away:\n if val != \"\": val = val + \",\"\n val = val + \"2\"\n if self.night:\n if val != \"\": val = val + \",\"\n val = val + \"3\"\n if 
self.vacation:\n if val != \"\": val = val + \",\"\n val = val + \"4\"\n return val", "def _truth_condition(self, state):\n return '{models}'.format(\n models=models(state, self.rewrite(), '$'),\n )", "def __str__(self):\n # First obtain a string describing the underlying data model.\n strg = super(MiriTelescopeEmissionModel, self).__str__()\n \n # Add the extras\n if self.meta.instrument.filter is not None:\n strg += \"Data valid for filter=\\'%s\\' \" % \\\n self.meta.instrument.filter\n else:\n strg += \"Data valid for UNKNOWN filter \"\n if self.meta.telescope_temperature is not None:\n strg += \"and telescope temperature=%.2fK\" % \\\n self.meta.telescope_temperature\n else:\n strg += \"and UNKNOWN telescope temperature\"\n return strg", "def __repr__(self):\n s = \"s = $%.2f, x = $%.2f, t = %.2f (years), sigma = %.3f, rf = %.3f\" %(self.s, self.x, self.t, self.sigma, self.rf)\n return s", "def cloudiness(self):\r\n try:\r\n return str(self.connect()['clouds']['all'])\r\n except:\r\n return '@weather_cloudiness'", "def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])", "def __str__(self, quiet: bool = True) -> str:\n\n q_type = self.q_type\n\n if quiet:\n q_type = \"\"\n\n string = \"\"\n\n if self.representation == \"\":\n string = f\"({self.t}, {self.x}, {self.y}, {self.z}) {q_type}\"\n\n elif self.representation == \"polar\":\n rep = self.txyz_2_representation(\"polar\")\n string = f\"({rep[0]} A, {rep[1]} 𝜈x, {rep[2]} 𝜈y, {rep[3]} 𝜈z) {q_type}\"\n\n elif self.representation == \"spherical\":\n rep = self.txyz_2_representation(\"spherical\")\n string = f\"({rep[0]} t, {rep[1]} R, {rep[2]} θ, {rep[3]} φ) {q_type}\"\n\n return string", "def strings(self, sensor_data=None):\r\n disc = self.discrete(sensor_data)\r\n def toString(val):\r\n if val==self.far:\r\n return 'far'\r\n elif val==self.close:\r\n return 'close'\r\n else: \r\n return 'collision'\r\n return list(map(toString, disc))", "def report(self):\n print(f\"Water: {self.resources['water']}ml\")\n print(f\"Milk: {self.resources['milk']}ml\")\n print(f\"Coffee: {self.resources['coffee']}g\")", "def print_info(self):\n print('Condition list: (Cell definitions)')\n if len(list(self.condition_dict.keys())) > 0:\n for ID in list(self.condition_dict.keys()):\n ident = self.condition_dict[ID][0]['identifier']\n print(\n f'\\t[{ident}]: {len(self.condition_dict[ID])} definition(s)')\n else:\n print('\\tNo instances.')\n print()\n print('Modcell types: (Cell mappings on module)')\n if len(list(self.modcells.keys())) > 0:\n for ident in list(self.modcells.keys()):\n print(f'\\t[{ident}]: {len(self.modcells[ident])} definition(s)')\n else:\n print('\\tNo instances.')\n print()\n print('String definitions (Series of modcells)')\n passed = True\n if len(list(self.string_cond.keys())) > 0:\n for str_key in self.string_cond:\n try:\n print(\n f\"\\t[{str_key}]: {len(self.multilevel_ivdata['string'][str_key]['V'])} definition(s)\")\n except:\n passed = False\n continue\n if not passed:\n print('String definitions are defined by deducing the combination of module definitions. 
So, for an accurate display of the string-level definitions, call this module after enacting .simulate()')\n else:\n print('\\tNo instances.')\n print()", "def status(self):\n res = \"\"\n for tlight in self.trafficLights:\n res += \"Traffic light {} status: {}\\n\".format(self.trafficLights[tlight].id,self.trafficLights[tlight].getState())\n return res", "def __repr__(self):\n\n values = (\n f\"tickers: {tuple(self._tickers)}\\n\"\n f\"filters: {tuple(self._filters)}\\n\"\n f\"rows: {self._rows}\\n\"\n f\"order: {self._order}\\n\"\n f\"signal: {self._signal}\\n\"\n f\"table: {self._table}\\n\"\n f\"table: {self._custom}\"\n )\n\n return values", "def get_condition(self) -> dict:\n url = self.base_url + \"/condition\"\n condition = self._session.get(url).json()\n keys = [\"bandwidth\", \"latency\", \"jitter\", \"loss\"]\n result = {k: v for (k, v) in condition.items() if k in keys}\n return result", "def __str__(self):\n _str = \"Variables:\\n\"\n for variable in self.variables:\n _str += \" {}\\n\".format(str(variable))\n _str += \"\\nConstraints:\\n\"\n for constraint in self.constraints:\n _str += \" {}\\n\".format(str(constraint))\n return _str", "def __str__(self):\n return self.designation + ' ' +self.winery + ' wine'", "def __str__(self):\n return \"{}, fuel={}, odometer={} reliability= {}%\".format(self.name, self.fuel,\n self.odometer, self.reliability)", "def getDebugText(self):\n timeDifference = time.time() - self.time_created\n hours = math.floor(timeDifference / 3600)\n minutes = math.floor((timeDifference % 3600) / 60)\n seconds = math.floor(timeDifference % 3600 % 60)\n\n output = \"\\n\" * 50\n output += \"Time started: %s\\n\" % time.ctime(self.time_created)\n output += \"Time now: %s\\n\" % time.ctime()\n output += \"Time elapsed: %02d:%02d:%02d\\n\" % (hours, minutes, seconds)\n output += (\"=\" * 80) + \"\\n\"\n output += \"Health potions used: %d\\n\" % self.hp_pots_used\n output += \"Health potions per hour: %d\\n\" % (self.hp_pots_used / (\n timeDifference / 3600))\n output += \"Mana potions used: %d\\n\" % self.mana_pots_used\n output += \"Mana potions per hour: %d\\n\" % (self.mana_pots_used / (\n timeDifference / 3600))\n return output", "def toQif(self):\n out=list();\n if 'date' in self:\n out.append(\"D{}\".format(self['date']));\n if 'amount' in self:\n out.append(\"T{}\".format(self['amount']));\n if 'memo' in self and len(self['memo'])>3:\n out.append(\"M{}\".format(self['memo']));\n if 'payee' in self and len(self['payee'])>3:\n out.append(\"P{}\".format(self['payee']));\n out.append(\"^\");\n return \"\\n\".join(out);", "def getConditionNames():\n return _conditions.keys()", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"classes = {}\\n\".format(self.classes)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def summarize_observing_conditions(fitsFiles):\n count = len(fitsFiles)\n\n # Here is the data we are going to collect from the fits headers\n year = np.zeros(count, dtype=int)\n month = np.zeros(count, dtype=int)\n day = np.zeros(count, dtype=int)\n hour = np.zeros(count, dtype=int)\n minute = np.zeros(count, dtype=int)\n airmass = np.zeros(count, 
dtype=float)\n water_column = np.zeros(count, dtype=float)\n \n for ii in range(len(fitsFiles)):\n # Get header info\n hdr = pyfits.getheader(fitsFiles[ii])\n\n airmass[ii] = float(hdr['AIRMASS'])\n\n date = hdr['DATE-OBS'].split('-')\n _year = int(date[0])\n _month = int(date[1])\n _day = int(date[2])\n\n utc = hdr['UTC'].split(':')\n _hour = int(utc[0])\n _minute = int(utc[1])\n _second = int(math.floor(float(utc[2])))\n\n utc = datetime.datetime(_year, _month, _day, _hour, _minute, _second)\n utc2hst = datetime.timedelta(hours=-10)\n hst = utc + utc2hst\n\n year[ii] = hst.year\n month[ii] = hst.month\n day[ii] = hst.day\n hour[ii] = hst.hour\n minute[ii] = hst.minute\n\n # Get the water column in mm of H2O\n water_column[ii] = weather.cso_water_column(_year, _month, _day, \n _hour, _minute)\n\n # Now lets fetch the CFHT weather data\n (temperature, pressure, humidity, wind_speed, wind_dir) = \\\n weather.cfht_weather_data(year, month, day, hour, minute)\n\n # Print out a nicely formatted table\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('Filename', 'Year', 'M', 'D', 'h', 'm', 'AirM', 'H2O', 'Temp', \n 'Press', 'Humi', 'Wind', 'Dir'))\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('HST', '', '', '', '', '', '', 'mm', 'C', 'mbar', '%', 'km/h', 'deg'))\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('--------', '----', '--', '--', '--', '--', '----', '----', '-----', \n '-----', '----', '----', '----'))\n\n for ii in range(len(fitsFiles)):\n print('%-20s %4d %2d %2d %2d %2d ' % \\\n (fitsFiles[ii], year[ii], month[ii], day[ii], hour[ii], minute[ii]),)\n print('%4.2f %4.2f %5.1f %5.1f %4.1f %4.1f %4d' % \\\n (airmass[ii], water_column[ii], temperature[ii], pressure[ii],\n humidity[ii], wind_speed[ii], wind_dir[ii]))\n\n # Print out the average values\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('--------', '----', '--', '--', '--', '--', '----', '----', '-----', \n '-----', '----', '----', '----'))\n print('%-20s %4d %2d %2d %2d %2d ' % \\\n ('Average', year.mean(), month.mean(), day.mean(), hour.mean(), \n minute.mean()),)\n print('%4.2f %4.2f %5.1f %5.1f %4.1f %4.1f %4d' % \\\n (airmass.mean(), water_column.mean(), temperature.mean(), \n pressure.mean(), humidity.mean(), wind_speed.mean(), wind_dir.mean()))\n print('%-20s %4d %2d %2d %2d %2d ' % \\\n ('Std. 
Dev.', year.std(), month.std(), day.std(), hour.std(), \n minute.std()),)\n print('%4.2f %4.2f %5.1f %5.1f %4.1f %4.1f %4d' % \\\n (airmass.std(), water_column.std(), temperature.std(), \n pressure.std(), humidity.std(), wind_speed.std(), wind_dir.std()))", "def __str__(self):\n #{{{ Nicely print values\n text = 'Null values for databases: %s' % self.dbcentral.list()\n\n for value in self.null_vals.keys():\n text += \"\\t%s: %s\" % (value,self.null_vals[value])\n\n return text", "def formated_print(dic):\n print dic['Year'] + '/' + get_month_number(dic['Month']) + '/' + \\\n dic['Day'] + ' | ' + dic['Hour'] + ':' + dic['Min'] + ':' + \\\n dic['Seg'] + ' | ' + dic['Energy'] + ' Watts'", "def __str__(self):\r\n\r\n retval = self.__class__.__name__ + ' ('\r\n for val in self.VALUES:\r\n value = getattr(self, val, None)\r\n if value is not None:\r\n retval += '%s:%.4f ' % (val, getattr(self, val))\r\n return retval.strip() + ')'", "def __repr__(self):\n string = ''\n for key, val in self.setting().items():\n string += '{}({})\\n'.format(key, val)\n return string", "def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)", "def __str__(self):\n return f'{self._name} has {self._calories} calories, {self._carbohydrates}' +\\\n f'g. carbohydrates, {self._fat}g. of fat and {self._proteins}g. of proteins'", "def __str__(self) -> str:\n\n return (\n f\"The {self.name} temperature sensor has a value of \"\n f\"{self.value} degrees {self.unit}.\"\n )", "def __str__(self):\n out_str = \"\\n\".join(`\"%.5f, %.5f, %.1f, %s, %s\" % (point[0], point[1], point[2], point[3], point[4])` for point in self.__traectory_list)\n return \"\\'x, y, altitude, capture time, capture date'\\n\"+out_str", "def get_status(self):\n request_format = \"{oscillating:01d} {initialising:01d} {initialised:01d} {width:03d} \" \\\n \"{offset:+04d} {speed:02d} {acceleration:03d} {cycles:05d} {backlash:03d}\"\n status_string = request_format.format(\n oscillating=int(self.device.is_oscillating()),\n initialising=int(self.device.is_initialising()),\n initialised=int(self.device.has_been_initialised()),\n width=int(self.device.get_window_width()),\n offset=int(self.device.get_offset()),\n speed=int(self.device.get_speed()),\n acceleration=int(self.device.get_acceleration()),\n cycles=int(self.device.get_complete_cycles()),\n backlash=int(self.device.get_backlash())\n )\n return status_string", "def __str__(self, output=[]):\n\n output = [['only heat', self.only_heat],\n ] + output\n\n class_str = 'Phonon simulation properties:\\n\\n'\n class_str += super().__str__(output)\n\n return class_str", "def condition(self) -> str:\r\n condition = self._first_timeserie[\"data\"][\"next_1_hours\"][\"summary\"][\r\n \"symbol_code\"\r\n ]\r\n return format_condition(condition)", "def status(self):\n \n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-52s\\n\"\"\"\n # print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n # print 80 * \"-\"\n # print self.get_image()\n if self.cloudserver:\n # let's build the IPs first\n status = self.cloudserver.status\n \n else:\n status = \"OFF\"\n\n res2=\"\"\n ip1 = \"%s:%s\" % (self.networks[0], self.ip_addresses[self.networks[0]])\n if len(self.networks) > 1:\n res2 += \"\\n\"\n for 
network in self.networks[1:]:\n ipstr = \"%s:%s\" % (network, self.ip_addresses[network])\n res2+=tmpl2 % (\"-\", ipstr)\n # print res2\n # if len(self.ip_addresses.keys()) > 1:\n # ip1 = self.ip_addresses.values()[0]\n res1 = tmpl1 % (self.machine_name, ip1, status)\n return res1 + res2", "def generate_watch_output():\r\n values = H.unicode_string('')\r\n if S.WATCH is None:\r\n return values\r\n for watch_data in S.WATCH:\r\n watch_entry = ''\r\n if watch_data and isinstance(watch_data, dict):\r\n # Whether watch expression is enabled or disabled\r\n if 'enabled' in watch_data.keys():\r\n if watch_data['enabled']:\r\n watch_entry += '|+|'\r\n else:\r\n watch_entry += '|-|'\r\n # Watch expression\r\n if 'expression' in watch_data.keys():\r\n watch_entry += ' \"%s\"' % watch_data['expression']\r\n # Evaluated value\r\n if watch_data['value'] is not None:\r\n watch_entry += ' = ' + generate_context_output(watch_data['value'])\r\n else:\r\n watch_entry += \"\\n\"\r\n values += H.unicode_string(watch_entry)\r\n return values", "def __str__(self):\n state = ''\n state += ' '.join([str(x) for x in self.pos]) + ' '\n state += ''.join([str(x) + ' ' + str(y) + ' ' for x,\n y in zip(self.BU, self.BD)])\n for e in self.BF:\n state += ' '.join([str(x) for x in e])\n state += ' '\n state += ' '.join([str(x) for x in self.LU]) + ' '\n state += ' '.join([str(x) for x in self.LD]) + ' '\n\n return state", "def reaction_str(self):\n\n def format(number):\n return str(number).rstrip(\".0\") + \" \"\n\n reactant_bits = []\n product_bits = []\n for met in sorted(self._metabolites, key=attrgetter(\"id\")):\n coefficient = self._metabolites[met]\n if coefficient >= 0:\n product_bits.append(format(coefficient) + met.id)\n else:\n reactant_bits.append(format(abs(coefficient)) + met.id)\n\n reaction_string = ' + '.join(reactant_bits)\n if self.gapfill_direction == '=':\n reaction_string += ' <=> '\n elif self.gapfill_direction == '<':\n reaction_string += ' <-- '\n elif self.gapfill_direction == '>':\n reaction_string += ' --> '\n reaction_string += ' + '.join(product_bits)\n return reaction_string", "def __repr__(self):\n string = \"Current state: \\n\"\n if self.state[0] == 0: # We're on the left side\n string += \"M: \"\n string += str(self.state[1]).ljust(10)\n string += \"M: \"\n string += str(TOTAL_NO_MISSIONARIES - self.state[1]).ljust(10)\n string += \"\\n\"\n\n string += \"C: \"\n string += str(self.state[2]).ljust(10)\n string += \"C: \"\n string += str(TOTAL_NO_CANNIBALS - self.state[2]).ljust(10)\n string += \"\\n\"\n\n string += \"Boat position: left\\n\"\n else: # We're on the right side\n string += \"M: \"\n string += str(TOTAL_NO_MISSIONARIES - self.state[1]).ljust(10)\n string += \"M: \"\n string += str(self.state[1])\n string += \"\\n\"\n\n string += \"C: \"\n string += str(TOTAL_NO_CANNIBALS - self.state[2]).ljust(10)\n string += \"C: \"\n string += str(self.state[2]).ljust(10)\n string += \"\\n\"\n\n string += \"Boat position: right\\n\"\n string += \"\\n\"\n return string", "def encode(self):\n color_str = []\n if self.brightness is not None:\n color_str.append(f\"brightness:{self.brightness}\")\n if self.hue is not None:\n color_str.append(f\"hue:{self.hue}\")\n if self.saturation is not None:\n color_str.append(f\"saturation:{self.saturation}\")\n if self.kelvin is not None:\n color_str.append(f\"kelvin:{self.kelvin}\")\n\n return ' '.join(color_str)", "def __repr__(self):\n\n repme = \"delta0= {!r}, c_upd= {!r}, n_upd= {!r}, smooth= {!r}\"\\\n .format(self.delta0, self.c_upd, self.n_upd, 
self.smooth)\n\n return \"WangLandau({!s})\".format(repme)", "def __str__(self):\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s", "def __str__(self):\n s = 'hit '+str(self.hit)+'\\n'\n s+= 'states '+str(self.states)+'\\n'\n s+= 'chi2 '+str(self.chi2)\n return s", "def device_state_attributes(self) -> str:\n return {\n \"remo_device_id\": self._remo_device.id,\n \"remo_device_name\": self._remo_device.name,\n \"remo_firmware_version\": self._remo_device.firmware_version,\n \"remo_temperature_offset\": self._remo_device.temperature_offset,\n \"remo_humidity_offset\": self._remo_device.humidity_offset\n }", "def describe(self) -> str:\r\n env = str(self.env)\r\n observation_space = str(self.observations)\r\n action_space = str(self.actions)\r\n policy = str(self.policy)\r\n gamma = f\"Gamma({self.gamma})\"\r\n\r\n return env, observation_space, action_space, policy, gamma", "def __str__(self):\n\t\n\t\tresult = \"\"\n\t\tresult += \"Torsional Spring Specs: \\n\"\n\t\tresult += \"Shape Eq. Slope: {0}\\n\".format(str(self.shape_slope))\n\t\tresult += \"Z Thickness: {0}\\n\".format(str(self.z_thick))\n\t\tresult += \"In-Plane Thickness: {0}\\n\".format(str(self.thick))\n\t\tresult += \"Spiral Length: {0}\\n\".format(str(self.length))\n\n\t\treturn result", "def full_broadcast(city_name):\n today_weather(city_name)\n for i in range(1, 4):\n output = (\n (datetime.date.today() + datetime.timedelta(days=i)).strftime(\"%d/%m/%Y\")\n + \"Temperature: \"\n + \"{}\".format(\n get_weather(\n city_name,\n (datetime.date.today() + datetime.timedelta(days=i)).strftime(\n \"%d/%m/%Y\"\n ),\n )\n )\n )\n print(output)", "def __str__(self):\n return f'''\n {super().__str__()}\n Brand: {self._breand}\n Pover: {self._power} (W)\n Nozzle: {self._nozzle} (pieces)\n '''", "def __str__(self) -> str:\n if self.scalar_vector:\n return f\"({self.w:-.4f} {self.x:+.4f}i {self.y:+.4f}j {self.z:+.4f}k)\"\n return f\"({self.x:-.4f}i {self.y:+.4f}j {self.z:+.4f}k {self.w:+.4f})\"", "def __str__(self):\n output = \"There are {!s} cities with {!s} connections in {}.\".format(\n self.__railways.vertex_count(), self.__railways.edge_count(),\n self.__timeTable.split('.')[0])\n return output", "def __repr__(self):\n\n output = list()\n output.append('{resonance_id:6s}'.format(**self.par))\n output.append('{h_larmor_frq:6.1f}'.format(**self.par))\n output.append('{temperature:4.1f}'.format(**self.par))\n output.append('{:10.5f}'.format(self.val))\n output.append('{:10.5f}'.format(self.err))\n\n if self.cal:\n output.append('{:10.5f}'.format(self.cal))\n\n return ' '.join(output)" ]
[ "0.63701195", "0.6170547", "0.6153412", "0.6075571", "0.59051156", "0.5875144", "0.582113", "0.5815576", "0.5801642", "0.5736014", "0.5721063", "0.5704934", "0.56886274", "0.5640063", "0.56320167", "0.5610608", "0.5609194", "0.55756634", "0.5571646", "0.5566451", "0.5524659", "0.5514211", "0.5509786", "0.55024344", "0.54777163", "0.5442844", "0.54310066", "0.5416631", "0.5409348", "0.5381201", "0.53624815", "0.53510803", "0.5347678", "0.5343814", "0.532623", "0.5316304", "0.5313363", "0.53050655", "0.5290525", "0.5289795", "0.52775407", "0.527598", "0.52743644", "0.52648294", "0.5261644", "0.5258245", "0.52527606", "0.5251696", "0.524947", "0.5248626", "0.5247982", "0.52384645", "0.52331436", "0.52323276", "0.52113813", "0.5208211", "0.5205113", "0.5203182", "0.5203057", "0.5200823", "0.5200325", "0.51986223", "0.51972926", "0.5197267", "0.5196014", "0.51855606", "0.51725596", "0.5171399", "0.5168826", "0.5168691", "0.5162104", "0.51562935", "0.51413375", "0.5139421", "0.5138165", "0.5136719", "0.51361555", "0.5128181", "0.51195925", "0.51118", "0.5111716", "0.5110758", "0.510889", "0.51077485", "0.5105787", "0.5104485", "0.51041925", "0.5101914", "0.509739", "0.5094452", "0.5091404", "0.50840515", "0.508364", "0.5078986", "0.5077018", "0.50751925", "0.5070547", "0.5070371", "0.5068583", "0.5067776" ]
0.79582655
0
A generator function for looping through various times.
def time_iterator( *, first_time: datetime.datetime, last_time: datetime.datetime, resolution: int, timezone: datetime.timezone, ) -> Generator[datetime.datetime, None, None]: current_time = first_time while current_time < last_time: yield current_time.replace(tzinfo=timezone) current_time += relativedelta( hours=resolution // 3600, minutes=(resolution // 60) % 60, seconds=resolution % 60, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def times(self):\n \n class IterTimes:\n def __init__(self, st):\n self.c = 0\n self.st = st\n \n def __iter__(self):\n return self\n \n def next(self):\n t = self.st.time(self.c)\n if t == None or self.c == self.st.longitud:\n raise StopIteration\n else:\n self.c += 1\n return t\n \n return IterTimes(self)", "def exercise_gen(ret_val, times):", "def time_processor(self):\n while True:\n rexp = (yield)\n self.time = float(rexp.group(1))\n self.time_str = rexp.group(1)\n # Reset subIteration counters\n for k in self.subiter_map:\n self.subiter_map[k] = 0\n self._tick = False", "def getTimes():", "def getTimes():", "def getTimes():", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def _fill_time(beats, shard, *args, **kwargs):\n total = 0\n for delay in shard(*args, **kwargs):\n total += delay.count\n yield delay\n yield Beat(beats - total)", "def get_time(self):\n for _ in range(5):\n current_time = datetime.datetime.now().strftime('%H:%M:%S')\n self.logger.info(current_time)\n num: int = self.rand_num_gen.get_number()\n display = current_time + f\": {num}\\n\"\n yield display.encode()\n sleep(1)\n try:\n raise RuntimeError(\"something unexpected happened!\")\n except:\n trace_info = traceback.format_exc()\n self.logger.error(\"uncaught exception: %s\", trace_info)\n yield trace_info", "def countdown():\n for i in range(100, 0, -1):\n yield i", "def _mc_gen():\r\n n = 1\r\n while True:\r\n yield n\r\n n += 1", "def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i", "def random_values():\n while True:\n yield random()", "def task5(count):\n number_1, number_2 = 1, 1\n for _ in range(count):\n yield number_1\n number_1, number_2 = number_2, number_1 + number_2", "def data_repeated(data):\n\n def gen(count):\n for _ in range(count):\n yield data\n\n yield gen", "def counter_wrapper(generator):\n for value in generator:\n yield value", "def _iter_items(data_sequence):\n for time, element in data_sequence:\n for item in element:\n yield time, item", "def __iter__(self):\n\n # Wanted to do this, but count() only accepts numbers:\n # return count(start, timedelta(days=self.step))\n\n next = self.start\n while (True):\n yield next\n next = next + timedelta(days=self.step)", "def multiple_gen(modulus):\n count = 1\n while True:\n yield modulus * count\n count += 1", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def counter_wrapper_2(generator):\n yield from generator", "def __iter__(self):\n start_times = (start for start, end in self.tss)\n names = (name.rstrip() for name in self.inps)\n for ind, (c, t) in enumerate(zip(names, start_times)):\n yield (c, t, ind)", "def repeat(obj, times=None):\n if times is None:\n return Iter(itertools.repeat(obj))\n return Iter(itertools.repeat(obj, times))", "def __iter__(self):\n if len(self) == 0:\n return\n current = self.first_timestamp\n delta = datetime.timedelta(0, self.interval)\n while current <= self.last_timestamp:\n yield current\n current += delta", "def timeit_context() -> Generator:\n result = TimeItResult()\n started_time = time.time()\n try:\n yield result\n finally:\n result.time_passed = time.time() - started_time", "def static_trajectory(Tinit, n):\n for i in xrange(n):\n yield Tinit", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def customer_generator(env, 
inventory_stock):\n for i in itertools.count():\n yield env.timeout(random.randint(*T_INTER))\n env.process(customer(env, inventory_stock, 'Customer_'+str(i+1)))", "def itime(iterable, seconds):\n items = iter(iterable)\n\n end = time.time() + seconds\n yield items.next()\n\n for item in itertools.takewhile(lambda _: time.time() < end, items):\n yield item", "def counter():\n for value in range(5):\n yield \"<{}>\".format(value)", "def test_generator_method_name(self):\n for i in range(0, 4):\n yield 'try_odd', i", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def times(self, fn):\n for i in range(0, self._):\n fn()\n return self", "def simple():\n yield 1\n yield 2\n yield 3", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def wait(self, fps=2):\n i = 1\n t = clock()\n while 1:\n free_count = 0\n skip = 0\n while free_count == 0:\n while clock()-t < float(i)/fps:\n free_count += 1\n if free_count == 0:\n i += 1\n skip += 1\n yield i-1, skip\n i += 1", "def generator_fn():\n for thumbs, counts in data_dir.hotspot_data(num_timestamps=num_timestamps):\n for thumb, count in zip(thumbs, counts):\n yield thumb, count", "def source_deterministic(env, number, interval, counter):\n for i in range(number):\n bus_arrival = 12.0 # deterministic - fixed frequency\n c = customer(env, 'Customer_%02d' % (i + 1), counter, time_in_stop=bus_arrival)\n env.process(c)\n t = random.expovariate(1.0 / interval)\n yield env.timeout(t)", "def cycle(obj):\r\n while True:\r\n for item in obj:\r\n yield item", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def times(self):\n return list(range(self._max_time + 1))", "def test_func_generator():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i", "def step_sequence_gen(track, click=False, fillvalue=0.0, t=None, srate=None):\n if t is None:\n t = time_gen(srate=srate)\n else:\n t = iter(t)\n t0 = next(t)\n t1 = t0\n duration = 1\n for tple in track:\n if hasattr(tple,'__getitem__'):\n value = tple[0]\n if len(tple) > 1:\n duration = tple[1]\n else:\n value = tple\n while t0 + duration > t1:\n yield value\n if click:\n value = fillvalue\n t1 = next(t)\n t0 = t1", "def record_time(times, enabled, *args):\n if not enabled:\n yield\n else:\n start = time.time()\n yield\n end = time.time()\n times.append((' '.join(args), start, end))", "def test_func_generator_name():\n for i in range(0, 4):\n yield 'try_odd', i", "def anim():\n i = 0\n while 1:\n\n for r in Reprs:\n r.draw(i)\n i = i+ 1\n i = i % len(t)\n yield", "def _ids(self):\n prev_values = set()\n while True:\n next_value = self._time_ns()\n while True:\n if next_value not in prev_values:\n break\n next_value += 1000\n prev_values.add(next_value)\n yield next_value", "def repeatfunc(cls, func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def __iter__(self):\n self._loop_idx = 0\n self._target_time = time.time()\n return self", "def inference_generator(env, storage, pipe, arrival_rate):\n\n global num_clients, trace, last_inf_times, request_times\n for i in itertools.count():\n random_request_time = random.expovariate(arrival_rate)\n cumulative_request_time = 
last_inf_times + random_request_time\n last_inf_times = cumulative_request_time\n request_times.append(cumulative_request_time)\n yield env.timeout(random_request_time)\n num_clients +=1\n d = {'idx' : num_clients, 'request_time' : env.now}\n pipe.put(d)", "def timed(name):\n t0 = time.time()\n yield\n t1 = time.time()\n print(\"..%-24s: %8.4f\" % (name, t1 - t0))", "def iterate(self, start, end):\n if not self.db:\n self.db = self.get_db()\n\n p = start[:-4]\n s = struct.unpack(\"!L\", start[-4:])[0]\n e = struct.unpack(\"!L\", end[-4:])[0]\n time.sleep(self.READ_DELAY)\n while s <= e:\n v = random.random() * 100\n yield p + struct.pack(\"!L\", s), struct.pack(\"!d\", v)\n s += self.STEP", "def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element", "def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat", "def repeat(value: T, times: int) -> List[T]:\n return [value] * times", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def stream():\n while True:\n yield random_point()", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def comp_pattern_generator(iterable):\n for chord in iterable:\n yield (600, chord)", "async def sleep_generator(pos: int):\n if pos <= 0:\n raise ValueError(f\"none_zero must be some positive integer, got {pos}\")\n scale = 1 / pos / 10\n for i in range(pos):\n t = scale * (pos - i)\n LOGGER.info(f\"Number {i}, going to sleep {t} s\")\n await asyncio.sleep(t)\n LOGGER.info(f\"Number {i}, slept {t} s\")\n yield i", "def floating_point_generator():\n i = 0\n while True:\n yield str((i % 5) * 1.1)\n i += 1", "def make_iter(capture, channel):\n\n def cycle():\n threading.Timer(INTERVAL, cycle).start()\n publish_frame(capture, channel)\n\n return cycle", "def test_generator_inline(self):\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i", "def numbers():\n for number in range(1, 76):\n yield number", "def id_generator():\n start_value = 0\n while True:\n yield start_value\n start_value += 1", "def _exponential_timeout_generator(initial, maximum, multiplier, deadline):\n if deadline is not None:\n deadline_datetime = (\n datetime_helpers.utcnow() +\n datetime.timedelta(seconds=deadline))\n else:\n deadline_datetime = datetime.datetime.max\n\n timeout = initial\n while True:\n now = datetime_helpers.utcnow()\n yield min(\n # The calculated timeout based on invocations.\n timeout,\n # The set maximum timeout.\n maximum,\n # The remaining time before the deadline is reached.\n float((deadline_datetime - now).seconds))\n timeout = timeout * multiplier", "def timer():\n start = time.time()\n\n yield\n\n end = time.time()\n\n print('Elapsed: {:.2f}s'.format(end - start))", "def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator", "def changefreqs_generator():\n changefreqs = ['weekly', 'daily']\n yield from itertools.chain(changefreqs, itertools.repeat('monthly'))", "def sleeper(self):\n for waittime in (.01, .02, .05, .1, .2, .5):\n yield waittime\n while True:\n waittime = min(waittime + .2, 5)\n yield 
waittime", "def __iter__(self): # for each frequency, yield a QTile\n for freq in self._iter_frequencies():\n yield snrQTile(self.q, freq, self.duration, self.sampling,\n mismatch=self.mismatch, shift = self.freq_timeshift(freq))", "def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value", "def generate(th1):\n pro = 1\n while True:\n if th1.is_alive() and pro < 99:\n pro = pro + 1\n time.sleep(1)\n yield \"data:\" + str(pro) + \"\\n\\n\"\n else:\n if not th1.is_alive():\n yield \"data:\" + str(100) + \"\\n\\n\"", "def convergence_processor(self):\n while True:\n rexp = (yield)\n self.converged = True\n self.converged_time = int(rexp.group(2))", "def cycle(start, times):\n current_gen = start\n for _ in range(times):\n next_gen = defaultdict(int)\n all_locs = get_all_neighbors(current_gen.keys())\n all_locs.update(current_gen.keys())\n for loc in all_locs:\n neighbors = get_neighbors(loc)\n count = sum(current_gen[n] for n in neighbors)\n if count in (2, 3) and current_gen[loc] == 1:\n next_gen[loc] = 1\n elif count == 3 and current_gen[loc] == 0:\n next_gen[loc] = 1\n current_gen = next_gen\n return current_gen", "def color_cycle():\n while True:\n for color in colors:\n yield color", "def gen_sequence(a, b, c):\n i = 1\n while True:\n yield a * i**2 + b * i + c\n i += 1", "def test_nested_gen(n):\n for a in range(n):\n yield (b for b in range(a))", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def __iter__(self):\n for run in self.runs:\n yield run", "def timer(description):\n t0 = time.time()\n yield\n print(f'[{description}] done in {time.time() - t0:.0f} s')", "def static(fps, duration):\n\n frames = int(duration * fps)\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing, frames)\n return animate", "def iterwhite():\n while True:\n for n in rng.randn(100):\n yield n", "def gen_next_time(intervals, start_time=[6, 0, 0], end_time=[23, 0, 0]):\n now = datetime.now()\n year = now.year\n month = now.month\n day = now.day\n\n starttime, endtime = gen_start_end_times(\n start_time=start_time, end_time=end_time\n )\n\n next_datetime = starttime\n\n while next_datetime < endtime:\n\n if next_datetime < now:\n while next_datetime < now:\n next_datetime += timedelta(seconds=intervals)\n else:\n next_datetime += timedelta(seconds=intervals)\n\n yield next_datetime", "def very_simple():\n yield 1", "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "def perf_timer():\n start_time = datetime.now()\n yield\n end_time = datetime.now()\n log.info(end_time - start_time)", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def __iter__(self):\n for x in self.seq: yield x", "def get_times(self):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def __iter__(self):\n yield from self.gen", "def __iter__(self):\n for value in dict.__iter__(self):\n for count in range(self[value]):\n yield value", "def ticker_generator():\n return (v for v in load_equities().values)", "def id_generator():\n\t\tcount = 0\n\t\twhile True:\n\t\t\tyield count\n\t\t\tcount += 1", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def tempo_gen(tempo=120, srate=None):\n dt = tempo / (60 * get_srate(srate))\n return (i * dt for i in count())" ]
[ "0.7211171", "0.7095168", "0.676974", "0.66167104", "0.66167104", "0.66167104", "0.6530617", "0.6517885", "0.64912164", "0.64652574", "0.6435925", "0.64267135", "0.6413881", "0.6370479", "0.6354844", "0.63290936", "0.62823224", "0.62550485", "0.6254462", "0.62441665", "0.622235", "0.62171364", "0.617532", "0.6170421", "0.61660296", "0.61528873", "0.61487913", "0.6146654", "0.6145423", "0.6127929", "0.60942054", "0.6067145", "0.60662895", "0.6037557", "0.60282916", "0.6019171", "0.6007657", "0.59970874", "0.59839386", "0.5971751", "0.5954244", "0.59488374", "0.5941522", "0.5941312", "0.59118944", "0.59109956", "0.5903925", "0.5903162", "0.5903144", "0.5878092", "0.5861819", "0.5843706", "0.58397096", "0.58381337", "0.5837873", "0.5834509", "0.5834459", "0.582228", "0.58073133", "0.58073133", "0.5792741", "0.5778669", "0.57713056", "0.5745666", "0.57446265", "0.5716468", "0.5712249", "0.5707731", "0.5706086", "0.5701153", "0.57005215", "0.5700209", "0.5698443", "0.5696942", "0.5692017", "0.56857663", "0.56822", "0.5679249", "0.5674118", "0.567319", "0.567132", "0.56678385", "0.5667513", "0.56622714", "0.5651936", "0.5649722", "0.5646785", "0.56455207", "0.5645516", "0.5632664", "0.5632296", "0.56309676", "0.56276304", "0.56180185", "0.56177086", "0.5616168", "0.56055856", "0.5590438", "0.558428", "0.55803823" ]
0.597769
39
Names of endogenous variables
def endog_names(self): return self.data.ynames
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMeteorologicalVariableNames(self, product):\r\n return []", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]", "def getOthVarNames( self ):\n\n if self.othVarNames:\n return self.othVarNames.keys()\n\n n = self.adb.get( \"nOthVars\" )\n for indx in range( n ):\n name = self.adb.get( \"othVarName\",\n indx ) \n self.othVarNames[ name ] = indx\n\n return self.othVarNames.keys()", "def getVariableNames(self, product):\r\n\r\n variable_names = self.getDataVariableNames(product) + \\\r\n self.getMaskVariableNames(product) + \\\r\n self.getMeteorologicalVariableNames(product) + \\\r\n self.getSensorVariableNames(product) + \\\r\n self.getInfoVariableNames(product)\r\n\r\n return variable_names", "def get_all_variables_names(self):\n return self.project.get_variable_names() + self.design.get_variable_names()", "def get_variable_names(self):\n return [var[1] for var in self.variables]", "def name(self):\n return '{} {} {}'.format(self.var_period, self.var_type,\n self.var_detail)", "def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def getInfoVariableNames(self, product):\r\n return []", "def getOeiVarNames( self ):\n\n if self.oeiVarNames:\n return self.oeiVarNames.keys()\n\n n = self.adb.get( \"nOeiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oeiVarName\",\n indx ) \n self.oeiVarNames[name] = indx\n\n return self.oeiVarNames.keys()", "def getOqiVarNames( self ):\n\n if self.oqiVarNames:\n return self.oqiVarNames.keys()\n\n n = self.adb.get( \"nOqiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oqiVarName\",\n indx ) \n self.oqiVarNames[name] = indx\n\n return self.oqiVarNames.keys()", "def get_variable_names(self):\n return [VariableString(s) for s in\n self._design.GetVariables()+self._design.GetPostProcessingVariables()]", "def getSensorVariableNames(self, product):\r\n return []", "def get_variables(self):\n return [self.g_t, self.m_t]", "def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result", "def varNames(self):\n return self.__varNames", "def getDataVariableNames(self, product):\r\n return []", "def getSensorVariableNames(self, product):\r\n\r\n return []", "def getOhcVarNames( self ):\n\n if self.ohcVarNames:\n return self.ohcVarNames.keys()\n \n n = self.adb.get( \"nOhcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ohcVarName\",\n indx ) \n self.ohcVarNames[name] = indx\n\n return self.ohcVarNames.keys()", "def variables(names, **kwargs):\n return symbols(names, cls=Variable, seq=True, **kwargs)", "def name(self):\n base_str = 'd{}{}_'.format(self.derivative_count if\n self.derivative_count > 1 else '', self.expr)\n for var, count in self.variable_count:\n base_str += 'd{}{}'.format(var, count if count > 1 else '')\n return base_str", "def getMeteorologicalVariableNames(self, product):\r\n\r\n meteorological_variable_names = []\r\n\r\n return meteorological_variable_names", "def get_all_variables(self):\n 
return self.start.get_all_variables() + self.end.get_all_variables()", "def variables(self):\n return ()", "def variables(self):\r\n return self.get_field('variable')", "def variables(self):\n return {u for u in self if u.type == 'var'}", "def get_variable_names(self):\n varNames = []\n for var in self.variables:\n # EstimationVariable\n varNames.append(var.name)\n return varNames", "def getLinIterVarNames( self ):\n\n self.updateAdb( )\n\n return self.iterNames.keys()", "def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]", "def getOfcVarNames( self ):\n\n if self.ofcVarNames:\n return self.ofcVarNames.keys()\n \n n = self.adb.get( \"nOfcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ofcVarName\",\n indx ) \n self.ofcVarNames[name] = indx\n\n return self.ofcVarNames.keys()", "def get_layer_var_names(self):\n return(self.params)", "def getOriVarNames( self ):\n\n if self.oriVarNames:\n return self.oriVarNames.keys()\n\n n = self.adb.get( \"nOriVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oriVarName\",\n indx ) \n self.oriVarNames[name] = indx\n\n return self.oriVarNames.keys()", "def vars(cls):\n raise NotImplementedError(\"This is an abstract function that needs to be implemented for each value function\")", "def printing_vars(self):\n print(\"Name is \", self.name)", "def getVariables(self):\n statVars = [self[vn] for vn in self.statVars]\n timeVars = [self[vn] for vn in self.timeVars]\n return statVars + timeVars", "def data_variable_names(self):\n data_names = []\n mesh = self.mesh_names()\n prefix = mesh[0]+'_'\n for vname in self.nc.variables.keys():\n if vname.startswith(prefix):\n if self.nc.dimensions.has_key(vname):\n continue\n if hasattr(self.nc.variables[vname],'cf_role'):\n continue\n data_names.append( vname[len(prefix):] )\n return data_names", "def names():\n pass", "def vars(self):\n return self.v", "def var(self, name):\n raise NotImplementedError", "def getSolutionExportVariableNames(cls):\n return {}", "def get_element_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_elem_var\"][:]]", "def variable_names(self):\n\n status, stdout, stderr = self.__xcall__(['--print-variables'])\n\n if status != 0:\n raise RuntimeError(\"error querying --print-variables for package `%s': %s\" % (self.name, stderr))\n\n return stdout.strip().split()", "def __setVarNames(self):\n result = set()\n\n # detecting variables\n for templatePart in self.inputString().split(\"{\"):\n if templatePart is '' or \"}\" not in templatePart:\n continue\n\n endIndex = templatePart.find('}')\n result.add(templatePart[:endIndex])\n\n self.__varNames = list(result)", "def variable(self):", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def var_names(self):\n return self._var_names", "def get_all_variables(self):\n raise NotImplementedError()", "def variables_declared (self) :\r\n\t\tresult = {}\r\n\r\n\t\tfor var in self.variables :\r\n\t\t\tresult[var.name.upper()] = var\r\n\t\t\r\n\t\treturn result", "def __str__(self):\n return f\"Variable(type={self._type}, id={self._id}, value={self.status}, init={self.init})\"", "def nvar(self):\n return len(self.__vars)", "def get_vars(self):\n return [self.mu, self.var]", "def _variable_types(self):\n return self._variable_single_types + self._variable_array_types", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = 
self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def variables(self) -> OrderedDict:\n pass", "def r_vars(size, used=None):\n return r_symbols(size, VAR_SYMBOLS, ARGS.variable_length, used)", "def give_variables_names(variables):\r\n names = map(lambda var: var.name, variables)\r\n h = hist(names)\r\n bad_var = lambda var: not var.name or h[var.name] > 1\r\n\r\n for i, var in enumerate(filter(bad_var, variables)):\r\n var.name = (var.name or \"\") + \"_%d\" % i\r\n\r\n if not unique(map(str, variables)):\r\n raise ValueError(\"Not all variables have unique names.\"\r\n \"Maybe you've named some of the variables identically\")\r\n\r\n return variables", "def get_node_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_nod_var\"][:]]", "def variables(self):\n return self._.vars", "def getVariables(self)->Dict[str,str]:\n pass", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def getBindedNames(self):\n names = []\n for function in self.functions:\n names.append(function.__name__)\n return \", \".join(names)", "def getDataVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_60m)", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def stats_variable_names(res):\n def varname(s):\n pos = s.find(':')\n return s if pos==-1 else s[0:pos]\n return set( [ varname(key) for key in res.keys()] )", "def get_variable_full_name(var):\n if var._save_slice_info:\n return var._save_slice_info.full_name\n else:\n return var.op.name", "def 
generate_variable_names():\n while True:\n name = uuid.uuid4()\n yield f\"_{name.hex}\"", "def variables(self):\n return [term.variable for term in self.terms]", "def print_all_variables():\n for idx, v in enumerate(tf.all_variables()):\n print(\" var %d: %s %s\" % (idx, v.get_shape(), v.name))", "def get_output_names(self):\n outputNames = []\n for outVar in self.outputs:\n # outVar is of type InOutVar and the object that it contains is a PyFMI variable\n outputNames.append(outVar.get_object().name)\n return outputNames", "def names(self):\n\t\treturn", "def variables(self):\n return tuple(flatten([a.variables for a in self.args]))", "def visit_varname(self, node, children):\n # Return only dict nodes\n return {'type':'var','val':str(node)}", "def getOsiVarNames( self ):\n\n if self.osiVarNames:\n return self.osiVarNames.keys()\n \n n = self.adb.get( \"nOsiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"osiVarName\",\n indx ) \n self.osiVarNames[ name ]= indx\n\n return self.osiVarNames.keys()", "def get_all_variables(self):\n return []", "def show_variables(self):\r\n\r\n variablelist = [(x_temp,self.variables[x_temp]) for x_temp in sorted(self.variables.keys())]\r\n display.noteprint(('/C/ '+labels.VARIABLES.upper(), EOL.join([x_temp[0]+BLANK\r\n +COLON+BLANK\r\n +abridge(str(x_temp[1]),40)\r\n for x_temp in variablelist])))", "def variables(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], RandomVariable):\n yield name", "def names(self):\n return self.dark_name, self.light_name", "def variables_used (self) :\r\n\t\treturn []", "def vars(self) -> {(EVar, Pool)}:\n raise NotImplementedError()", "def var():\n def _var(quoted_name):\n name = quoted_name.subexpression.name\n if (value := get_name(name)) is not None:\n return value\n else:\n raise TypeError(f\"Binding {name} not found\")\n yield (\"(λ &[name] . 
any)\", _var)", "def get_global_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_glo_var\"][:]]", "def get_variables(self) -> np.array:\n pass", "def variables(self):\n return self._variablesDef", "def _get_vars(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle': 'var b1=n.round(t,2);',\n 'square': 'var b1=n.round(t,2);',\n 'diamond': 'var b1=n.round(t*1.3,2);',\n 'hexagram': 'var b1=n.round(t,2);var b2=n.round(t/2,2);var b3=n.round(t*Math.sqrt(3)/2,2);'\n }[symbol]\n return {\n 37: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 38: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 39: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 40: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 34: 'var d1=n.round(t,2);',\n 33: 'var d1=n.round(t*1.4,2);',\n 35: 'var d1=n.round(t*1.2,2);var d2=n.round(t*0.85,2);',\n 36: 'var d1=n.round(t/2,2);var d2=n.round(t,2);'\n }[symbol]", "def get_variables_by_name(given_name, scope=None):\n suffix = '/' + given_name + ':|^' + given_name + ':'\n return get_variables(scope=scope, suffix=suffix)", "def variables(self) -> OrderedDict:\n return OrderedDict({'mu': self.mu, 'sig': self.sig})", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def _var_name_generator():\n count = itertools.count()\n while True:\n yield '_var_' + str(count.next())", "def variable_string(self, name):\n return \"$(\" + name + \")\"", "def __str__(self):\n return '{}.{} >> {}'.format(self.scope, self.name,\n '/'.join(map(str, self.variables)))", "def put_var_names(self, var_type, num_vars, var_names):\n if var_type.upper() not in EX_VAR_TYPES:\n raise ExodusIIWriterError(\n \"var_type {0} not recognized\".format(var_type))\n # var names must all be of same length due to Fortran restrictions\n var_names = [\"{0:{1}s}\".format(x, MAX_STR_LENGTH)[:MAX_STR_LENGTH]\n for x in var_names]\n ierr = exolib.py_expvan(self.exoid, var_type.lower(), var_names)\n if ierr:\n raise ExodusIIWriterError(\"Error putting var names\")", "def getMaskVariableNames(self, product):\r\n return []", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def get_variables(self):\n\t\treturn self.variables", "def _setup_special_names(self):\n special_names = []\n dynamic_params = tuple(set(self._fget_params_list + self._fset_params_list))\n # Check whether class variables of DynamicProperty type are present\n for attr_name, attr in getmembers(self.__class__):\n if isinstance(attr, DynamicProperty):\n special_names += [attr_name + \"_\" + key for key in dynamic_params]\n # Check if special variables are defined at class level\n for 
attr, value in getmembers(self.__class__):\n if attr in special_names:\n # Copy class special variable at instance level, prefixing reserved_prefix\n setattr(self, self.__reserved_prefix + attr, value)\n return special_names" ]
[ "0.64263195", "0.6372972", "0.63582456", "0.6340898", "0.6314623", "0.625726", "0.62262017", "0.62185633", "0.61736417", "0.61590856", "0.61525357", "0.61390096", "0.6113262", "0.6112568", "0.6102501", "0.60962504", "0.6091112", "0.6083438", "0.6079277", "0.6068774", "0.60612804", "0.6044552", "0.60366374", "0.60131043", "0.6000316", "0.5997138", "0.5987385", "0.59729964", "0.5970381", "0.59599495", "0.5950197", "0.5948254", "0.59304345", "0.5922799", "0.5919998", "0.58687663", "0.5864936", "0.58380824", "0.5838037", "0.5830587", "0.58292884", "0.5806649", "0.57983327", "0.5759079", "0.57569134", "0.57284945", "0.5725828", "0.5725764", "0.5720664", "0.56934005", "0.56866896", "0.5686396", "0.5668344", "0.56661433", "0.5649939", "0.56449616", "0.5643111", "0.56412464", "0.56224203", "0.5595914", "0.55921084", "0.5589689", "0.5589689", "0.5589689", "0.5589689", "0.5586577", "0.55528677", "0.553553", "0.5533948", "0.5532035", "0.55314094", "0.55286884", "0.5528151", "0.55280584", "0.55261135", "0.5524591", "0.55244446", "0.5521872", "0.551207", "0.5510684", "0.550892", "0.5508908", "0.55034995", "0.55014247", "0.55012506", "0.5497999", "0.5497491", "0.54862165", "0.54826796", "0.5480371", "0.5475101", "0.5465996", "0.54585564", "0.5448978", "0.54481894", "0.54480624", "0.54462653", "0.5445449", "0.5444748", "0.5440367" ]
0.6065987
20
Names of exogenous variables
def exog_names(self): return self.data.xnames
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def get_all_variables_names(self):\n return self.project.get_variable_names() + self.design.get_variable_names()", "def get_variable_names(self):\n return [var[1] for var in self.variables]", "def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result", "def get_variable_names(self):\n varNames = []\n for var in self.variables:\n # EstimationVariable\n varNames.append(var.name)\n return varNames", "def get_variable_names(self):\n return [VariableString(s) for s in\n self._design.GetVariables()+self._design.GetPostProcessingVariables()]", "def getVariableNames(self, product):\r\n\r\n variable_names = self.getDataVariableNames(product) + \\\r\n self.getMaskVariableNames(product) + \\\r\n self.getMeteorologicalVariableNames(product) + \\\r\n self.getSensorVariableNames(product) + \\\r\n self.getInfoVariableNames(product)\r\n\r\n return variable_names", "def getOthVarNames( self ):\n\n if self.othVarNames:\n return self.othVarNames.keys()\n\n n = self.adb.get( \"nOthVars\" )\n for indx in range( n ):\n name = self.adb.get( \"othVarName\",\n indx ) \n self.othVarNames[ name ] = indx\n\n return self.othVarNames.keys()", "def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names", "def variables(self):\n return {u for u in self if u.type == 'var'}", "def getOeiVarNames( self ):\n\n if self.oeiVarNames:\n return self.oeiVarNames.keys()\n\n n = self.adb.get( \"nOeiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oeiVarName\",\n indx ) \n self.oeiVarNames[name] = indx\n\n return self.oeiVarNames.keys()", "def getInfoVariableNames(self, product):\r\n return []", "def variables(names, **kwargs):\n return symbols(names, cls=Variable, seq=True, **kwargs)", "def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]", "def getOqiVarNames( self ):\n\n if self.oqiVarNames:\n return self.oqiVarNames.keys()\n\n n = self.adb.get( \"nOqiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oqiVarName\",\n indx ) \n self.oqiVarNames[name] = indx\n\n return self.oqiVarNames.keys()", "def getMeteorologicalVariableNames(self, product):\r\n return []", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]", "def varNames(self):\n return self.__varNames", "def getOhcVarNames( self ):\n\n if self.ohcVarNames:\n return self.ohcVarNames.keys()\n \n n = self.adb.get( \"nOhcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ohcVarName\",\n indx ) \n self.ohcVarNames[name] = indx\n\n return self.ohcVarNames.keys()", "def getSensorVariableNames(self, product):\r\n return []", "def getSensorVariableNames(self, product):\r\n\r\n return []", "def variable_names(self):\n\n status, stdout, stderr = self.__xcall__(['--print-variables'])\n\n if status != 0:\n raise RuntimeError(\"error querying --print-variables for package `%s': %s\" % (self.name, stderr))\n\n return stdout.strip().split()", "def give_variables_names(variables):\r\n names = map(lambda var: var.name, 
variables)\r\n h = hist(names)\r\n bad_var = lambda var: not var.name or h[var.name] > 1\r\n\r\n for i, var in enumerate(filter(bad_var, variables)):\r\n var.name = (var.name or \"\") + \"_%d\" % i\r\n\r\n if not unique(map(str, variables)):\r\n raise ValueError(\"Not all variables have unique names.\"\r\n \"Maybe you've named some of the variables identically\")\r\n\r\n return variables", "def name(self):\n return '{} {} {}'.format(self.var_period, self.var_type,\n self.var_detail)", "def getSolutionExportVariableNames(cls):\n return {}", "def data_variable_names(self):\n data_names = []\n mesh = self.mesh_names()\n prefix = mesh[0]+'_'\n for vname in self.nc.variables.keys():\n if vname.startswith(prefix):\n if self.nc.dimensions.has_key(vname):\n continue\n if hasattr(self.nc.variables[vname],'cf_role'):\n continue\n data_names.append( vname[len(prefix):] )\n return data_names", "def getOriVarNames( self ):\n\n if self.oriVarNames:\n return self.oriVarNames.keys()\n\n n = self.adb.get( \"nOriVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oriVarName\",\n indx ) \n self.oriVarNames[name] = indx\n\n return self.oriVarNames.keys()", "def get_element_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_elem_var\"][:]]", "def getDataVariableNames(self, product):\r\n return []", "def get_variables(self):\n return [self.g_t, self.m_t]", "def name(self):\n base_str = 'd{}{}_'.format(self.derivative_count if\n self.derivative_count > 1 else '', self.expr)\n for var, count in self.variable_count:\n base_str += 'd{}{}'.format(var, count if count > 1 else '')\n return base_str", "def getVariableInfo(self, variables, name):\r\n\r\n return [var.return_variable_dict() for var in variables if var.name == name][0]", "def getOsiVarNames( self ):\n\n if self.osiVarNames:\n return self.osiVarNames.keys()\n \n n = self.adb.get( \"nOsiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"osiVarName\",\n indx ) \n self.osiVarNames[ name ]= indx\n\n return self.osiVarNames.keys()", "def getOfcVarNames( self ):\n\n if self.ofcVarNames:\n return self.ofcVarNames.keys()\n \n n = self.adb.get( \"nOfcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ofcVarName\",\n indx ) \n self.ofcVarNames[name] = indx\n\n return self.ofcVarNames.keys()", "def vars(cls):\n raise NotImplementedError(\"This is an abstract function that needs to be implemented for each value function\")", "def variables(self):\r\n return self.get_field('variable')", "def get_layer_var_names(self):\n return(self.params)", "def var(self, name):\n raise NotImplementedError", "def __setVarNames(self):\n result = set()\n\n # detecting variables\n for templatePart in self.inputString().split(\"{\"):\n if templatePart is '' or \"}\" not in templatePart:\n continue\n\n endIndex = templatePart.find('}')\n result.add(templatePart[:endIndex])\n\n self.__varNames = list(result)", "def var_names(self):\n return self._var_names", "def _variable_types(self):\n return self._variable_single_types + self._variable_array_types", "def getMeteorologicalVariableNames(self, product):\r\n\r\n meteorological_variable_names = []\r\n\r\n return meteorological_variable_names", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = 
self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def variables_declared (self) :\r\n\t\tresult = {}\r\n\r\n\t\tfor var in self.variables :\r\n\t\t\tresult[var.name.upper()] = var\r\n\t\t\r\n\t\treturn result", "def stats_variable_names(res):\n def varname(s):\n pos = s.find(':')\n return s if pos==-1 else s[0:pos]\n return set( [ varname(key) for key in res.keys()] )", "def get_input_names(self):\n inputNames = []\n for inVar in self.inputs:\n # inVar is of type InOutVar and the object that it contains is a PyFMI variable\n inputNames.append(inVar.get_object().name)\n return inputNames", "def put_var_names(self, var_type, num_vars, var_names):\n if var_type.upper() not in EX_VAR_TYPES:\n raise ExodusIIWriterError(\n \"var_type {0} not recognized\".format(var_type))\n # var names must all be of same length due to Fortran restrictions\n var_names = [\"{0:{1}s}\".format(x, MAX_STR_LENGTH)[:MAX_STR_LENGTH]\n for x in var_names]\n ierr = exolib.py_expvan(self.exoid, var_type.lower(), var_names)\n if ierr:\n raise ExodusIIWriterError(\"Error putting var names\")", "def printing_vars(self):\n print(\"Name is \", self.name)", "def _setup_special_names(self):\n special_names = []\n dynamic_params = tuple(set(self._fget_params_list + self._fset_params_list))\n # Check whether class variables of DynamicProperty type are present\n for attr_name, attr in getmembers(self.__class__):\n if isinstance(attr, DynamicProperty):\n special_names += [attr_name + \"_\" + key for key in dynamic_params]\n # Check if special variables are defined at class level\n for attr, value in getmembers(self.__class__):\n if attr in special_names:\n # Copy class special variable at instance level, prefixing reserved_prefix\n setattr(self, self.__reserved_prefix + attr, value)\n return special_names", "def get_vars(self):\n return [self.mu, self.var]", "def names():\n pass", "def use_vars():\n obj = Subclass(\"Austin\", \"Red\")\n print(vars(obj))\n print(obj.__dict__)\n if vars(obj) == obj.__dict__:\n print(True)\n print(type(vars(obj)))\n \"\"\"This throws a TypeError because built-in types do not have a __dict__ attribute.\"\"\"\n # print(vars(0))", "def vars(self):\n return self.v", "def 
get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def getVariables(self):\n statVars = [self[vn] for vn in self.statVars]\n timeVars = [self[vn] for vn in self.timeVars]\n return statVars + timeVars", "def get_input_var_names(self):\n return self._input_var_names", "def variables(self):\n return self._.vars", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def get_all_variables(self):\n raise NotImplementedError()", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def getLinIterVarNames( self ):\n\n self.updateAdb( )\n\n return self.iterNames.keys()", "def get_output_names(self):\n outputNames = []\n for outVar in self.outputs:\n # outVar is of type InOutVar and the object that it contains is a PyFMI variable\n outputNames.append(outVar.get_object().name)\n return outputNames", "def variables(self) -> OrderedDict:\n return OrderedDict({'mu': self.mu, 'sig': self.sig})", "def vars(self) -> {(EVar, Pool)}:\n raise NotImplementedError()", "def var():\n def _var(quoted_name):\n name = quoted_name.subexpression.name\n if (value := get_name(name)) is not None:\n return value\n else:\n raise TypeError(f\"Binding {name} not found\")\n yield (\"(λ &[name] . any)\", _var)", "def _get_vars(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle': 'var b1=n.round(t,2);',\n 'square': 'var b1=n.round(t,2);',\n 'diamond': 'var b1=n.round(t*1.3,2);',\n 'hexagram': 'var b1=n.round(t,2);var b2=n.round(t/2,2);var b3=n.round(t*Math.sqrt(3)/2,2);'\n }[symbol]\n return {\n 37: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 38: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 39: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 40: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 34: 'var d1=n.round(t,2);',\n 33: 'var d1=n.round(t*1.4,2);',\n 35: 'var d1=n.round(t*1.2,2);var d2=n.round(t*0.85,2);',\n 36: 'var d1=n.round(t/2,2);var d2=n.round(t,2);'\n }[symbol]", "def getMaskVariableNames(self, product):\r\n return []", "def variables(self):\n return [term.variable for term in self.terms]", "def get_global_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_glo_var\"][:]]", "def variables(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], RandomVariable):\n yield name", "def variables(model: Model) -> AbstractSet[str]:\r\n assert is_model(model)\r\n return model.keys()", "def print_all_variables():\n for idx, v in enumerate(tf.all_variables()):\n print(\" var %d: %s %s\" % (idx, v.get_shape(), v.name))", "def get_node_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_nod_var\"][:]]", "def variables(model: Model) -> AbstractSet[str]:\n assert is_model(model)\n return model.keys()", "def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def show_variables(self):\r\n\r\n 
variablelist = [(x_temp,self.variables[x_temp]) for x_temp in sorted(self.variables.keys())]\r\n display.noteprint(('/C/ '+labels.VARIABLES.upper(), EOL.join([x_temp[0]+BLANK\r\n +COLON+BLANK\r\n +abridge(str(x_temp[1]),40)\r\n for x_temp in variablelist])))", "def getSolRatioVarNames( self ):\n\n self.updateAdb( )\n\n return self.solNames.keys()", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def vars(*tensor_types):\n return map(var, tensor_types)", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def getBindedNames(self):\n names = []\n for function in self.functions:\n names.append(function.__name__)\n return \", \".join(names)", "def getVariables(self)->Dict[str,str]:\n pass", "def get_all_variables(instance):\n return [v for v in dir(instance) if not callable(getattr(instance, v))]", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def getDataVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_60m)", "def dictOfVariables(self):\n return {x.name: x for x in self.variables}", "def get_all_var_names(self):\n\n if hasattr(self, \"all_var_names\"):\n return self.all_var_names\n\n # Append all variables in model (defined in YAML).\n aux_all_var_names = []\n aux_all_var_names.extend(self.sim_config_params)\n aux_all_var_names.extend(self.sim_inputs)\n aux_all_var_names.extend(self.sim_outputs)\n aux_all_var_names.extend(self.sim_other_vars)\n\n # Remove duplicates (if any) -- Keeping initial order\n all_var_names = [aux_all_var_names[i] for i in range(len(aux_all_var_names)) \\\n if aux_all_var_names[i] not in aux_all_var_names[:i]]\n\n # Store for following calls\n self.all_var_names = all_var_names\n return self.all_var_names", "def retrieve_name(self, var):\r\n\t\tfor fi in reversed(inspect.stack()):\r\n\t\t\tnames = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]\r\n\t\t\tif len(names) > 0:\r\n\t\t\t\treturn names[0]\r\n\t\treturn \"<unknown>\"", "def vars(svars):\n return np.array([pm.var(var) for var in svars.split()])", "def generate_variable_names():\n while True:\n name = uuid.uuid4()\n yield f\"_{name.hex}\"", "def visit_varname(self, node, children):\n # Return only dict nodes\n return {'type':'var','val':str(node)}", "def getInfoVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_60m)\r\n return []", "def get_variable_values(self, vars):\n raise NotImplementedError()", "def get_variable_full_name(var):\n if var._save_slice_info:\n return var._save_slice_info.full_name\n else:\n return 
var.op.name", "def get_variable_names(text):\n names = []\n if '@@' in text:\n matches = _property_pattern.findall(text)\n for token, key in matches:\n names.append(key)\n\n return names", "def get_all_variables(self):\n return []" ]
[ "0.69048613", "0.6785922", "0.67740816", "0.6711591", "0.6687262", "0.6663156", "0.6635133", "0.66310126", "0.6589906", "0.6581961", "0.6580228", "0.6575236", "0.6552368", "0.6544824", "0.6535504", "0.6525284", "0.6511582", "0.64991194", "0.64609987", "0.641673", "0.6381078", "0.6379418", "0.6365293", "0.6352937", "0.63271374", "0.6306018", "0.62861323", "0.62565476", "0.6240985", "0.6227237", "0.62216944", "0.620346", "0.62016004", "0.61982244", "0.6197064", "0.6185238", "0.617523", "0.61743015", "0.6171904", "0.6116352", "0.6103282", "0.60806125", "0.6057325", "0.60489136", "0.603551", "0.6034992", "0.60331947", "0.6026331", "0.60188305", "0.5999864", "0.59965295", "0.59958863", "0.5988674", "0.5960137", "0.59338677", "0.59249157", "0.5922055", "0.58924544", "0.5892304", "0.5885361", "0.58795", "0.586849", "0.5846989", "0.58311594", "0.58290035", "0.5825151", "0.5818875", "0.5810105", "0.5809393", "0.580774", "0.58058137", "0.57959306", "0.5779041", "0.5775341", "0.57750213", "0.57699573", "0.5769692", "0.57675976", "0.576251", "0.57568246", "0.5748901", "0.57463074", "0.57463074", "0.57463074", "0.5743074", "0.57425624", "0.5740519", "0.5732459", "0.5725715", "0.5718498", "0.57123667", "0.57079047", "0.57073474", "0.57052594", "0.570229", "0.569894", "0.5698264", "0.569772", "0.5697154", "0.5693202" ]
0.5873568
61